author     unknown <brian@zim.(none)>  2005-04-26 18:19:54 -0700
committer  unknown <brian@zim.(none)>  2005-04-26 18:19:54 -0700
commit     b7e422be1b7a8ca3f4e761e67db5e8febc701dfd (patch)
tree       df9016f3d70b4657f89dcddca2ec4e188fc7fbdf /storage/ndb/src/kernel/blocks
parent     c0333ecee42a4de499b3377cfa21d7b5af1ddd2b (diff)
download   mariadb-git-b7e422be1b7a8ca3f4e761e67db5e8febc701dfd.tar.gz
Changes to create storage directory for storage engines.
storage/heap/.cvsignore: mvdir storage/heap/ChangeLog: mvdir storage/heap/Makefile.am: mvdir storage/heap/_check.c: mvdir storage/heap/_rectest.c: mvdir storage/heap/heapdef.h: mvdir storage/heap/hp_block.c: mvdir storage/heap/hp_clear.c: mvdir storage/heap/hp_close.c: mvdir storage/heap/hp_create.c: mvdir storage/heap/hp_delete.c: mvdir storage/heap/hp_extra.c: mvdir storage/heap/hp_hash.c: mvdir storage/heap/hp_info.c: mvdir storage/heap/hp_open.c: mvdir storage/heap/hp_panic.c: mvdir storage/heap/hp_rename.c: mvdir storage/heap/hp_rfirst.c: mvdir storage/heap/hp_rkey.c: mvdir storage/heap/hp_rlast.c: mvdir storage/heap/hp_rnext.c: mvdir storage/heap/hp_rprev.c: mvdir storage/heap/hp_rrnd.c: mvdir storage/heap/hp_rsame.c: mvdir storage/heap/hp_scan.c: mvdir storage/heap/hp_static.c: mvdir storage/heap/hp_test1.c: mvdir storage/heap/hp_test2.c: mvdir storage/heap/hp_update.c: mvdir storage/heap/hp_write.c: mvdir storage/heap/make-ccc: mvdir storage/myisam/.cvsignore: mvdir storage/myisam/ChangeLog: mvdir storage/myisam/Makefile.am: mvdir storage/myisam/NEWS: mvdir storage/myisam/TODO: mvdir storage/myisam/ft_boolean_search.c: mvdir storage/myisam/ft_eval.c: mvdir storage/myisam/ft_eval.h: mvdir storage/myisam/ft_nlq_search.c: mvdir storage/myisam/ft_parser.c: mvdir storage/myisam/ft_static.c: mvdir storage/myisam/ft_stem.c: mvdir storage/myisam/ft_stopwords.c: mvdir storage/myisam/ft_test1.c: mvdir storage/myisam/ft_test1.h: mvdir storage/myisam/ft_update.c: mvdir storage/myisam/ftdefs.h: mvdir storage/myisam/fulltext.h: mvdir storage/myisam/make-ccc: mvdir storage/myisam/mi_cache.c: mvdir storage/myisam/mi_changed.c: mvdir storage/myisam/mi_check.c: mvdir storage/myisam/mi_checksum.c: mvdir storage/myisam/mi_close.c: mvdir storage/myisam/mi_create.c: mvdir storage/myisam/mi_dbug.c: mvdir storage/myisam/mi_delete.c: mvdir storage/myisam/mi_delete_all.c: mvdir storage/myisam/mi_delete_table.c: mvdir storage/myisam/mi_dynrec.c: mvdir storage/myisam/mi_extra.c: mvdir storage/myisam/mi_info.c: mvdir storage/myisam/mi_key.c: mvdir storage/myisam/mi_keycache.c: mvdir storage/myisam/mi_locking.c: mvdir storage/myisam/mi_log.c: mvdir storage/myisam/mi_open.c: mvdir storage/myisam/mi_packrec.c: mvdir storage/myisam/mi_page.c: mvdir storage/myisam/mi_panic.c: mvdir storage/myisam/mi_preload.c: mvdir storage/myisam/mi_range.c: mvdir storage/myisam/mi_rename.c: mvdir storage/myisam/mi_rfirst.c: mvdir storage/myisam/mi_rkey.c: mvdir storage/myisam/mi_rlast.c: mvdir storage/myisam/mi_rnext.c: mvdir storage/myisam/mi_rnext_same.c: mvdir storage/myisam/mi_rprev.c: mvdir storage/myisam/mi_rrnd.c: mvdir storage/myisam/mi_rsame.c: mvdir storage/myisam/ftbench/Ecompare.pl: mvdir storage/myisam/ftbench/Ecreate.pl: mvdir storage/myisam/ftbench/Ereport.pl: mvdir storage/myisam/ftbench/README: mvdir storage/myisam/ftbench/ft-test-run.sh: mvdir storage/myisam/mi_rsamepos.c: mvdir storage/myisam/mi_scan.c: mvdir storage/myisam/mi_search.c: mvdir storage/myisam/mi_static.c: mvdir storage/myisam/mi_statrec.c: mvdir storage/myisam/mi_test1.c: mvdir storage/myisam/mi_test2.c: mvdir storage/myisam/mi_test3.c: mvdir storage/myisam/mi_test_all.res: mvdir storage/myisam/mi_test_all.sh: mvdir storage/myisam/mi_unique.c: mvdir storage/myisam/mi_update.c: mvdir storage/myisam/mi_write.c: mvdir storage/myisam/myisam_ftdump.c: mvdir storage/myisam/myisamchk.c: mvdir storage/myisam/myisamdef.h: mvdir storage/myisam/myisamlog.c: mvdir storage/myisam/myisampack.c: mvdir storage/myisam/rt_index.c: mvdir storage/myisam/rt_index.h: 
mvdir storage/myisam/rt_key.c: mvdir storage/myisam/rt_key.h: mvdir storage/myisam/rt_mbr.c: mvdir storage/myisam/rt_mbr.h: mvdir storage/myisam/rt_split.c: mvdir storage/myisam/rt_test.c: mvdir storage/myisam/sort.c: mvdir storage/myisam/sp_defs.h: mvdir storage/myisam/sp_key.c: mvdir storage/myisam/sp_test.c: mvdir storage/myisam/test_pack: mvdir storage/myisammrg/.cvsignore: mvdir storage/myisammrg/Makefile.am: mvdir storage/myisammrg/make-ccc: mvdir storage/myisammrg/myrg_close.c: mvdir storage/myisammrg/myrg_create.c: mvdir storage/myisammrg/myrg_def.h: mvdir storage/myisammrg/myrg_delete.c: mvdir storage/myisammrg/myrg_extra.c: mvdir storage/myisammrg/myrg_info.c: mvdir storage/myisammrg/myrg_locking.c: mvdir storage/myisammrg/myrg_open.c: mvdir storage/myisammrg/myrg_panic.c: mvdir storage/myisammrg/myrg_queue.c: mvdir storage/myisammrg/myrg_range.c: mvdir storage/myisammrg/myrg_rfirst.c: mvdir storage/myisammrg/myrg_rkey.c: mvdir storage/myisammrg/myrg_rlast.c: mvdir storage/myisammrg/myrg_rnext.c: mvdir storage/myisammrg/myrg_rnext_same.c: mvdir storage/myisammrg/myrg_rprev.c: mvdir storage/myisammrg/myrg_rrnd.c: mvdir storage/myisammrg/myrg_rsame.c: mvdir storage/myisammrg/myrg_static.c: mvdir storage/myisammrg/myrg_update.c: mvdir storage/myisammrg/myrg_write.c: mvdir storage/innobase/Makefile.am: mvdir storage/innobase/btr/Makefile.am: mvdir storage/innobase/btr/btr0btr.c: mvdir storage/innobase/btr/btr0cur.c: mvdir storage/innobase/btr/btr0pcur.c: mvdir storage/innobase/btr/btr0sea.c: mvdir storage/innobase/btr/makefilewin: mvdir storage/innobase/buf/Makefile.am: mvdir storage/innobase/buf/buf0buf.c: mvdir storage/innobase/buf/buf0flu.c: mvdir storage/innobase/buf/buf0lru.c: mvdir storage/innobase/buf/buf0rea.c: mvdir storage/innobase/buf/makefilewin: mvdir storage/innobase/configure.in: mvdir storage/innobase/data/Makefile.am: mvdir storage/innobase/data/data0data.c: mvdir storage/innobase/data/data0type.c: mvdir storage/innobase/data/makefilewin: mvdir storage/innobase/db/db0err.h: mvdir storage/innobase/dict/Makefile.am: mvdir storage/innobase/dict/dict0boot.c: mvdir storage/innobase/dict/dict0crea.c: mvdir storage/innobase/dict/dict0dict.c: mvdir storage/innobase/dict/dict0load.c: mvdir storage/innobase/makefilewin: mvdir storage/innobase/my_cnf: mvdir storage/innobase/dict/dict0mem.c: mvdir storage/innobase/dict/makefilewin: mvdir storage/innobase/dyn/Makefile.am: mvdir storage/innobase/dyn/dyn0dyn.c: mvdir storage/innobase/dyn/makefilewin: mvdir storage/innobase/eval/Makefile.am: mvdir storage/innobase/eval/eval0eval.c: mvdir storage/innobase/eval/eval0proc.c: mvdir storage/innobase/eval/makefilewin: mvdir storage/innobase/fil/Makefile.am: mvdir storage/innobase/fil/fil0fil.c: mvdir storage/innobase/fil/makefilewin: mvdir storage/innobase/fsp/Makefile.am: mvdir storage/innobase/fsp/fsp0fsp.c: mvdir storage/innobase/fsp/makefilewin: mvdir storage/innobase/fut/Makefile.am: mvdir storage/innobase/fut/fut0fut.c: mvdir storage/innobase/fut/fut0lst.c: mvdir storage/innobase/fut/makefilewin: mvdir storage/innobase/ha/Makefile.am: mvdir storage/innobase/ha/ha0ha.c: mvdir storage/innobase/ha/hash0hash.c: mvdir storage/innobase/ha/makefilewin: mvdir storage/innobase/ibuf/Makefile.am: mvdir storage/innobase/ibuf/ibuf0ibuf.c: mvdir storage/innobase/ibuf/makefilewin: mvdir storage/innobase/include/Makefile.am: mvdir storage/innobase/include/Makefile.i: mvdir storage/innobase/include/btr0btr.h: mvdir storage/innobase/include/btr0btr.ic: mvdir storage/innobase/include/btr0cur.h: mvdir 
storage/innobase/include/btr0cur.ic: mvdir storage/innobase/include/btr0pcur.h: mvdir storage/innobase/include/btr0pcur.ic: mvdir storage/innobase/include/btr0sea.h: mvdir storage/innobase/include/btr0sea.ic: mvdir storage/innobase/include/btr0types.h: mvdir storage/innobase/include/buf0buf.h: mvdir storage/innobase/include/buf0buf.ic: mvdir storage/innobase/include/buf0flu.h: mvdir storage/innobase/include/buf0flu.ic: mvdir storage/innobase/include/buf0lru.h: mvdir storage/innobase/include/buf0lru.ic: mvdir storage/innobase/include/buf0rea.h: mvdir storage/innobase/include/buf0types.h: mvdir storage/innobase/include/data0data.h: mvdir storage/innobase/include/data0data.ic: mvdir storage/innobase/include/data0type.h: mvdir storage/innobase/include/data0type.ic: mvdir storage/innobase/include/data0types.h: mvdir storage/innobase/include/db0err.h: mvdir storage/innobase/include/dict0boot.h: mvdir storage/innobase/include/dict0boot.ic: mvdir storage/innobase/include/dict0crea.h: mvdir storage/innobase/include/dict0crea.ic: mvdir storage/innobase/include/dict0dict.h: mvdir storage/innobase/include/dict0dict.ic: mvdir storage/innobase/include/dict0load.h: mvdir storage/innobase/include/dict0load.ic: mvdir storage/innobase/include/dict0mem.h: mvdir storage/innobase/include/dict0mem.ic: mvdir storage/innobase/include/dict0types.h: mvdir storage/innobase/include/dyn0dyn.h: mvdir storage/innobase/include/dyn0dyn.ic: mvdir storage/innobase/include/eval0eval.h: mvdir storage/innobase/include/eval0eval.ic: mvdir storage/innobase/include/eval0proc.h: mvdir storage/innobase/include/eval0proc.ic: mvdir storage/innobase/include/fil0fil.h: mvdir storage/innobase/include/fsp0fsp.h: mvdir storage/innobase/include/fsp0fsp.ic: mvdir storage/innobase/include/fut0fut.h: mvdir storage/innobase/include/fut0fut.ic: mvdir storage/innobase/include/fut0lst.h: mvdir storage/innobase/include/fut0lst.ic: mvdir storage/innobase/include/ha0ha.h: mvdir storage/innobase/include/ha0ha.ic: mvdir storage/innobase/include/hash0hash.h: mvdir storage/innobase/include/hash0hash.ic: mvdir storage/innobase/include/ibuf0ibuf.h: mvdir storage/innobase/include/ibuf0ibuf.ic: mvdir storage/innobase/include/ibuf0types.h: mvdir storage/innobase/include/lock0lock.h: mvdir storage/innobase/include/lock0lock.ic: mvdir storage/innobase/include/lock0types.h: mvdir storage/innobase/include/log0log.h: mvdir storage/innobase/include/log0log.ic: mvdir storage/innobase/include/log0recv.h: mvdir storage/innobase/include/log0recv.ic: mvdir storage/innobase/include/mach0data.h: mvdir storage/innobase/include/mach0data.ic: mvdir storage/innobase/include/makefilewin.i: mvdir storage/innobase/include/mem0dbg.h: mvdir storage/innobase/include/mem0dbg.ic: mvdir storage/innobase/include/mem0mem.h: mvdir storage/innobase/include/mem0mem.ic: mvdir storage/innobase/include/mem0pool.h: mvdir storage/innobase/include/mem0pool.ic: mvdir storage/innobase/include/mtr0log.h: mvdir storage/innobase/include/mtr0log.ic: mvdir storage/innobase/include/mtr0mtr.h: mvdir storage/innobase/include/mtr0mtr.ic: mvdir storage/innobase/include/mtr0types.h: mvdir storage/innobase/include/os0file.h: mvdir storage/innobase/include/os0proc.h: mvdir storage/innobase/include/os0proc.ic: mvdir storage/innobase/include/os0sync.h: mvdir storage/innobase/include/os0sync.ic: mvdir storage/innobase/include/os0thread.h: mvdir storage/innobase/include/os0thread.ic: mvdir storage/innobase/include/page0cur.h: mvdir storage/innobase/include/page0cur.ic: mvdir storage/innobase/include/page0page.h: 
mvdir storage/innobase/include/page0page.ic: mvdir storage/innobase/include/page0types.h: mvdir storage/innobase/include/pars0grm.h: mvdir storage/innobase/include/pars0opt.h: mvdir storage/innobase/include/pars0opt.ic: mvdir storage/innobase/include/pars0pars.h: mvdir storage/innobase/include/pars0pars.ic: mvdir storage/innobase/include/pars0sym.h: mvdir storage/innobase/include/pars0sym.ic: mvdir storage/innobase/include/pars0types.h: mvdir storage/innobase/include/que0que.h: mvdir storage/innobase/include/que0que.ic: mvdir storage/innobase/include/que0types.h: mvdir storage/innobase/include/read0read.h: mvdir storage/innobase/include/read0read.ic: mvdir storage/innobase/include/read0types.h: mvdir storage/innobase/include/rem0cmp.h: mvdir storage/innobase/include/rem0cmp.ic: mvdir storage/innobase/include/rem0rec.h: mvdir storage/innobase/include/rem0rec.ic: mvdir storage/innobase/include/rem0types.h: mvdir storage/innobase/include/row0ins.h: mvdir storage/innobase/include/row0ins.ic: mvdir storage/innobase/include/row0mysql.h: mvdir storage/innobase/include/row0mysql.ic: mvdir storage/innobase/include/row0purge.h: mvdir storage/innobase/include/row0purge.ic: mvdir storage/innobase/include/row0row.h: mvdir storage/innobase/include/row0row.ic: mvdir storage/innobase/include/row0sel.h: mvdir storage/innobase/include/row0sel.ic: mvdir storage/innobase/include/row0types.h: mvdir storage/innobase/include/row0uins.h: mvdir storage/innobase/include/row0uins.ic: mvdir storage/innobase/include/row0umod.h: mvdir storage/innobase/include/row0umod.ic: mvdir storage/innobase/include/row0undo.h: mvdir storage/innobase/include/row0undo.ic: mvdir storage/innobase/include/row0upd.h: mvdir storage/innobase/include/row0upd.ic: mvdir storage/innobase/include/row0vers.h: mvdir storage/innobase/include/row0vers.ic: mvdir storage/innobase/include/srv0que.h: mvdir storage/innobase/include/srv0srv.h: mvdir storage/innobase/include/srv0srv.ic: mvdir storage/innobase/include/srv0start.h: mvdir storage/innobase/include/sync0arr.h: mvdir storage/innobase/include/sync0arr.ic: mvdir storage/innobase/include/sync0rw.h: mvdir storage/innobase/include/sync0rw.ic: mvdir storage/innobase/include/sync0sync.h: mvdir storage/innobase/include/sync0sync.ic: mvdir storage/innobase/include/sync0types.h: mvdir storage/innobase/include/thr0loc.h: mvdir storage/innobase/include/thr0loc.ic: mvdir storage/innobase/include/trx0purge.h: mvdir storage/innobase/include/trx0purge.ic: mvdir storage/innobase/include/trx0rec.h: mvdir storage/innobase/include/trx0rec.ic: mvdir storage/innobase/include/trx0roll.h: mvdir storage/innobase/include/trx0roll.ic: mvdir storage/innobase/include/trx0rseg.h: mvdir storage/innobase/include/trx0rseg.ic: mvdir storage/innobase/include/trx0sys.h: mvdir storage/innobase/include/trx0sys.ic: mvdir storage/innobase/include/trx0trx.h: mvdir storage/innobase/include/trx0trx.ic: mvdir storage/innobase/include/trx0types.h: mvdir storage/innobase/include/trx0undo.h: mvdir storage/innobase/include/trx0undo.ic: mvdir storage/innobase/include/trx0xa.h: mvdir storage/innobase/include/univ.i: mvdir storage/innobase/include/usr0sess.h: mvdir storage/innobase/include/usr0sess.ic: mvdir storage/innobase/include/usr0types.h: mvdir storage/innobase/include/ut0byte.h: mvdir storage/innobase/include/ut0byte.ic: mvdir storage/innobase/include/ut0dbg.h: mvdir storage/innobase/include/ut0lst.h: mvdir storage/innobase/include/ut0mem.h: mvdir storage/innobase/include/ut0mem.ic: mvdir storage/innobase/include/ut0rnd.h: mvdir 
storage/innobase/include/ut0rnd.ic: mvdir storage/innobase/include/ut0sort.h: mvdir storage/innobase/include/ut0ut.h: mvdir storage/innobase/include/ut0ut.ic: mvdir storage/innobase/lock/Makefile.am: mvdir storage/innobase/lock/lock0lock.c: mvdir storage/innobase/lock/makefilewin: mvdir storage/innobase/log/Makefile.am: mvdir storage/innobase/log/log0log.c: mvdir storage/innobase/log/log0recv.c: mvdir storage/innobase/log/makefilewin: mvdir storage/innobase/mach/Makefile.am: mvdir storage/innobase/mach/mach0data.c: mvdir storage/innobase/mach/makefilewin: mvdir storage/innobase/mem/Makefile.am: mvdir storage/innobase/mem/makefilewin: mvdir storage/innobase/mem/mem0dbg.c: mvdir storage/innobase/mem/mem0mem.c: mvdir storage/innobase/mem/mem0pool.c: mvdir storage/innobase/mtr/Makefile.am: mvdir storage/innobase/mtr/makefilewin: mvdir storage/innobase/mtr/mtr0log.c: mvdir storage/innobase/mtr/mtr0mtr.c: mvdir storage/innobase/os/Makefile.am: mvdir storage/innobase/os/makefilewin: mvdir storage/innobase/os/os0file.c: mvdir storage/innobase/os/os0proc.c: mvdir storage/innobase/os/os0sync.c: mvdir storage/innobase/os/os0thread.c: mvdir storage/innobase/page/Makefile.am: mvdir storage/innobase/page/makefilewin: mvdir storage/innobase/page/page0cur.c: mvdir storage/innobase/page/page0page.c: mvdir storage/innobase/pars/Makefile.am: mvdir storage/innobase/pars/lexyy.c: mvdir storage/innobase/pars/makefilewin: mvdir storage/innobase/pars/pars0grm.c: mvdir storage/innobase/pars/pars0grm.h: mvdir storage/innobase/pars/pars0grm.y: mvdir storage/innobase/pars/pars0lex.l: mvdir storage/innobase/pars/pars0opt.c: mvdir storage/innobase/pars/pars0pars.c: mvdir storage/innobase/pars/pars0sym.c: mvdir storage/innobase/que/Makefile.am: mvdir storage/innobase/que/makefilewin: mvdir storage/innobase/que/que0que.c: mvdir storage/innobase/read/Makefile.am: mvdir storage/innobase/read/makefilewin: mvdir storage/innobase/read/read0read.c: mvdir storage/innobase/rem/Makefile.am: mvdir storage/innobase/rem/makefilewin: mvdir storage/innobase/rem/rem0cmp.c: mvdir storage/innobase/rem/rem0rec.c: mvdir storage/innobase/row/Makefile.am: mvdir storage/innobase/row/makefilewin: mvdir storage/innobase/row/row0ins.c: mvdir storage/innobase/row/row0mysql.c: mvdir storage/innobase/row/row0purge.c: mvdir storage/innobase/row/row0row.c: mvdir storage/innobase/row/row0sel.c: mvdir storage/innobase/row/row0uins.c: mvdir storage/innobase/row/row0umod.c: mvdir storage/innobase/row/row0undo.c: mvdir storage/innobase/row/row0upd.c: mvdir storage/innobase/row/row0vers.c: mvdir storage/innobase/srv/Makefile.am: mvdir storage/innobase/srv/makefilewin: mvdir storage/innobase/srv/srv0que.c: mvdir storage/innobase/srv/srv0srv.c: mvdir storage/innobase/srv/srv0start.c: mvdir storage/innobase/sync/Makefile.am: mvdir storage/innobase/sync/makefilewin: mvdir storage/innobase/sync/sync0arr.c: mvdir storage/innobase/sync/sync0rw.c: mvdir storage/innobase/sync/sync0sync.c: mvdir storage/innobase/thr/Makefile.am: mvdir storage/innobase/thr/makefilewin: mvdir storage/innobase/thr/thr0loc.c: mvdir storage/innobase/trx/Makefile.am: mvdir storage/innobase/trx/makefilewin: mvdir storage/innobase/trx/trx0purge.c: mvdir storage/innobase/trx/trx0rec.c: mvdir storage/innobase/trx/trx0roll.c: mvdir storage/innobase/trx/trx0rseg.c: mvdir storage/innobase/trx/trx0sys.c: mvdir storage/innobase/trx/trx0trx.c: mvdir storage/innobase/trx/trx0undo.c: mvdir storage/innobase/usr/Makefile.am: mvdir storage/innobase/usr/makefilewin: mvdir storage/innobase/usr/usr0sess.c: 
mvdir storage/innobase/ut/Makefile.am: mvdir storage/innobase/ut/makefilewin: mvdir storage/innobase/ut/ut0byte.c: mvdir storage/innobase/ut/ut0dbg.c: mvdir storage/innobase/ut/ut0mem.c: mvdir storage/innobase/ut/ut0rnd.c: mvdir storage/innobase/ut/ut0ut.c: mvdir storage/ndb/Makefile.am: mvdir storage/ndb/bin/.empty: mvdir storage/ndb/bin/check-regression.sh: mvdir storage/ndb/bin/makeTestPrograms_html.sh: mvdir storage/ndb/config/common.mk.am: mvdir storage/ndb/config/make-win-dsw.sh: mvdir storage/ndb/config/type_kernel.mk.am: mvdir storage/ndb/config/type_mgmapiclient.mk.am: mvdir storage/ndb/config/type_ndbapi.mk.am: mvdir storage/ndb/config/type_ndbapiclient.mk.am: mvdir storage/ndb/config/type_ndbapitest.mk.am: mvdir storage/ndb/config/type_ndbapitools.mk.am: mvdir storage/ndb/config/type_util.mk.am: mvdir storage/ndb/config/win-includes: mvdir storage/ndb/config/win-lib.am: mvdir storage/ndb/config/win-libraries: mvdir storage/ndb/config/win-name: mvdir storage/ndb/config/win-prg.am: mvdir storage/ndb/config/win-sources: mvdir storage/ndb/demos/1-node/1-api-3/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-db-2/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg: mvdir storage/ndb/demos/1-node/1-mgm-1/template_config.ini: mvdir storage/ndb/demos/2-node/2-api-4/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-5/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-6/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-api-7/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-db-2/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-db-3/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg: mvdir storage/ndb/demos/2-node/2-mgm-1/template_config.ini: mvdir storage/ndb/demos/config-templates/config_template-1-REP.ini: mvdir storage/ndb/demos/config-templates/config_template-4.ini: mvdir storage/ndb/demos/config-templates/config_template-install.ini: mvdir storage/ndb/demos/run_demo1-PS-SS_common.sh: mvdir storage/ndb/demos/run_demo1-PS.sh: mvdir storage/ndb/demos/run_demo1-SS.sh: mvdir storage/ndb/demos/run_demo1.sh: mvdir storage/ndb/demos/run_demo2.sh: mvdir storage/ndb/docs/Makefile.am: mvdir storage/ndb/docs/README: mvdir storage/ndb/docs/doxygen/Doxyfile.mgmapi: mvdir storage/ndb/docs/doxygen/Doxyfile.ndbapi: mvdir storage/ndb/docs/doxygen/Doxyfile.ndb: mvdir storage/ndb/docs/doxygen/Doxyfile.odbc: mvdir storage/ndb/docs/doxygen/Doxyfile.test: mvdir storage/ndb/docs/doxygen/header.mgmapi.tex: mvdir storage/ndb/docs/doxygen/header.ndbapi.tex: mvdir storage/ndb/docs/doxygen/postdoxy.pl: mvdir storage/ndb/docs/doxygen/predoxy.pl: mvdir storage/ndb/docs/wl2077.txt: mvdir storage/ndb/home/bin/Linuxmkisofs: mvdir storage/ndb/home/bin/Solarismkisofs: mvdir storage/ndb/home/bin/cvs2cl.pl: mvdir storage/ndb/home/bin/fix-cvs-root: mvdir storage/ndb/home/bin/import-from-bk.sh: mvdir storage/ndb/home/bin/ndb_deploy: mvdir storage/ndb/home/bin/ndbdoxy.pl: mvdir storage/ndb/home/bin/ngcalc: mvdir storage/ndb/home/bin/parseConfigFile.awk: mvdir storage/ndb/home/bin/setup-test.sh: mvdir storage/ndb/home/bin/signallog2html.lib/signallog2list.awk: mvdir storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk: mvdir storage/ndb/home/bin/signallog2html.sh: mvdir storage/ndb/home/bin/stripcr: mvdir storage/ndb/home/lib/funcs.sh: mvdir storage/ndb/include/Makefile.am: mvdir storage/ndb/include/debugger/DebuggerNames.hpp: mvdir storage/ndb/include/debugger/EventLogger.hpp: mvdir storage/ndb/include/debugger/GrepError.hpp: mvdir storage/ndb/include/debugger/SignalLoggerManager.hpp: mvdir storage/ndb/include/editline/editline.h: mvdir 
storage/ndb/include/kernel/AttributeDescriptor.hpp: mvdir storage/ndb/include/kernel/AttributeHeader.hpp: mvdir storage/ndb/include/kernel/AttributeList.hpp: mvdir storage/ndb/include/kernel/BlockNumbers.h: mvdir storage/ndb/include/kernel/GlobalSignalNumbers.h: mvdir storage/ndb/include/kernel/GrepEvent.hpp: mvdir storage/ndb/include/kernel/Interpreter.hpp: mvdir storage/ndb/include/kernel/LogLevel.hpp: mvdir storage/ndb/include/kernel/NodeBitmask.hpp: mvdir storage/ndb/include/kernel/NodeInfo.hpp: mvdir storage/ndb/include/kernel/NodeState.hpp: mvdir storage/ndb/include/kernel/RefConvert.hpp: mvdir storage/ndb/include/kernel/kernel_config_parameters.h: mvdir storage/ndb/include/kernel/kernel_types.h: mvdir storage/ndb/include/kernel/ndb_limits.h: mvdir storage/ndb/include/kernel/signaldata/AbortAll.hpp: mvdir storage/ndb/include/kernel/signaldata/AccFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/AccLock.hpp: mvdir storage/ndb/include/kernel/signaldata/AccScan.hpp: mvdir storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTab.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTable.hpp: mvdir storage/ndb/include/kernel/signaldata/AlterTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/ApiVersion.hpp: mvdir storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/AttrInfo.hpp: mvdir storage/ndb/include/kernel/trigger_definitions.h: mvdir storage/ndb/include/ndb_constants.h: mvdir storage/ndb/include/ndb_global.h.in: mvdir storage/ndb/include/ndb_init.h: mvdir storage/ndb/include/ndb_net.h: mvdir storage/ndb/include/ndb_types.h.in: mvdir storage/ndb/include/ndb_version.h.in: mvdir storage/ndb/include/kernel/signaldata/BackupContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/BackupImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/BackupSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/BuildIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp: mvdir storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp: mvdir storage/ndb/include/kernel/signaldata/CmInit.hpp: mvdir storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp: mvdir storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp: mvdir storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp: mvdir storage/ndb/include/kernel/signaldata/CntrStart.hpp: mvdir storage/ndb/include/kernel/signaldata/ConfigParamId.hpp: mvdir storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyActive.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateEvnt.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTab.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTable.hpp: mvdir storage/ndb/include/kernel/signaldata/CreateTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/DiAddTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DiGetNodes.hpp: mvdir 
storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/DictStart.hpp: mvdir storage/ndb/include/kernel/signaldata/DictTabInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/DihAddFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/DihContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/DihStartTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp: mvdir storage/ndb/include/kernel/signaldata/DisconnectRep.hpp: mvdir storage/ndb/include/kernel/signaldata/DropIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTab.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTabFile.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTable.hpp: mvdir storage/ndb/include/kernel/signaldata/DropTrig.hpp: mvdir storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/EmptyLcp.hpp: mvdir storage/ndb/include/kernel/signaldata/EndTo.hpp: mvdir storage/ndb/include/kernel/signaldata/EventReport.hpp: mvdir storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp: mvdir storage/ndb/include/kernel/signaldata/ExecFragReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FailRep.hpp: mvdir storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/FsAppendReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsCloseReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsConf.hpp: mvdir storage/ndb/include/kernel/signaldata/FsOpenReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp: mvdir storage/ndb/include/kernel/signaldata/FsRef.hpp: mvdir storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp: mvdir storage/ndb/include/kernel/signaldata/GCPSave.hpp: mvdir storage/ndb/include/kernel/signaldata/GetTabInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/GetTableId.hpp: mvdir storage/ndb/include/kernel/signaldata/GrepImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/HotSpareRep.hpp: mvdir storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp: mvdir storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp: mvdir storage/ndb/include/kernel/signaldata/KeyInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/LCP.hpp: mvdir storage/ndb/include/kernel/signaldata/ListTables.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhKey.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/LqhTransConf.hpp: mvdir storage/ndb/include/kernel/signaldata/ManagementServer.hpp: mvdir storage/ndb/include/kernel/signaldata/MasterGCP.hpp: mvdir storage/ndb/include/kernel/signaldata/MasterLCP.hpp: mvdir storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp: mvdir storage/ndb/include/kernel/signaldata/NdbSttor.hpp: mvdir storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/NextScan.hpp: mvdir storage/ndb/include/kernel/signaldata/NodeFailRep.hpp: mvdir storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/PackedSignal.hpp: mvdir storage/ndb/include/kernel/signaldata/PrepDropTab.hpp: mvdir storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp: mvdir 
storage/ndb/include/kernel/signaldata/ReadConfig.hpp: mvdir storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp: mvdir storage/ndb/include/kernel/signaldata/RelTabMem.hpp: mvdir storage/ndb/include/kernel/signaldata/RepImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/ResumeReq.hpp: mvdir storage/ndb/include/kernel/signaldata/ScanFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/ScanTab.hpp: mvdir storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/SetVarReq.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalData.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp: mvdir storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp: mvdir storage/ndb/include/kernel/signaldata/SrFragidConf.hpp: mvdir storage/ndb/include/kernel/signaldata/StartFragReq.hpp: mvdir storage/ndb/include/kernel/signaldata/StartInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/StartMe.hpp: mvdir storage/ndb/include/kernel/signaldata/StartOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/StartPerm.hpp: mvdir storage/ndb/include/kernel/signaldata/StartRec.hpp: mvdir storage/ndb/include/kernel/signaldata/StartTo.hpp: mvdir storage/ndb/include/kernel/signaldata/StopMe.hpp: mvdir storage/ndb/include/kernel/signaldata/StopPerm.hpp: mvdir storage/ndb/include/kernel/signaldata/StopReq.hpp: mvdir storage/ndb/include/kernel/signaldata/SumaImpl.hpp: mvdir storage/ndb/include/kernel/signaldata/SystemError.hpp: mvdir storage/ndb/include/kernel/signaldata/TamperOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/TcCommit.hpp: mvdir storage/ndb/include/kernel/signaldata/TcContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/TcHbRep.hpp: mvdir storage/ndb/include/kernel/signaldata/TcIndx.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyConf.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyRef.hpp: mvdir storage/ndb/include/kernel/signaldata/TcKeyReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp: mvdir storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TestOrd.hpp: mvdir storage/ndb/include/kernel/signaldata/TransIdAI.hpp: mvdir storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp: mvdir storage/ndb/include/kernel/signaldata/TupCommit.hpp: mvdir storage/ndb/include/kernel/signaldata/TupFrag.hpp: mvdir storage/ndb/include/kernel/signaldata/TupKey.hpp: mvdir storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxBound.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxContinueB.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxMaint.hpp: mvdir storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp: mvdir storage/ndb/include/kernel/signaldata/UpdateTo.hpp: mvdir storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilDelete.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilExecute.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilLock.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilPrepare.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilRelease.hpp: mvdir storage/ndb/include/kernel/signaldata/UtilSequence.hpp: mvdir storage/ndb/include/kernel/signaldata/WaitGCP.hpp: mvdir storage/ndb/include/logger/ConsoleLogHandler.hpp: mvdir storage/ndb/include/logger/FileLogHandler.hpp: mvdir storage/ndb/include/logger/LogHandler.hpp: mvdir storage/ndb/include/logger/Logger.hpp: mvdir 
storage/ndb/include/logger/SysLogHandler.hpp: mvdir storage/ndb/include/mgmapi/mgmapi.h: mvdir storage/ndb/include/mgmapi/mgmapi_config_parameters.h: mvdir storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h: mvdir storage/ndb/include/mgmapi/mgmapi_debug.h: mvdir storage/ndb/include/mgmapi/ndb_logevent.h: mvdir storage/ndb/include/mgmcommon/ConfigRetriever.hpp: mvdir storage/ndb/include/mgmcommon/IPCConfig.hpp: mvdir storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp: mvdir storage/ndb/include/ndbapi/Ndb.hpp: mvdir storage/ndb/include/ndbapi/NdbApi.hpp: mvdir storage/ndb/include/ndbapi/NdbBlob.hpp: mvdir storage/ndb/include/ndbapi/NdbDictionary.hpp: mvdir storage/ndb/include/ndbapi/NdbError.hpp: mvdir storage/ndb/include/ndbapi/NdbEventOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbIndexOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbPool.hpp: mvdir storage/ndb/include/ndbapi/NdbRecAttr.hpp: mvdir storage/ndb/include/ndbapi/NdbReceiver.hpp: mvdir storage/ndb/include/ndbapi/NdbScanFilter.hpp: mvdir storage/ndb/include/ndbapi/NdbScanOperation.hpp: mvdir storage/ndb/include/ndbapi/NdbTransaction.hpp: mvdir storage/ndb/include/ndbapi/ndb_cluster_connection.hpp: mvdir storage/ndb/include/ndbapi/ndb_opt_defaults.h: mvdir storage/ndb/include/ndbapi/ndbapi_limits.h: mvdir storage/ndb/include/ndbapi/ndberror.h: mvdir storage/ndb/include/newtonapi/dba.h: mvdir storage/ndb/include/newtonapi/defs/pcn_types.h: mvdir storage/ndb/include/portlib/NdbCondition.h: mvdir storage/ndb/include/portlib/NdbConfig.h: mvdir storage/ndb/include/portlib/NdbDaemon.h: mvdir storage/ndb/include/portlib/NdbEnv.h: mvdir storage/ndb/include/portlib/NdbHost.h: mvdir storage/ndb/include/portlib/NdbMain.h: mvdir storage/ndb/include/portlib/NdbMem.h: mvdir storage/ndb/include/portlib/NdbMutex.h: mvdir storage/ndb/include/portlib/NdbSleep.h: mvdir storage/ndb/include/portlib/NdbTCP.h: mvdir storage/ndb/include/portlib/NdbThread.h: mvdir storage/ndb/include/portlib/NdbTick.h: mvdir storage/ndb/include/portlib/PortDefs.h: mvdir storage/ndb/include/portlib/prefetch.h: mvdir storage/ndb/include/transporter/TransporterCallback.hpp: mvdir storage/ndb/include/transporter/TransporterDefinitions.hpp: mvdir storage/ndb/include/transporter/TransporterRegistry.hpp: mvdir storage/ndb/include/util/Base64.hpp: mvdir storage/ndb/include/util/BaseString.hpp: mvdir storage/ndb/include/util/Bitmask.hpp: mvdir storage/ndb/include/util/ConfigValues.hpp: mvdir storage/ndb/include/util/File.hpp: mvdir storage/ndb/include/util/InputStream.hpp: mvdir storage/ndb/include/util/NdbAutoPtr.hpp: mvdir storage/ndb/include/util/NdbOut.hpp: mvdir storage/ndb/include/util/NdbSqlUtil.hpp: mvdir storage/ndb/include/util/OutputStream.hpp: mvdir storage/ndb/include/util/Parser.hpp: mvdir storage/ndb/include/util/Properties.hpp: mvdir storage/ndb/include/util/SimpleProperties.hpp: mvdir storage/ndb/include/util/SocketAuthenticator.hpp: mvdir storage/ndb/include/util/SocketClient.hpp: mvdir storage/ndb/include/util/SocketServer.hpp: mvdir storage/ndb/include/util/UtilBuffer.hpp: mvdir storage/ndb/include/util/Vector.hpp: mvdir storage/ndb/include/util/basestring_vsnprintf.h: mvdir storage/ndb/include/util/md5_hash.hpp: mvdir storage/ndb/include/util/ndb_opts.h: mvdir storage/ndb/include/util/random.h: mvdir storage/ndb/include/util/socket_io.h: mvdir storage/ndb/include/util/uucode.h: mvdir storage/ndb/include/util/version.h: mvdir 
storage/ndb/lib/.empty: mvdir storage/ndb/ndbapi-examples/Makefile: mvdir storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile: mvdir storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile: mvdir storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp: mvdir storage/ndb/src/Makefile.am: mvdir storage/ndb/src/common/Makefile.am: mvdir storage/ndb/src/common/debugger/BlockNames.cpp: mvdir storage/ndb/src/common/debugger/DebuggerNames.cpp: mvdir storage/ndb/src/common/debugger/EventLogger.cpp: mvdir storage/ndb/src/common/debugger/GrepError.cpp: mvdir storage/ndb/src/common/debugger/Makefile.am: mvdir storage/ndb/src/common/debugger/SignalLoggerManager.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AccLock.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTable.cpp: mvdir storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp: mvdir storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CntrStart.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/DropTrig.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FailRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsConf.cpp: mvdir 
storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/FsRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/GCPSave.cpp: mvdir storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LCP.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhKey.cpp: mvdir storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp: mvdir storage/ndb/src/common/debugger/signaldata/Makefile.am: mvdir storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp: mvdir storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp: mvdir storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp: mvdir storage/ndb/src/common/debugger/signaldata/ScanTab.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SignalNames.cpp: mvdir storage/ndb/src/common/debugger/signaldata/StartRec.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp: mvdir storage/ndb/src/common/debugger/signaldata/SystemError.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcIndx.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TupCommit.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TupKey.cpp: mvdir storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilLock.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp: mvdir storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp: mvdir storage/ndb/src/common/debugger/signaldata/print.awk: mvdir storage/ndb/src/common/logger/ConsoleLogHandler.cpp: mvdir storage/ndb/src/common/logger/FileLogHandler.cpp: mvdir storage/ndb/src/common/logger/LogHandler.cpp: mvdir storage/ndb/src/common/logger/LogHandlerList.cpp: mvdir storage/ndb/src/common/logger/LogHandlerList.hpp: mvdir storage/ndb/src/common/logger/Logger.cpp: mvdir storage/ndb/src/common/logger/Makefile.am: mvdir storage/ndb/src/common/logger/SysLogHandler.cpp: mvdir storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp: mvdir storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp: mvdir storage/ndb/src/common/logger/listtest/Makefile: mvdir storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp: mvdir storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp: mvdir storage/ndb/src/common/logger/loggertest/Makefile: mvdir 
storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp: mvdir storage/ndb/src/common/mgmcommon/IPCConfig.cpp: mvdir storage/ndb/src/common/mgmcommon/Makefile.am: mvdir storage/ndb/src/common/mgmcommon/printConfig/Makefile: mvdir storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp: mvdir storage/ndb/src/common/portlib/Makefile.am: mvdir storage/ndb/src/common/portlib/NdbCondition.c: mvdir storage/ndb/src/common/portlib/NdbConfig.c: mvdir storage/ndb/src/common/portlib/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/NdbEnv.c: mvdir storage/ndb/src/common/portlib/NdbHost.c: mvdir storage/ndb/src/common/portlib/NdbMem.c: mvdir storage/ndb/src/common/portlib/NdbMutex.c: mvdir storage/ndb/src/common/portlib/NdbPortLibTest.cpp: mvdir storage/ndb/src/common/portlib/NdbSleep.c: mvdir storage/ndb/src/common/portlib/NdbTCP.cpp: mvdir storage/ndb/src/common/portlib/NdbThread.c: mvdir storage/ndb/src/common/portlib/NdbTick.c: mvdir storage/ndb/src/common/portlib/gcc.cpp: mvdir storage/ndb/src/common/portlib/memtest.c: mvdir storage/ndb/src/common/portlib/mmslist.cpp: mvdir storage/ndb/src/common/portlib/mmstest.cpp: mvdir storage/ndb/src/common/portlib/munmaptest.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/memtest/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/memtest/munmaptest/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/ose/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbCondition.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbConditionOSE.h: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbEnv.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbHost.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMem.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMem_SoftOse.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbMutex.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbOut.cpp: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbSleep.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbTCP.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbThread.c: mvdir storage/ndb/src/common/portlib/old_dirs/ose/NdbTick.c: mvdir storage/ndb/src/common/portlib/old_dirs/test/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/win32/Makefile: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbCondition.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbEnv.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbHost.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbMem.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbMutex.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbSleep.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbTCP.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbThread.c: mvdir storage/ndb/src/common/portlib/old_dirs/win32/NdbTick.c: mvdir storage/ndb/src/common/portlib/win32/NdbCondition.c: mvdir storage/ndb/src/common/portlib/win32/NdbDaemon.c: mvdir storage/ndb/src/common/portlib/win32/NdbEnv.c: mvdir storage/ndb/src/common/portlib/win32/NdbHost.c: mvdir storage/ndb/src/common/portlib/win32/NdbMem.c: mvdir storage/ndb/src/common/portlib/win32/NdbMutex.c: mvdir storage/ndb/src/common/portlib/win32/NdbSleep.c: mvdir storage/ndb/src/common/portlib/win32/NdbTCP.c: mvdir storage/ndb/src/common/portlib/win32/NdbThread.c: mvdir storage/ndb/src/common/portlib/win32/NdbTick.c: mvdir storage/ndb/src/common/transporter/Makefile.am: mvdir 
storage/ndb/src/common/transporter/OSE_Receiver.cpp: mvdir storage/ndb/src/common/transporter/OSE_Receiver.hpp: mvdir storage/ndb/src/common/transporter/OSE_Signals.hpp: mvdir storage/ndb/src/common/transporter/OSE_Transporter.cpp: mvdir storage/ndb/src/common/transporter/OSE_Transporter.hpp: mvdir storage/ndb/src/common/transporter/Packer.cpp: mvdir storage/ndb/src/common/transporter/Packer.hpp: mvdir storage/ndb/src/common/transporter/SCI_Transporter.cpp: mvdir storage/ndb/src/common/transporter/SCI_Transporter.hpp: mvdir storage/ndb/src/common/transporter/SHM_Buffer.hpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.cpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.hpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp: mvdir storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp: mvdir storage/ndb/src/common/transporter/SendBuffer.cpp: mvdir storage/ndb/src/common/transporter/SendBuffer.hpp: mvdir storage/ndb/src/common/transporter/TCP_Transporter.cpp: mvdir storage/ndb/src/common/transporter/TCP_Transporter.hpp: mvdir storage/ndb/src/common/transporter/Transporter.cpp: mvdir storage/ndb/src/common/transporter/Transporter.hpp: mvdir storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp: mvdir storage/ndb/src/common/transporter/TransporterRegistry.cpp: mvdir storage/ndb/src/common/transporter/basictest/Makefile: mvdir storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/buddy.cpp: mvdir storage/ndb/src/common/transporter/buddy.hpp: mvdir storage/ndb/src/common/transporter/failoverSCI/Makefile: mvdir storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp: mvdir storage/ndb/src/common/transporter/perftest/Makefile: mvdir storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/priotest/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioOSE/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSCI/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioSHM/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTCP/Makefile: mvdir storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp: mvdir storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp: mvdir storage/ndb/src/common/util/Base64.cpp: mvdir storage/ndb/src/common/util/BaseString.cpp: mvdir storage/ndb/src/common/util/Bitmask.cpp: mvdir storage/ndb/src/common/util/ConfigValues.cpp: mvdir storage/ndb/src/common/util/File.cpp: mvdir storage/ndb/src/common/util/InputStream.cpp: mvdir storage/ndb/src/common/util/Makefile.am: mvdir storage/ndb/src/common/util/NdbErrHnd.cpp: mvdir storage/ndb/src/common/util/NdbOut.cpp: mvdir storage/ndb/src/common/util/NdbSqlUtil.cpp: mvdir storage/ndb/src/common/util/OutputStream.cpp: mvdir storage/ndb/src/common/util/Parser.cpp: mvdir storage/ndb/src/common/util/Properties.cpp: mvdir storage/ndb/src/common/util/SimpleProperties.cpp: mvdir storage/ndb/src/common/util/SocketAuthenticator.cpp: mvdir storage/ndb/src/common/util/SocketClient.cpp: mvdir storage/ndb/src/common/util/SocketServer.cpp: mvdir storage/ndb/src/common/util/basestring_vsnprintf.c: mvdir storage/ndb/src/common/util/filetest/FileUnitTest.cpp: mvdir 
storage/ndb/src/common/util/filetest/FileUnitTest.hpp: mvdir storage/ndb/src/common/util/filetest/Makefile: mvdir storage/ndb/src/common/util/getarg.cat3: mvdir storage/ndb/src/common/util/md5_hash.cpp: mvdir storage/ndb/src/common/util/ndb_init.c: mvdir storage/ndb/src/common/util/new.cpp: mvdir storage/ndb/src/common/util/random.c: mvdir storage/ndb/src/common/util/socket_io.cpp: mvdir storage/ndb/src/common/util/strdup.c: mvdir storage/ndb/src/common/util/testConfigValues/Makefile: mvdir storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp: mvdir storage/ndb/src/common/util/uucode.c: mvdir storage/ndb/src/common/util/version.c: mvdir storage/ndb/src/common/util/testProperties/Makefile: mvdir storage/ndb/src/common/util/testProperties/testProperties.cpp: mvdir storage/ndb/src/common/util/testSimpleProperties/Makefile: mvdir storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp: mvdir storage/ndb/src/cw/Makefile.am: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo: mvdir storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj: mvdir storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO: mvdir storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/Open.ICO: mvdir storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp: mvdir storage/ndb/src/cw/cpcc-win32/C++/TreeView.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp: mvdir storage/ndb/src/cw/cpcc-win32/C++/resource.h: mvdir storage/ndb/src/cw/cpcc-win32/C++/small.ico: mvdir storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp: mvdir storage/ndb/src/cw/cpcc-win32/csharp/App.ico: mvdir storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Database.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb: mvdir storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln: mvdir storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/Process.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs: mvdir storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Computer.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Database.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 110.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 231.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 237.ico: mvdir 
storage/ndb/src/cw/cpcc-win32/vb6/Icon 241.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 242.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 270.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 271.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 273.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 31.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 337.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 338.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Icon 339.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/MSSCCPRJ.SCC: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Module1.bas: mvdir storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbp: mvdir storage/ndb/src/cw/cpcc-win32/vb6/NdbCPC.vbw: mvdir storage/ndb/src/cw/cpcc-win32/vb6/Process.cls: mvdir storage/ndb/src/cw/cpcc-win32/vb6/closed folder.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/computer.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmAbout.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmLogin.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmMain.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewComputer.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase1.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase2.log: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmNewDatabase3.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmOptions.frm: mvdir storage/ndb/src/cw/cpcc-win32/vb6/frmSplash.frx: mvdir storage/ndb/src/cw/cpcc-win32/vb6/networking.ico: mvdir storage/ndb/src/cw/cpcc-win32/vb6/open folder.ico: mvdir storage/ndb/src/cw/cpcd/APIService.cpp: mvdir storage/ndb/src/cw/cpcd/APIService.hpp: mvdir storage/ndb/src/cw/cpcd/CPCD.cpp: mvdir storage/ndb/src/cw/cpcd/CPCD.hpp: mvdir storage/ndb/src/cw/cpcd/Makefile.am: mvdir storage/ndb/src/cw/cpcd/Monitor.cpp: mvdir storage/ndb/src/cw/cpcd/Process.cpp: mvdir storage/ndb/src/cw/cpcd/common.cpp: mvdir storage/ndb/src/cw/cpcd/common.hpp: mvdir storage/ndb/src/cw/cpcd/main.cpp: mvdir storage/ndb/src/cw/test/socketclient/Makefile: mvdir storage/ndb/src/cw/test/socketclient/socketClientTest.cpp: mvdir storage/ndb/src/cw/util/ClientInterface.cpp: mvdir storage/ndb/src/cw/util/ClientInterface.hpp: mvdir storage/ndb/src/cw/util/Makefile: mvdir storage/ndb/src/cw/util/SocketRegistry.cpp: mvdir storage/ndb/src/cw/util/SocketRegistry.hpp: mvdir storage/ndb/src/cw/util/SocketService.cpp: mvdir storage/ndb/src/cw/util/SocketService.hpp: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib: mvdir storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib: mvdir storage/ndb/src/kernel/Makefile.am: mvdir storage/ndb/src/kernel/SimBlockList.cpp: mvdir storage/ndb/src/kernel/blocks/ERROR_codes.txt: mvdir storage/ndb/src/kernel/blocks/Makefile.am: mvdir storage/ndb/src/kernel/blocks/NodeRestart.new.txt: mvdir storage/ndb/src/kernel/blocks/NodeRestart.txt: mvdir storage/ndb/src/kernel/blocks/Start.txt: mvdir storage/ndb/src/kernel/blocks/SystemRestart.new.txt: mvdir storage/ndb/src/kernel/blocks/SystemRestart.txt: mvdir storage/ndb/src/kernel/blocks/backup/Backup.cpp: mvdir 
storage/ndb/src/kernel/blocks/backup/Backup.hpp: mvdir storage/ndb/src/kernel/blocks/backup/Backup.txt: mvdir storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp: mvdir storage/ndb/src/kernel/blocks/backup/BackupInit.cpp: mvdir storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp: mvdir storage/ndb/src/kernel/blocks/backup/Makefile.am: mvdir storage/ndb/src/kernel/blocks/backup/read.cpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp: mvdir storage/ndb/src/kernel/blocks/cmvmi/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp: mvdir storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbacc/Makefile.am: mvdir storage/ndb/src/kernel/blocks/mutexes.hpp: mvdir storage/ndb/src/kernel/blocks/new-block.tar.gz: mvdir storage/ndb/src/kernel/main.cpp: mvdir storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/DropTable.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Event.txt: mvdir storage/ndb/src/kernel/blocks/dbdict/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl: mvdir storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp: mvdir storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl: mvdir storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp: mvdir storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbdih/LCP.txt: mvdir storage/ndb/src/kernel/blocks/dbdih/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp: mvdir storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile: mvdir storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp: mvdir storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp: mvdir storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp: mvdir storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: mvdir storage/ndb/src/kernel/blocks/dbtc/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp: mvdir storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp: mvdir 
storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp: mvdir storage/ndb/src/kernel/blocks/dbtup/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbtup/Notes.txt: mvdir storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp: mvdir storage/ndb/src/kernel/blocks/dbtux/Makefile.am: mvdir storage/ndb/src/kernel/blocks/dbtux/Times.txt: mvdir storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp: mvdir storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt: mvdir storage/ndb/src/kernel/blocks/dbutil/Makefile.am: mvdir storage/ndb/src/kernel/blocks/grep/Grep.cpp: mvdir storage/ndb/src/kernel/blocks/grep/Grep.hpp: mvdir storage/ndb/src/kernel/blocks/grep/GrepInit.cpp: mvdir storage/ndb/src/kernel/blocks/grep/Makefile.am: mvdir storage/ndb/src/kernel/blocks/grep/systab_test/Makefile: mvdir storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am: mvdir storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp: mvdir storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Makefile.am: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile: mvdir storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp: mvdir storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/Makefile.am: mvdir storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp: mvdir 
storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: mvdir storage/ndb/src/kernel/blocks/qmgr/timer.hpp: mvdir storage/ndb/src/kernel/blocks/suma/Makefile.am: mvdir storage/ndb/src/kernel/blocks/suma/Suma.cpp: mvdir storage/ndb/src/kernel/blocks/suma/Suma.hpp: mvdir storage/ndb/src/kernel/blocks/suma/Suma.txt: mvdir storage/ndb/src/kernel/blocks/suma/SumaInit.cpp: mvdir storage/ndb/src/kernel/blocks/trix/Makefile.am: mvdir storage/ndb/src/kernel/blocks/trix/Trix.cpp: mvdir storage/ndb/src/kernel/blocks/trix/Trix.hpp: mvdir storage/ndb/src/kernel/error/Error.hpp: mvdir storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp: mvdir storage/ndb/src/kernel/error/ErrorMessages.cpp: mvdir storage/ndb/src/kernel/error/ErrorMessages.hpp: mvdir storage/ndb/src/kernel/error/ErrorReporter.cpp: mvdir storage/ndb/src/kernel/error/ErrorReporter.hpp: mvdir storage/ndb/src/kernel/error/Makefile.am: mvdir storage/ndb/src/kernel/error/TimeModule.cpp: mvdir storage/ndb/src/kernel/error/TimeModule.hpp: mvdir storage/ndb/src/kernel/vm/Array.hpp: mvdir storage/ndb/src/kernel/vm/ArrayFifoList.hpp: mvdir storage/ndb/src/kernel/vm/ArrayList.hpp: mvdir storage/ndb/src/kernel/vm/ArrayPool.hpp: mvdir storage/ndb/src/kernel/vm/CArray.hpp: mvdir storage/ndb/src/kernel/vm/Callback.hpp: mvdir storage/ndb/src/kernel/vm/ClusterConfiguration.cpp: mvdir storage/ndb/src/kernel/vm/ClusterConfiguration.hpp: mvdir storage/ndb/src/kernel/vm/Configuration.cpp: mvdir storage/ndb/src/kernel/vm/Configuration.hpp: mvdir storage/ndb/src/kernel/vm/DLFifoList.hpp: mvdir storage/ndb/src/kernel/vm/DLHashTable.hpp: mvdir storage/ndb/src/kernel/vm/DLHashTable2.hpp: mvdir storage/ndb/src/kernel/vm/DLList.hpp: mvdir storage/ndb/src/kernel/vm/DataBuffer.hpp: mvdir storage/ndb/src/kernel/vm/Emulator.cpp: mvdir storage/ndb/src/kernel/vm/Emulator.hpp: mvdir storage/ndb/src/kernel/vm/FastScheduler.cpp: mvdir storage/ndb/src/kernel/vm/FastScheduler.hpp: mvdir storage/ndb/src/kernel/vm/GlobalData.hpp: mvdir storage/ndb/src/kernel/vm/KeyTable.hpp: mvdir storage/ndb/src/kernel/vm/KeyTable2.hpp: mvdir storage/ndb/src/kernel/vm/LongSignal.hpp: mvdir storage/ndb/src/kernel/vm/Makefile.am: mvdir storage/ndb/src/kernel/vm/MetaData.cpp: mvdir storage/ndb/src/kernel/vm/MetaData.hpp: mvdir storage/ndb/src/kernel/vm/Mutex.cpp: mvdir storage/ndb/src/kernel/vm/Mutex.hpp: mvdir storage/ndb/src/kernel/vm/Prio.hpp: mvdir storage/ndb/src/kernel/vm/RequestTracker.hpp: mvdir storage/ndb/src/kernel/vm/SLList.hpp: mvdir storage/ndb/src/kernel/vm/SafeCounter.cpp: mvdir storage/ndb/src/kernel/vm/SafeCounter.hpp: mvdir storage/ndb/src/kernel/vm/SectionReader.cpp: mvdir storage/ndb/src/kernel/vm/SectionReader.hpp: mvdir storage/ndb/src/kernel/vm/SignalCounter.hpp: mvdir storage/ndb/src/kernel/vm/SimBlockList.hpp: mvdir storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp: mvdir storage/ndb/src/kernel/vm/SimulatedBlock.cpp: mvdir storage/ndb/src/kernel/vm/SimulatedBlock.hpp: mvdir storage/ndb/src/kernel/vm/ThreadConfig.cpp: mvdir storage/ndb/src/kernel/vm/ThreadConfig.hpp: mvdir storage/ndb/src/kernel/vm/TimeQueue.cpp: mvdir storage/ndb/src/kernel/vm/TimeQueue.hpp: mvdir storage/ndb/src/kernel/vm/TransporterCallback.cpp: mvdir storage/ndb/src/kernel/vm/VMSignal.cpp: mvdir storage/ndb/src/kernel/vm/VMSignal.hpp: mvdir storage/ndb/src/kernel/vm/WaitQueue.hpp: mvdir storage/ndb/src/kernel/vm/WatchDog.cpp: mvdir storage/ndb/src/kernel/vm/WatchDog.hpp: mvdir storage/ndb/src/kernel/vm/al_test/Makefile: mvdir 
storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp: mvdir storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp: mvdir storage/ndb/src/kernel/vm/al_test/main.cpp: mvdir storage/ndb/src/kernel/vm/pc.hpp: mvdir storage/ndb/src/kernel/vm/testCopy/Makefile: mvdir storage/ndb/src/kernel/vm/testCopy/rr.cpp: mvdir storage/ndb/src/kernel/vm/testCopy/testCopy.cpp: mvdir storage/ndb/src/kernel/vm/testDataBuffer/Makefile: mvdir storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp: mvdir storage/ndb/src/kernel/vm/testLongSig/Makefile: mvdir storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp: mvdir storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile: mvdir storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp: mvdir storage/ndb/src/mgmapi/LocalConfig.cpp: mvdir storage/ndb/src/mgmapi/LocalConfig.hpp: mvdir storage/ndb/src/mgmapi/Makefile.am: mvdir storage/ndb/src/mgmapi/mgmapi.cpp: mvdir storage/ndb/src/mgmapi/mgmapi_configuration.cpp: mvdir storage/ndb/src/mgmapi/mgmapi_configuration.hpp: mvdir storage/ndb/src/mgmapi/mgmapi_internal.h: mvdir storage/ndb/src/mgmapi/ndb_logevent.cpp: mvdir storage/ndb/src/mgmapi/ndb_logevent.hpp: mvdir storage/ndb/src/mgmapi/test/Makefile: mvdir storage/ndb/src/mgmapi/test/keso.c: mvdir storage/ndb/src/mgmapi/test/mgmSrvApi.cpp: mvdir storage/ndb/src/mgmclient/CommandInterpreter.cpp: mvdir storage/ndb/src/mgmclient/Makefile.am: mvdir storage/ndb/src/mgmclient/main.cpp: mvdir storage/ndb/src/mgmclient/ndb_mgmclient.hpp: mvdir storage/ndb/src/mgmclient/ndb_mgmclient.h: mvdir storage/ndb/src/mgmclient/test_cpcd/Makefile: mvdir storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp: mvdir storage/ndb/src/mgmsrv/Config.cpp: mvdir storage/ndb/src/mgmsrv/Config.hpp: mvdir storage/ndb/src/mgmsrv/ConfigInfo.cpp: mvdir storage/ndb/src/mgmsrv/ConfigInfo.hpp: mvdir storage/ndb/src/mgmsrv/InitConfigFileParser.cpp: mvdir storage/ndb/src/mgmsrv/InitConfigFileParser.hpp: mvdir storage/ndb/src/mgmsrv/Makefile.am: mvdir storage/ndb/src/mgmsrv/MgmtSrvr.cpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvr.hpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp: mvdir storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp: mvdir storage/ndb/src/mgmsrv/Services.cpp: mvdir storage/ndb/src/mgmsrv/Services.hpp: mvdir storage/ndb/src/mgmsrv/SignalQueue.cpp: mvdir storage/ndb/src/mgmsrv/SignalQueue.hpp: mvdir storage/ndb/src/mgmsrv/convertStrToInt.cpp: mvdir storage/ndb/src/mgmsrv/convertStrToInt.hpp: mvdir storage/ndb/src/mgmsrv/main.cpp: mvdir storage/ndb/src/mgmsrv/mkconfig/Makefile: mvdir storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp: mvdir storage/ndb/src/ndbapi/API.hpp: mvdir storage/ndb/src/ndbapi/ClusterMgr.cpp: mvdir storage/ndb/src/ndbapi/ClusterMgr.hpp: mvdir storage/ndb/src/ndbapi/DictCache.cpp: mvdir storage/ndb/src/ndbapi/DictCache.hpp: mvdir storage/ndb/src/ndbapi/Makefile.am: mvdir storage/ndb/src/ndbapi/Ndb.cpp: mvdir storage/ndb/src/ndbapi/NdbApiSignal.cpp: mvdir storage/ndb/src/ndbapi/NdbApiSignal.hpp: mvdir storage/ndb/src/ndbapi/NdbBlob.cpp: mvdir storage/ndb/src/ndbapi/NdbBlobImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbDictionary.cpp: mvdir storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbErrorOut.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbIndexOperation.cpp: mvdir 
storage/ndb/src/ndbapi/NdbLinHash.hpp: mvdir storage/ndb/src/ndbapi/NdbOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationDefine.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationExec.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationInt.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationScan.cpp: mvdir storage/ndb/src/ndbapi/NdbOperationSearch.cpp: mvdir storage/ndb/src/ndbapi/NdbPool.cpp: mvdir storage/ndb/src/ndbapi/NdbPoolImpl.cpp: mvdir storage/ndb/src/ndbapi/NdbPoolImpl.hpp: mvdir storage/ndb/src/ndbapi/NdbRecAttr.cpp: mvdir storage/ndb/src/ndbapi/NdbReceiver.cpp: mvdir storage/ndb/src/ndbapi/NdbScanFilter.cpp: mvdir storage/ndb/src/ndbapi/NdbScanOperation.cpp: mvdir storage/ndb/src/ndbapi/NdbTransaction.cpp: mvdir storage/ndb/src/ndbapi/NdbTransactionScan.cpp: mvdir storage/ndb/src/ndbapi/NdbUtil.cpp: mvdir storage/ndb/src/ndbapi/NdbUtil.hpp: mvdir storage/ndb/src/ndbapi/NdbWaiter.hpp: mvdir storage/ndb/src/ndbapi/Ndberr.cpp: mvdir storage/ndb/src/ndbapi/Ndbif.cpp: mvdir storage/ndb/src/ndbapi/Ndbinit.cpp: mvdir storage/ndb/src/ndbapi/Ndblist.cpp: mvdir storage/ndb/src/ndbapi/ObjectMap.hpp: mvdir storage/ndb/src/ndbapi/ScanOperation.txt: mvdir storage/ndb/src/ndbapi/TransporterFacade.cpp: mvdir storage/ndb/src/ndbapi/TransporterFacade.hpp: mvdir storage/ndb/src/ndbapi/ndb_cluster_connection.cpp: mvdir storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp: mvdir storage/ndb/src/ndbapi/ndberror.c: mvdir storage/ndb/src/ndbapi/signal-sender/Makefile: mvdir storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp: mvdir storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp: mvdir storage/ndb/src/old_files/client/Makefile: mvdir storage/ndb/src/old_files/client/odbc/Extra.mk: mvdir storage/ndb/src/old_files/client/odbc/Makefile: mvdir storage/ndb/src/old_files/client/odbc/NdbOdbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/NdbOdbc.def: mvdir storage/ndb/src/old_files/client/odbc/codegen/CodeGen.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/CodeGen.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_base.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_base.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_comp_op.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_create_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_data_type.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_constr.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_ddl_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_delete_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_dml_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_drop_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_const.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_conv.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_func.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_op.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_param.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_expr_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_idx_column.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_insert.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_insert.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_pred_op.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_count.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_distinct.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_filter.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_group.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_join.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_project.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_range.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_repeat.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sort.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_query_sys.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_root.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_root.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_select.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_select.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_set_row.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_stmt.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_table_list.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_index.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_lookup.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Code_update_scan.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/Makefile: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleGram.ypp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.cpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleParser.hpp: mvdir storage/ndb/src/old_files/client/odbc/codegen/SimpleScan.lpp: mvdir storage/ndb/src/old_files/client/odbc/common/AttrArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/AttrArea.hpp: mvdir 
storage/ndb/src/old_files/client/odbc/common/CodeTree.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/CodeTree.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/ConnArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/ConnArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Ctx.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/Ctx.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataField.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataField.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataRow.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataRow.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataType.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DataType.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DescArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DescArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/DiagArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/DiagArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Makefile: mvdir storage/ndb/src/old_files/client/odbc/common/OdbcData.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/OdbcData.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/ResultArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/ResultArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/Sqlstate.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/Sqlstate.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtArea.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtArea.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/StmtInfo.hpp: mvdir storage/ndb/src/old_files/client/odbc/common/common.cpp: mvdir storage/ndb/src/old_files/client/odbc/common/common.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictCatalog.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictColumn.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictIndex.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSchema.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSys.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictSys.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictTable.cpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/DictTable.hpp: mvdir storage/ndb/src/old_files/client/odbc/dictionary/Makefile: mvdir storage/ndb/src/old_files/client/odbc/docs/class.fig: mvdir storage/ndb/src/old_files/client/odbc/docs/descfield.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/diag.txt: mvdir storage/ndb/src/old_files/client/odbc/docs/getinfo.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/gettypeinfo.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/handleattr.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/main.hpp: mvdir storage/ndb/src/old_files/client/odbc/docs/ndbodbc.html: mvdir storage/ndb/src/old_files/client/odbc/docs/select.fig: mvdir storage/ndb/src/old_files/client/odbc/docs/systables.pl: mvdir storage/ndb/src/old_files/client/odbc/docs/type.txt: mvdir storage/ndb/src/old_files/client/odbc/driver/Func.data: mvdir 
storage/ndb/src/old_files/client/odbc/driver/Func.pl: mvdir storage/ndb/src/old_files/client/odbc/driver/Makefile: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandle.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocHandleStd.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLAllocStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindCol.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBindParameter.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBrowseConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLBulkOperations.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCancel.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCloseCursor.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColAttribute.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColAttributes.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColumnPrivileges.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLCopyDesc.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDataSources.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDescribeCol.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDescribeParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDisconnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDriverConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLDrivers.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLEndTran.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLError.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExecDirect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExecute.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLExtendedFetch.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFetch.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFetchScroll.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLForeignKeys.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeConnect.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeHandle.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLFreeStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetConnectOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetCursorName.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDescField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDescRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetDiagRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetEnvAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetFunctions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtAttr.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/driver/SQLGetStmtOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLGetTypeInfo.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLMoreResults.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNativeSql.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNumParams.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLNumResultCols.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLParamData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLParamOptions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLPrepare.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLPrimaryKeys.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLProcedureColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLProcedures.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLPutData.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLRowCount.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetConnectOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetCursorName.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetDescField.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetDescRec.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetEnvAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetParam.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetPos.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetScrollOptions.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtAttr.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSetStmtOption.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLSpecialColumns.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLStatistics.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTablePrivileges.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTables.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/SQLTransact.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/driver.cpp: mvdir storage/ndb/src/old_files/client/odbc/driver/driver.hpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_comp_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_create_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_create_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_delete_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_drop_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_drop_table.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_conv.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_func.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_expr_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_insert.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_pred_op.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_range.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_query_scan.cpp: mvdir 
storage/ndb/src/old_files/client/odbc/executor/Exec_query_sys.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_index.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_lookup.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Exec_update_scan.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Executor.cpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Executor.hpp: mvdir storage/ndb/src/old_files/client/odbc/executor/Makefile: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrDbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrRoot.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/AttrStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/DescSpec.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/FuncTab.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleBase.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleBase.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDbc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDbc.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDesc.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleDesc.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleEnv.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleEnv.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleRoot.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleRoot.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleStmt.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/HandleStmt.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/InfoTab.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/Makefile: mvdir storage/ndb/src/old_files/client/odbc/handles/PoolNdb.cpp: mvdir storage/ndb/src/old_files/client/odbc/handles/PoolNdb.hpp: mvdir storage/ndb/src/old_files/client/odbc/handles/handles.hpp: mvdir storage/ndb/src/old_files/ndbbaseclient/Makefile: mvdir storage/ndb/src/old_files/ndbbaseclient/ndbbaseclient_dummy.cpp: mvdir storage/ndb/src/old_files/ndbclient/Makefile: mvdir storage/ndb/src/old_files/ndbclient/ndbclient_dummy.cpp: mvdir storage/ndb/src/old_files/newtonapi/Makefile: mvdir storage/ndb/src/old_files/newtonapi/dba_binding.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_bulkread.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_config.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_dac.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_error.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_init.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_internal.hpp: mvdir storage/ndb/src/old_files/newtonapi/dba_process.cpp: mvdir storage/ndb/src/old_files/newtonapi/dba_process.hpp: mvdir storage/ndb/src/old_files/newtonapi/dba_schema.cpp: mvdir storage/ndb/src/old_files/rep/ExtSender.cpp: mvdir storage/ndb/src/old_files/rep/ExtSender.hpp: mvdir storage/ndb/src/old_files/rep/Makefile: mvdir storage/ndb/src/old_files/rep/NodeConnectInfo.hpp: mvdir storage/ndb/src/old_files/rep/README: mvdir storage/ndb/src/old_files/rep/RepApiInterpreter.cpp: mvdir storage/ndb/src/old_files/rep/RepApiInterpreter.hpp: mvdir storage/ndb/src/old_files/rep/RepApiService.cpp: mvdir storage/ndb/src/old_files/rep/RepApiService.hpp: mvdir storage/ndb/src/old_files/rep/RepCommandInterpreter.cpp: mvdir storage/ndb/src/old_files/rep/RepCommandInterpreter.hpp: mvdir storage/ndb/src/old_files/rep/RepComponents.cpp: 
mvdir storage/ndb/src/old_files/rep/RepComponents.hpp: mvdir storage/ndb/src/old_files/rep/RepMain.cpp: mvdir storage/ndb/src/old_files/rep/Requestor.cpp: mvdir storage/ndb/src/old_files/rep/Requestor.hpp: mvdir storage/ndb/src/old_files/rep/RequestorSubscriptions.cpp: mvdir storage/ndb/src/old_files/rep/SignalQueue.cpp: mvdir storage/ndb/src/old_files/rep/SignalQueue.hpp: mvdir storage/ndb/src/old_files/rep/TODO: mvdir storage/ndb/src/old_files/rep/adapters/AppNDB.cpp: mvdir storage/ndb/src/old_files/rep/adapters/AppNDB.hpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtAPI.cpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtAPI.hpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtNDB.cpp: mvdir storage/ndb/src/old_files/rep/adapters/ExtNDB.hpp: mvdir storage/ndb/src/old_files/rep/adapters/Makefile: mvdir storage/ndb/src/old_files/rep/adapters/TableInfoPs.hpp: mvdir storage/ndb/src/old_files/rep/dbug_hack.cpp: mvdir storage/ndb/src/old_files/rep/rep_version.hpp: mvdir storage/ndb/src/old_files/rep/repapi/Makefile: mvdir storage/ndb/src/old_files/rep/repapi/repapi.cpp: mvdir storage/ndb/src/old_files/rep/repapi/repapi.h: mvdir storage/ndb/src/old_files/rep/state/Channel.cpp: mvdir storage/ndb/src/old_files/rep/state/Channel.hpp: mvdir storage/ndb/src/old_files/rep/state/Interval.cpp: mvdir storage/ndb/src/old_files/rep/state/Interval.hpp: mvdir storage/ndb/src/old_files/rep/state/Makefile: mvdir storage/ndb/src/old_files/rep/state/RepState.cpp: mvdir storage/ndb/src/old_files/rep/state/RepState.hpp: mvdir storage/ndb/src/old_files/rep/state/RepStateEvent.cpp: mvdir storage/ndb/src/old_files/rep/state/RepStateRequests.cpp: mvdir storage/ndb/src/old_files/rep/state/testInterval/Makefile: mvdir storage/ndb/src/old_files/rep/state/testInterval/testInterval.cpp: mvdir storage/ndb/src/old_files/rep/state/testRepState/Makefile: mvdir storage/ndb/src/old_files/rep/state/testRepState/testRequestor.cpp: mvdir storage/ndb/src/old_files/rep/state/testRepState/testRequestor.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIBuffer.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIBuffer.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainer.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainer.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainerPS.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIContainerPS.hpp: mvdir storage/ndb/src/old_files/rep/storage/GCIPage.cpp: mvdir storage/ndb/src/old_files/rep/storage/GCIPage.hpp: mvdir storage/ndb/src/old_files/rep/storage/LogRecord.hpp: mvdir storage/ndb/src/old_files/rep/storage/Makefile: mvdir storage/ndb/src/old_files/rep/storage/NodeConnectInfo.hpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroup.cpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroup.hpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroupInfo.cpp: mvdir storage/ndb/src/old_files/rep/storage/NodeGroupInfo.hpp: mvdir storage/ndb/src/old_files/rep/transfer/Makefile: mvdir storage/ndb/src/old_files/rep/transfer/TransPS.cpp: mvdir storage/ndb/src/old_files/rep/transfer/TransPS.hpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSS.cpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSS.hpp: mvdir storage/ndb/src/old_files/rep/transfer/TransSSSubscriptions.cpp: mvdir storage/ndb/test/Makefile.am: mvdir storage/ndb/test/include/CpcClient.hpp: mvdir storage/ndb/test/include/HugoAsynchTransactions.hpp: mvdir storage/ndb/test/include/HugoCalculator.hpp: mvdir storage/ndb/test/include/HugoOperations.hpp: mvdir 
storage/ndb/test/include/HugoTransactions.hpp: mvdir storage/ndb/test/include/NDBT.hpp: mvdir storage/ndb/test/include/NDBT_DataSet.hpp: mvdir storage/ndb/test/include/NDBT_DataSetTransaction.hpp: mvdir storage/ndb/test/include/NDBT_Error.hpp: mvdir storage/ndb/test/include/NDBT_Output.hpp: mvdir storage/ndb/test/include/NDBT_ResultRow.hpp: mvdir storage/ndb/test/include/NDBT_ReturnCodes.h: mvdir storage/ndb/test/include/NDBT_Stats.hpp: mvdir storage/ndb/test/include/NDBT_Table.hpp: mvdir storage/ndb/test/include/NDBT_Tables.hpp: mvdir storage/ndb/test/include/NDBT_Test.hpp: mvdir storage/ndb/test/include/NdbBackup.hpp: mvdir storage/ndb/test/include/NdbConfig.hpp: mvdir storage/ndb/test/include/NdbGrep.hpp: mvdir storage/ndb/test/include/NdbRestarter.hpp: mvdir storage/ndb/test/include/NdbRestarts.hpp: mvdir storage/ndb/test/include/NdbSchemaCon.hpp: mvdir storage/ndb/test/include/NdbSchemaOp.hpp: mvdir storage/ndb/test/include/NdbTest.hpp: mvdir storage/ndb/test/include/NdbTimer.hpp: mvdir storage/ndb/test/include/TestNdbEventOperation.hpp: mvdir storage/ndb/test/include/UtilTransactions.hpp: mvdir storage/ndb/test/include/getarg.h: mvdir storage/ndb/test/ndbapi/InsertRecs.cpp: mvdir storage/ndb/test/ndbapi/Makefile.am: mvdir storage/ndb/test/ndbapi/ScanFilter.hpp: mvdir storage/ndb/test/ndbapi/ScanFunctions.hpp: mvdir storage/ndb/test/ndbapi/ScanInterpretTest.hpp: mvdir storage/ndb/test/ndbapi/TraceNdbApi.cpp: mvdir storage/ndb/test/ndbapi/VerifyNdbApi.cpp: mvdir storage/ndb/test/ndbapi/acid.cpp: mvdir storage/ndb/test/ndbapi/acid2.cpp: mvdir storage/ndb/test/ndbapi/adoInsertRecs.cpp: mvdir storage/ndb/test/ndbapi/asyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/benchronja.cpp: mvdir storage/ndb/test/ndbapi/bulk_copy.cpp: mvdir storage/ndb/test/ndbapi/cdrserver.cpp: mvdir storage/ndb/test/ndbapi/celloDb.cpp: mvdir storage/ndb/test/ndbapi/create_all_tabs.cpp: mvdir storage/ndb/test/ndbapi/create_tab.cpp: mvdir storage/ndb/test/ndbapi/drop_all_tabs.cpp: mvdir storage/ndb/test/ndbapi/flexAsynch.cpp: mvdir storage/ndb/test/ndbapi/flexBench.cpp: mvdir storage/ndb/test/ndbapi/flexHammer.cpp: mvdir storage/ndb/test/ndbapi/flexScan.cpp: mvdir storage/ndb/test/ndbapi/flexTT.cpp: mvdir storage/ndb/test/ndbapi/flexTimedAsynch.cpp: mvdir storage/ndb/test/ndbapi/flex_bench_mysql.cpp: mvdir storage/ndb/test/ndbapi/index.cpp: mvdir storage/ndb/test/ndbapi/index2.cpp: mvdir storage/ndb/test/ndbapi/initronja.cpp: mvdir storage/ndb/test/ndbapi/interpreterInTup.cpp: mvdir storage/ndb/test/ndbapi/mainAsyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/msa.cpp: mvdir storage/ndb/test/ndbapi/ndb_async1.cpp: mvdir storage/ndb/test/ndbapi/ndb_async2.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_populate.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction2.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction3.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction4.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction5.cpp: mvdir storage/ndb/test/ndbapi/ndb_user_transaction6.cpp: mvdir storage/ndb/test/ndbapi/restarter.cpp: mvdir storage/ndb/test/ndbapi/restarter2.cpp: mvdir storage/ndb/test/ndbapi/restarts.cpp: mvdir storage/ndb/test/ndbapi/size.cpp: mvdir storage/ndb/test/ndbapi/slow_select.cpp: mvdir storage/ndb/test/ndbapi/testBackup.cpp: mvdir storage/ndb/test/ndbapi/testBasic.cpp: mvdir storage/ndb/test/ndbapi/testBasicAsynch.cpp: mvdir storage/ndb/test/ndbapi/testBitfield.cpp: mvdir storage/ndb/test/ndbapi/testBlobs.cpp: mvdir 
storage/ndb/test/ndbapi/testDataBuffers.cpp: mvdir storage/ndb/test/ndbapi/testDeadlock.cpp: mvdir storage/ndb/test/ndbapi/testDict.cpp: mvdir storage/ndb/test/ndbapi/testGrep.cpp: mvdir storage/ndb/test/ndbapi/testGrepVerify.cpp: mvdir storage/ndb/test/ndbapi/testIndex.cpp: mvdir storage/ndb/test/ndbapi/testInterpreter.cpp: mvdir storage/ndb/test/ndbapi/testLcp.cpp: mvdir storage/ndb/test/ndbapi/testMgm.cpp: mvdir storage/ndb/test/ndbapi/testNdbApi.cpp: mvdir storage/ndb/test/ndbapi/testNodeRestart.cpp: mvdir storage/ndb/test/ndbapi/testOIBasic.cpp: mvdir storage/ndb/test/ndbapi/testOperations.cpp: mvdir storage/ndb/test/ndbapi/testOrderedIndex.cpp: mvdir storage/ndb/test/ndbapi/testPartitioning.cpp: mvdir storage/ndb/test/ndbapi/testReadPerf.cpp: mvdir storage/ndb/test/ndbapi/testRestartGci.cpp: mvdir storage/ndb/test/ndbapi/bank/Bank.cpp: mvdir storage/ndb/test/ndbapi/bank/Bank.hpp: mvdir storage/ndb/test/ndbapi/bank/BankLoad.cpp: mvdir storage/ndb/test/ndbapi/bank/Makefile.am: mvdir storage/ndb/test/ndbapi/bank/bankCreator.cpp: mvdir storage/ndb/test/ndbapi/bank/bankMakeGL.cpp: mvdir storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp: mvdir storage/ndb/test/ndbapi/bank/bankTimer.cpp: mvdir storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp: mvdir storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp: mvdir storage/ndb/test/ndbapi/bank/testBank.cpp: mvdir storage/ndb/test/ndbapi/bench/asyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/bench/dbGenerator.h: mvdir storage/ndb/test/ndbapi/bench/dbPopulate.cpp: mvdir storage/ndb/test/ndbapi/bench/dbPopulate.h: mvdir storage/ndb/test/ndbapi/bench/macros.h: mvdir storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp: mvdir storage/ndb/test/ndbapi/bench/mainPopulate.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_async1.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_async2.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/bench/ndb_schema.hpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp: mvdir storage/ndb/test/ndbapi/testScan.cpp: mvdir storage/ndb/test/ndbapi/testScanInterpreter.cpp: mvdir storage/ndb/test/ndbapi/testScanPerf.cpp: mvdir storage/ndb/test/ndbapi/testSystemRestart.cpp: mvdir storage/ndb/test/ndbapi/testTimeout.cpp: mvdir storage/ndb/test/ndbapi/testTransactions.cpp: mvdir storage/ndb/test/ndbapi/test_event.cpp: mvdir storage/ndb/test/ndbapi/test_event_multi_table.cpp: mvdir storage/ndb/test/ndbapi/userInterface.cpp: mvdir storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp: mvdir storage/ndb/test/ndbapi/bench/testData.h: mvdir storage/ndb/test/ndbapi/bench/testDefinitions.h: mvdir storage/ndb/test/ndbapi/bench/userInterface.cpp: mvdir storage/ndb/test/ndbapi/bench/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/acid/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/TraceNdbApi.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/acid2/VerifyNdbApi.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/basicAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/bulk_copy/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/create_all_tabs/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/create_tab/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/drop_all_tabs/Makefile: mvdir 
storage/ndb/test/ndbapi/old_dirs/flexAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexBench/Makefile.am: mvdir storage/ndb/test/ndbapi/old_dirs/flexBench/ndbplot.pl: mvdir storage/ndb/test/ndbapi/old_dirs/flexHammer/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexHammer/README: mvdir storage/ndb/test/ndbapi/old_dirs/flexScan/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexScan/README: mvdir storage/ndb/test/ndbapi/old_dirs/flexTT/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flexTimedAsynch/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/flex_bench_mysql/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/indexTest/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/indexTest2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/interpreterInTup/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/generator/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/dbGenerator.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/testData.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/include/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/macros.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/async-src/user/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/bin/.empty: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/ndb_schema.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/include/testDefinitions.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/lib/.empty: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l-p10.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-l.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench-p10.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/script/async-lmc-bench.sh: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/README: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/dbGenerator.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/generator/mainGenerator.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/testData.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/include/userInterface.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.linux: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/makevars.sparc: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/dbPopulate.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/populator/mainPopulate.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/localDbPrepare.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/macros.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/ndb_error.hpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/Makefile: mvdir 
storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userHandle.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userInterface.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userHandle.h: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userInterface.cpp: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/userTransaction.c: mvdir storage/ndb/test/ndbapi/old_dirs/lmc-bench/src/user/old/userTransaction.c: mvdir storage/ndb/test/ndbapi/old_dirs/restarter/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/restarter2/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/restarts/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/benchronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/ronja/initronja/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/telco/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/telco/readme: mvdir storage/ndb/test/ndbapi/old_dirs/testBackup/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testBasic/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testBlobs/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testDataBuffers/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testDict/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testGrep/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testGrep/verify/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testIndex/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testInterpreter/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testMgm/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testNdbApi/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testNodeRestart/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOIBasic/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOIBasic/times.txt: mvdir storage/ndb/test/ndbapi/old_dirs/testOperations/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testOrderedIndex/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testRestartGci/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testScan/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testScanInterpreter/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testSystemRestart/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testTimeout/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/testTransactions/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/test_event/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/Makefile: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/bcd.h: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/script/client_start: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/utv.h: mvdir storage/ndb/test/ndbapi/old_dirs/vw_test/vcdrfunc.h: mvdir storage/ndb/test/ndbnet/test.run: mvdir storage/ndb/test/ndbnet/testError.run: mvdir storage/ndb/test/ndbnet/testMNF.run: mvdir storage/ndb/test/ndbnet/testNR.run: mvdir storage/ndb/test/ndbnet/testNR1.run: mvdir storage/ndb/test/ndbnet/testNR4.run: mvdir storage/ndb/test/ndbnet/testSRhang.run: mvdir storage/ndb/test/ndbnet/testTR295.run: mvdir storage/ndb/test/newtonapi/basic_test/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/basic/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/basic/basic.cpp: mvdir storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile: mvdir storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp: mvdir storage/ndb/test/newtonapi/basic_test/common.cpp: mvdir storage/ndb/test/newtonapi/basic_test/common.hpp: mvdir storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile: mvdir 
storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp: mvdir storage/ndb/test/newtonapi/basic_test/too_basic.cpp: mvdir storage/ndb/test/newtonapi/perf_test/Makefile: mvdir storage/ndb/test/newtonapi/perf_test/perf.cpp: mvdir storage/ndb/test/odbc/SQL99_test/Makefile: mvdir storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp: mvdir storage/ndb/test/odbc/SQL99_test/SQL99_test.h: mvdir storage/ndb/test/odbc/client/Makefile: mvdir storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp: mvdir storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp: mvdir storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp: mvdir storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp: mvdir storage/ndb/test/odbc/client/SQLBindColTest.cpp: mvdir storage/ndb/test/odbc/client/SQLBindParameterTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCancelTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp: mvdir storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp: mvdir storage/ndb/test/odbc/client/SQLConnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLCopyDescTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDescribeColTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDisconnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLEndTranTest.cpp: mvdir storage/ndb/test/odbc/client/SQLErrorTest.cpp: mvdir storage/ndb/test/odbc/client/SQLExecDirectTest.cpp: mvdir storage/ndb/test/odbc/client/SQLExecuteTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFetchTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetInfoTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp: mvdir storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp: mvdir storage/ndb/test/odbc/client/SQLParamDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLPrepareTest.cpp: mvdir storage/ndb/test/odbc/client/SQLPutDataTest.cpp: mvdir storage/ndb/test/odbc/client/SQLRowCountTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp: mvdir storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp: mvdir 
storage/ndb/test/odbc/client/SQLTablesTest.cpp: mvdir storage/ndb/test/odbc/client/SQLTransactTest.cpp: mvdir storage/ndb/test/odbc/client/common.hpp: mvdir storage/ndb/test/odbc/client/main.cpp: mvdir storage/ndb/test/odbc/dm-iodbc/Makefile: mvdir storage/ndb/test/odbc/dm-unixodbc/Makefile: mvdir storage/ndb/test/odbc/driver/Makefile: mvdir storage/ndb/test/odbc/driver/testOdbcDriver.cpp: mvdir storage/ndb/test/odbc/test_compiler/Makefile: mvdir storage/ndb/test/odbc/test_compiler/test_compiler.cpp: mvdir storage/ndb/test/run-test/16node-tests.txt: mvdir storage/ndb/test/run-test/Makefile.am: mvdir storage/ndb/test/run-test/README.ATRT: mvdir storage/ndb/test/run-test/README: mvdir storage/ndb/test/run-test/atrt-analyze-result.sh: mvdir storage/ndb/test/run-test/atrt-clear-result.sh: mvdir storage/ndb/test/run-test/atrt-example.tgz: mvdir storage/ndb/test/run-test/atrt-gather-result.sh: mvdir storage/ndb/test/run-test/atrt-mysql-test-run: mvdir storage/ndb/test/run-test/atrt-setup.sh: mvdir storage/ndb/test/run-test/atrt-testBackup: mvdir storage/ndb/test/run-test/basic.txt: mvdir storage/ndb/test/run-test/daily-basic-tests.txt: mvdir storage/ndb/test/run-test/daily-devel-tests.txt: mvdir storage/ndb/test/run-test/example.conf: mvdir storage/ndb/test/run-test/main.cpp: mvdir storage/ndb/test/run-test/make-config.sh: mvdir storage/ndb/test/run-test/make-html-reports.sh: mvdir storage/ndb/test/run-test/make-index.sh: mvdir storage/ndb/test/run-test/ndb-autotest.sh: mvdir storage/ndb/test/run-test/run-test.hpp: mvdir storage/ndb/test/src/CpcClient.cpp: mvdir storage/ndb/test/src/HugoAsynchTransactions.cpp: mvdir storage/ndb/test/src/HugoCalculator.cpp: mvdir storage/ndb/test/src/HugoOperations.cpp: mvdir storage/ndb/test/src/HugoTransactions.cpp: mvdir storage/ndb/test/src/Makefile.am: mvdir storage/ndb/test/src/NDBT_Error.cpp: mvdir storage/ndb/test/src/NDBT_Output.cpp: mvdir storage/ndb/test/src/NDBT_ResultRow.cpp: mvdir storage/ndb/test/src/NDBT_ReturnCodes.cpp: mvdir storage/ndb/test/src/NDBT_Table.cpp: mvdir storage/ndb/test/src/NDBT_Tables.cpp: mvdir storage/ndb/test/src/NDBT_Test.cpp: mvdir storage/ndb/test/src/NdbBackup.cpp: mvdir storage/ndb/test/src/NdbConfig.cpp: mvdir storage/ndb/test/src/NdbGrep.cpp: mvdir storage/ndb/test/src/NdbRestarter.cpp: mvdir storage/ndb/test/src/NdbRestarts.cpp: mvdir storage/ndb/test/src/NdbSchemaCon.cpp: mvdir storage/ndb/test/src/NdbSchemaOp.cpp: mvdir storage/ndb/test/src/UtilTransactions.cpp: mvdir storage/ndb/test/src/getarg.c: mvdir storage/ndb/test/tools/Makefile.am: mvdir storage/ndb/test/tools/copy_tab.cpp: mvdir storage/ndb/test/tools/cpcc.cpp: mvdir storage/ndb/test/tools/create_index.cpp: mvdir storage/ndb/test/tools/hugoCalculator.cpp: mvdir storage/ndb/test/tools/hugoFill.cpp: mvdir storage/ndb/test/tools/hugoLoad.cpp: mvdir storage/ndb/test/tools/hugoLockRecords.cpp: mvdir storage/ndb/test/tools/hugoPkDelete.cpp: mvdir storage/ndb/test/tools/hugoPkRead.cpp: mvdir storage/ndb/test/tools/hugoPkReadRecord.cpp: mvdir storage/ndb/test/tools/hugoPkUpdate.cpp: mvdir storage/ndb/test/tools/hugoScanRead.cpp: mvdir storage/ndb/test/tools/hugoScanUpdate.cpp: mvdir storage/ndb/test/tools/old_dirs/hugoCalculator/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoFill/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoLoad/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoLockRecords/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkDelete/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkRead/Makefile: mvdir 
storage/ndb/test/tools/restart.cpp: mvdir storage/ndb/test/tools/transproxy.cpp: mvdir storage/ndb/test/tools/verify_index.cpp: mvdir storage/ndb/test/tools/old_dirs/hugoPkReadRecord/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoPkUpdate/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoScanRead/Makefile: mvdir storage/ndb/test/tools/old_dirs/hugoScanUpdate/Makefile: mvdir storage/ndb/test/tools/old_dirs/restart/Makefile: mvdir storage/ndb/test/tools/old_dirs/transproxy/Makefile: mvdir storage/ndb/test/tools/old_dirs/verify_index/Makefile: mvdir storage/ndb/test/tools/old_dirs/waiter/waiter.cpp: mvdir storage/ndb/tools/Makefile.am: mvdir storage/ndb/tools/clean-links.sh: mvdir storage/ndb/tools/delete_all.cpp: mvdir storage/ndb/tools/desc.cpp: mvdir storage/ndb/tools/drop_index.cpp: mvdir storage/ndb/tools/drop_tab.cpp: mvdir storage/ndb/tools/listTables.cpp: mvdir storage/ndb/tools/make-errors.pl: mvdir storage/ndb/tools/make-links.sh: mvdir storage/ndb/tools/ndb_test_platform.cpp: mvdir storage/ndb/tools/ndbsql.cpp: mvdir storage/ndb/tools/old_dirs/copy_tab/Makefile: mvdir storage/ndb/tools/old_dirs/cpcc/Makefile: mvdir storage/ndb/tools/old_dirs/create_index/Makefile: mvdir storage/ndb/tools/old_dirs/delete_all/Makefile: mvdir storage/ndb/tools/old_dirs/desc/Makefile: mvdir storage/ndb/tools/old_dirs/drop_index/Makefile: mvdir storage/ndb/tools/old_dirs/drop_tab/Makefile: mvdir storage/ndb/tools/old_dirs/list_tables/Makefile: mvdir storage/ndb/tools/old_dirs/ndbnet/Makefile.PL: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbnet.pl: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbnetd.pl: mvdir storage/ndb/tools/old_dirs/ndbnet/ndbrun: mvdir storage/ndb/tools/rgrep: mvdir storage/ndb/tools/select_all.cpp: mvdir storage/ndb/tools/select_count.cpp: mvdir storage/ndb/tools/waiter.cpp: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Base.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Client.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Command.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Config.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Database.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Env.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Node.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeApi.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeDb.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/NodeMgmt.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/Server.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerINET.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Net/ServerUNIX.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Base.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Database.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Env.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Run/Node.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Base.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Dir.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Event.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/File.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/IO.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Lock.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Log.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/Socket.pm: 
mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketINET.pm: mvdir storage/ndb/tools/old_dirs/ndbnet/lib/NDB/Util/SocketUNIX.pm: mvdir storage/ndb/tools/old_dirs/ndbsql/Makefile: mvdir storage/ndb/tools/old_dirs/select_all/Makefile: mvdir storage/ndb/tools/old_dirs/select_count/Makefile: mvdir storage/ndb/tools/old_dirs/src/counterviewer/CounterViewer.java: mvdir storage/ndb/tools/restore/Restore.cpp: mvdir storage/ndb/tools/restore/Restore.hpp: mvdir storage/ndb/tools/restore/consumer.cpp: mvdir storage/ndb/tools/restore/consumer.hpp: mvdir storage/ndb/tools/restore/consumer_printer.cpp: mvdir storage/ndb/tools/restore/consumer_printer.hpp: mvdir storage/ndb/tools/restore/consumer_restore.cpp: mvdir storage/ndb/tools/restore/consumer_restore.hpp: mvdir storage/ndb/tools/restore/consumer_restorem.cpp: mvdir storage/ndb/tools/restore/restore_main.cpp: mvdir storage/bdb/LICENSE: mvdir storage/bdb/Makefile.in: mvdir storage/bdb/btree/bt_compare.c: mvdir storage/bdb/btree/bt_conv.c: mvdir storage/bdb/btree/bt_curadj.c: mvdir storage/bdb/btree/bt_cursor.c: mvdir storage/bdb/btree/bt_delete.c: mvdir storage/bdb/btree/bt_method.c: mvdir storage/bdb/btree/bt_open.c: mvdir storage/bdb/btree/bt_put.c: mvdir storage/bdb/btree/bt_rec.c: mvdir storage/bdb/btree/bt_reclaim.c: mvdir storage/bdb/btree/bt_recno.c: mvdir storage/bdb/btree/bt_rsearch.c: mvdir storage/bdb/btree/bt_search.c: mvdir storage/bdb/btree/bt_split.c: mvdir storage/bdb/btree/bt_stat.c: mvdir storage/bdb/btree/bt_upgrade.c: mvdir storage/bdb/btree/bt_verify.c: mvdir storage/bdb/btree/btree.src: mvdir storage/bdb/build_unix/.IGNORE_ME: mvdir storage/bdb/build_vxworks/BerkeleyDB.wsp: mvdir storage/bdb/build_vxworks/dbdemo/README: mvdir storage/bdb/build_win32/Berkeley_DB.dsw: mvdir storage/bdb/build_win32/app_dsp.src: mvdir storage/bdb/build_win32/build_all.dsp: mvdir storage/bdb/build_win32/db_java_xa.dsp: mvdir storage/bdb/build_win32/db_java_xaj.mak: mvdir storage/bdb/build_win32/db_lib.dsp: mvdir storage/bdb/build_win32/db_test.src: mvdir storage/bdb/build_win32/dbkill.cpp: mvdir storage/bdb/build_win32/dllmain.c: mvdir storage/bdb/build_win32/dynamic_dsp.src: mvdir storage/bdb/build_win32/java_dsp.src: mvdir storage/bdb/build_win32/libdb_tcl.def: mvdir storage/bdb/build_win32/libdbrc.src: mvdir storage/bdb/build_win32/srcfile_dsp.src: mvdir storage/bdb/build_win32/static_dsp.src: mvdir storage/bdb/build_win32/tcl_dsp.src: mvdir storage/bdb/clib/getcwd.c: mvdir storage/bdb/clib/getopt.c: mvdir storage/bdb/clib/memcmp.c: mvdir storage/bdb/clib/memmove.c: mvdir storage/bdb/clib/raise.c: mvdir storage/bdb/clib/snprintf.c: mvdir storage/bdb/clib/strcasecmp.c: mvdir storage/bdb/clib/strdup.c: mvdir storage/bdb/clib/strerror.c: mvdir storage/bdb/clib/vsnprintf.c: mvdir storage/bdb/common/db_byteorder.c: mvdir storage/bdb/common/db_err.c: mvdir storage/bdb/common/db_getlong.c: mvdir storage/bdb/common/db_idspace.c: mvdir storage/bdb/common/db_log2.c: mvdir storage/bdb/common/util_arg.c: mvdir storage/bdb/common/util_cache.c: mvdir storage/bdb/common/util_log.c: mvdir storage/bdb/common/util_sig.c: mvdir storage/bdb/cxx/cxx_db.cpp: mvdir storage/bdb/cxx/cxx_dbc.cpp: mvdir storage/bdb/cxx/cxx_dbt.cpp: mvdir storage/bdb/cxx/cxx_env.cpp: mvdir storage/bdb/cxx/cxx_except.cpp: mvdir storage/bdb/cxx/cxx_lock.cpp: mvdir storage/bdb/cxx/cxx_logc.cpp: mvdir storage/bdb/cxx/cxx_mpool.cpp: mvdir storage/bdb/cxx/cxx_txn.cpp: mvdir storage/bdb/db/crdel.src: mvdir storage/bdb/db/crdel_rec.c: mvdir storage/bdb/db/db.c: mvdir storage/bdb/db/db.src: 
mvdir storage/bdb/db/db_am.c: mvdir storage/bdb/db/db_cam.c: mvdir storage/bdb/db/db_conv.c: mvdir storage/bdb/db/db_dispatch.c: mvdir storage/bdb/db/db_dup.c: mvdir storage/bdb/db/db_iface.c: mvdir storage/bdb/db/db_join.c: mvdir storage/bdb/db/db_meta.c: mvdir storage/bdb/db/db_method.c: mvdir storage/bdb/db/db_open.c: mvdir storage/bdb/db/db_overflow.c: mvdir storage/bdb/db/db_pr.c: mvdir storage/bdb/db/db_rec.c: mvdir storage/bdb/db/db_reclaim.c: mvdir storage/bdb/db/db_remove.c: mvdir storage/bdb/db/db_rename.c: mvdir storage/bdb/db/db_ret.c: mvdir storage/bdb/db/db_truncate.c: mvdir storage/bdb/db/db_upg.c: mvdir storage/bdb/db/db_upg_opd.c: mvdir storage/bdb/db/db_vrfy.c: mvdir storage/bdb/db/db_vrfyutil.c: mvdir storage/bdb/db185/db185.c: mvdir storage/bdb/db185/db185_int.in: mvdir storage/bdb/db_archive/db_archive.c: mvdir storage/bdb/db_checkpoint/db_checkpoint.c: mvdir storage/bdb/db_deadlock/db_deadlock.c: mvdir storage/bdb/db_dump/db_dump.c: mvdir storage/bdb/db_dump185/db_dump185.c: mvdir storage/bdb/db_load/db_load.c: mvdir storage/bdb/db_printlog/README: mvdir storage/bdb/db_printlog/commit.awk: mvdir storage/bdb/db_printlog/count.awk: mvdir storage/bdb/db_printlog/db_printlog.c: mvdir storage/bdb/db_printlog/dbname.awk: mvdir storage/bdb/db_printlog/fileid.awk: mvdir storage/bdb/db_printlog/logstat.awk: mvdir storage/bdb/db_printlog/pgno.awk: mvdir storage/bdb/db_printlog/range.awk: mvdir storage/bdb/db_printlog/rectype.awk: mvdir storage/bdb/db_printlog/status.awk: mvdir storage/bdb/db_printlog/txn.awk: mvdir storage/bdb/db_recover/db_recover.c: mvdir storage/bdb/db_stat/db_stat.c: mvdir storage/bdb/db_upgrade/db_upgrade.c: mvdir storage/bdb/db_verify/db_verify.c: mvdir storage/bdb/dbinc/btree.h: mvdir storage/bdb/dbinc/crypto.h: mvdir storage/bdb/dbinc/cxx_common.h: mvdir storage/bdb/dbinc/cxx_except.h: mvdir storage/bdb/dbinc/cxx_int.h: mvdir storage/bdb/dbinc/db.in: mvdir storage/bdb/dbinc/db_185.in: mvdir storage/bdb/dbinc/db_am.h: mvdir storage/bdb/dbinc/db_cxx.in: mvdir storage/bdb/dbinc/db_dispatch.h: mvdir storage/bdb/dbinc/db_int.in: mvdir storage/bdb/dbinc/db_join.h: mvdir storage/bdb/dbinc/db_page.h: mvdir storage/bdb/dbinc/db_server_int.h: mvdir storage/bdb/dbinc/db_shash.h: mvdir storage/bdb/dbinc/db_swap.h: mvdir storage/bdb/dbinc/db_upgrade.h: mvdir storage/bdb/dbinc/db_verify.h: mvdir storage/bdb/dbinc/debug.h: mvdir storage/bdb/dbinc/fop.h: mvdir storage/bdb/dbinc/globals.h: mvdir storage/bdb/dbinc/hash.h: mvdir storage/bdb/dbinc/hmac.h: mvdir storage/bdb/dbinc/lock.h: mvdir storage/bdb/dbinc/log.h: mvdir storage/bdb/dbinc/mp.h: mvdir storage/bdb/dbinc/mutex.h: mvdir storage/bdb/dbinc/os.h: mvdir storage/bdb/dbinc/qam.h: mvdir storage/bdb/dbinc/queue.h: mvdir storage/bdb/dbinc/region.h: mvdir storage/bdb/dbinc/rep.h: mvdir storage/bdb/dbinc/shqueue.h: mvdir storage/bdb/dbinc/tcl_db.h: mvdir storage/bdb/dbinc/txn.h: mvdir storage/bdb/dbinc/xa.h: mvdir storage/bdb/dbm/dbm.c: mvdir storage/bdb/dbreg/dbreg.c: mvdir storage/bdb/dbreg/dbreg.src: mvdir storage/bdb/dbreg/dbreg_rec.c: mvdir storage/bdb/dbreg/dbreg_util.c: mvdir storage/bdb/dist/Makefile.in: mvdir storage/bdb/dist/RELEASE: mvdir storage/bdb/dist/buildrel: mvdir storage/bdb/dist/config.guess: mvdir storage/bdb/dist/config.sub: mvdir storage/bdb/dist/configure.ac: mvdir storage/bdb/dist/db.ecd.in: mvdir storage/bdb/dist/db.spec.in: mvdir storage/bdb/dist/gen_inc.awk: mvdir storage/bdb/dist/gen_rec.awk: mvdir storage/bdb/dist/gen_rpc.awk: mvdir storage/bdb/dist/install-sh: mvdir 
storage/bdb/dist/ltmain.sh: mvdir storage/bdb/dist/pubdef.in: mvdir storage/bdb/dist/s_all: mvdir storage/bdb/dist/s_config: mvdir storage/bdb/dist/aclocal/config.ac: mvdir storage/bdb/dist/aclocal/cxx.ac: mvdir storage/bdb/dist/aclocal/gcc.ac: mvdir storage/bdb/dist/aclocal/libtool.ac: mvdir storage/bdb/dist/s_crypto: mvdir storage/bdb/dist/s_dir: mvdir storage/bdb/dist/s_include: mvdir storage/bdb/dist/s_javah: mvdir storage/bdb/dist/s_java: mvdir storage/bdb/dist/s_perm: mvdir storage/bdb/dist/s_readme: mvdir storage/bdb/dist/s_recover: mvdir storage/bdb/dist/s_rpc: mvdir storage/bdb/dist/s_symlink: mvdir storage/bdb/dist/s_tags: mvdir storage/bdb/dist/s_test: mvdir storage/bdb/dist/s_vxworks: mvdir storage/bdb/dist/s_win32_dsp: mvdir storage/bdb/dist/s_win32: mvdir storage/bdb/dist/srcfiles.in: mvdir storage/bdb/dist/vx_buildcd: mvdir storage/bdb/dist/vx_config.in: mvdir storage/bdb/dist/win_config.in: mvdir storage/bdb/dist/win_exports.in: mvdir storage/bdb/dist/aclocal/mutex.ac: mvdir storage/bdb/dist/aclocal/options.ac: mvdir storage/bdb/dist/aclocal/programs.ac: mvdir storage/bdb/dist/aclocal/sosuffix.ac: mvdir storage/bdb/dist/aclocal/tcl.ac: mvdir storage/bdb/dist/aclocal/types.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_class.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_classpath.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_junit.ac: mvdir storage/bdb/dist/aclocal_java/ac_check_rqrd_class.ac: mvdir storage/bdb/dist/aclocal_java/ac_java_options.ac: mvdir storage/bdb/dist/aclocal_java/ac_jni_include_dirs.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_jar.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_java.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_java_works.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javac.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javac_works.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javadoc.ac: mvdir storage/bdb/dist/aclocal_java/ac_prog_javah.ac: mvdir storage/bdb/dist/aclocal_java/ac_try_compile_java.ac: mvdir storage/bdb/dist/aclocal_java/ac_try_run_javac.ac: mvdir storage/bdb/dist/template/rec_ctemp: mvdir storage/bdb/dist/vx_2.0/BerkeleyDB.wpj: mvdir storage/bdb/dist/vx_2.0/wpj.in: mvdir storage/bdb/dist/vx_3.1/Makefile.custom: mvdir storage/bdb/dist/vx_3.1/cdf.1: mvdir storage/bdb/dist/vx_3.1/cdf.2: mvdir storage/bdb/dist/vx_3.1/cdf.3: mvdir storage/bdb/dist/vx_3.1/component.cdf: mvdir storage/bdb/dist/vx_3.1/component.wpj: mvdir storage/bdb/dist/vx_3.1/wpj.1: mvdir storage/bdb/dist/vx_3.1/wpj.2: mvdir storage/bdb/dist/vx_3.1/wpj.3: mvdir storage/bdb/dist/vx_3.1/wpj.4: mvdir storage/bdb/dist/vx_3.1/wpj.5: mvdir storage/bdb/dist/vx_setup/CONFIG.in: mvdir storage/bdb/dist/vx_setup/LICENSE.TXT: mvdir storage/bdb/dist/vx_setup/MESSAGES.TCL: mvdir storage/bdb/dist/vx_setup/README.in: mvdir storage/bdb/dist/vx_setup/SETUP.BMP: mvdir storage/bdb/dist/vx_setup/vx_allfile.in: mvdir storage/bdb/dist/vx_setup/vx_demofile.in: mvdir storage/bdb/dist/vx_setup/vx_setup.in: mvdir storage/bdb/env/db_salloc.c: mvdir storage/bdb/env/db_shash.c: mvdir storage/bdb/env/env_file.c: mvdir storage/bdb/env/env_method.c.b: mvdir storage/bdb/env/env_method.c: mvdir storage/bdb/env/env_open.c: mvdir storage/bdb/env/env_recover.c: mvdir storage/bdb/env/env_region.c: mvdir storage/bdb/fileops/fileops.src: mvdir storage/bdb/fileops/fop_basic.c: mvdir storage/bdb/fileops/fop_rec.c: mvdir storage/bdb/fileops/fop_util.c: mvdir storage/bdb/hash/hash.c: mvdir storage/bdb/hash/hash.src: mvdir storage/bdb/hash/hash_conv.c: mvdir storage/bdb/hash/hash_dup.c: 
mvdir storage/bdb/hash/hash_func.c: mvdir storage/bdb/hash/hash_meta.c: mvdir storage/bdb/hash/hash_method.c: mvdir storage/bdb/hash/hash_open.c: mvdir storage/bdb/hash/hash_page.c: mvdir storage/bdb/hash/hash_rec.c: mvdir storage/bdb/hash/hash_reclaim.c: mvdir storage/bdb/hash/hash_stat.c: mvdir storage/bdb/hash/hash_upgrade.c: mvdir storage/bdb/hash/hash_verify.c: mvdir storage/bdb/hmac/hmac.c: mvdir storage/bdb/hmac/sha1.c: mvdir storage/bdb/hsearch/hsearch.c: mvdir storage/bdb/libdb_java/checkapi.prl: mvdir storage/bdb/libdb_java/com_sleepycat_db_Db.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbEnv.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLock.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLogc.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbLsn.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbTxn.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_DbUtil.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_Dbc.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_Dbt.h: mvdir storage/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h: mvdir storage/bdb/libdb_java/java_Db.c: mvdir storage/bdb/libdb_java/java_DbEnv.c: mvdir storage/bdb/libdb_java/java_DbLock.c: mvdir storage/bdb/libdb_java/java_DbLogc.c: mvdir storage/bdb/libdb_java/java_DbLsn.c: mvdir storage/bdb/libdb_java/java_DbTxn.c: mvdir storage/bdb/libdb_java/java_DbUtil.c: mvdir storage/bdb/libdb_java/java_DbXAResource.c: mvdir storage/bdb/libdb_java/java_Dbc.c: mvdir storage/bdb/libdb_java/java_Dbt.c: mvdir storage/bdb/libdb_java/java_info.c: mvdir storage/bdb/libdb_java/java_info.h: mvdir storage/bdb/libdb_java/java_locked.c: mvdir storage/bdb/libdb_java/java_locked.h: mvdir storage/bdb/libdb_java/java_util.c: mvdir storage/bdb/libdb_java/java_util.h: mvdir storage/bdb/lock/Design: mvdir storage/bdb/lock/lock.c: mvdir storage/bdb/lock/lock_deadlock.c: mvdir storage/bdb/lock/lock_method.c: mvdir storage/bdb/lock/lock_region.c: mvdir storage/bdb/lock/lock_stat.c: mvdir storage/bdb/lock/lock_util.c: mvdir storage/bdb/log/log.c: mvdir storage/bdb/log/log_archive.c: mvdir storage/bdb/log/log_compare.c: mvdir storage/bdb/log/log_get.c: mvdir storage/bdb/log/log_method.c: mvdir storage/bdb/log/log_put.c: mvdir storage/bdb/mp/mp_alloc.c: mvdir storage/bdb/mp/mp_bh.c: mvdir storage/bdb/mp/mp_fget.c: mvdir storage/bdb/mp/mp_fopen.c: mvdir storage/bdb/mp/mp_fput.c: mvdir storage/bdb/mp/mp_fset.c: mvdir storage/bdb/mp/mp_method.c: mvdir storage/bdb/mp/mp_region.c: mvdir storage/bdb/mp/mp_register.c: mvdir storage/bdb/mp/mp_stat.c: mvdir storage/bdb/mp/mp_sync.c: mvdir storage/bdb/mp/mp_trickle.c: mvdir storage/bdb/mutex/README: mvdir storage/bdb/mutex/mut_fcntl.c: mvdir storage/bdb/mutex/mut_pthread.c: mvdir storage/bdb/mutex/mut_tas.c: mvdir storage/bdb/mutex/mut_win32.c: mvdir storage/bdb/mutex/mutex.c: mvdir storage/bdb/mutex/tm.c: mvdir storage/bdb/mutex/uts4_cc.s: mvdir storage/bdb/os/os_abs.c: mvdir storage/bdb/os/os_alloc.c: mvdir storage/bdb/os/os_clock.c: mvdir storage/bdb/os/os_config.c: mvdir storage/bdb/os/os_dir.c: mvdir storage/bdb/os/os_errno.c: mvdir storage/bdb/os/os_fid.c: mvdir storage/bdb/os/os_fsync.c: mvdir storage/bdb/os/os_handle.c: mvdir storage/bdb/os/os_id.c: mvdir storage/bdb/os/os_map.c: mvdir storage/bdb/os/os_method.c: mvdir storage/bdb/os/os_oflags.c: mvdir storage/bdb/os/os_open.c: mvdir storage/bdb/os/os_region.c: mvdir storage/bdb/os/os_rename.c: mvdir storage/bdb/os/os_root.c: mvdir storage/bdb/os/os_rpath.c: mvdir storage/bdb/os/os_rw.c: mvdir storage/bdb/os/os_seek.c: mvdir 
storage/bdb/os/os_sleep.c: mvdir storage/bdb/os/os_spin.c: mvdir storage/bdb/os/os_stat.c: mvdir storage/bdb/os/os_tmpdir.c: mvdir storage/bdb/os/os_unlink.c: mvdir storage/bdb/os_vxworks/os_vx_abs.c: mvdir storage/bdb/os_vxworks/os_vx_config.c: mvdir storage/bdb/os_vxworks/os_vx_map.c: mvdir storage/bdb/os_win32/os_abs.c: mvdir storage/bdb/os_win32/os_clock.c: mvdir storage/bdb/os_win32/os_config.c: mvdir storage/bdb/os_win32/os_dir.c: mvdir storage/bdb/os_win32/os_errno.c: mvdir storage/bdb/os_win32/os_fid.c: mvdir storage/bdb/os_win32/os_fsync.c: mvdir storage/bdb/os_win32/os_handle.c: mvdir storage/bdb/os_win32/os_map.c: mvdir storage/bdb/os_win32/os_open.c: mvdir storage/bdb/os_win32/os_rename.c: mvdir storage/bdb/os_win32/os_rw.c: mvdir storage/bdb/os_win32/os_seek.c: mvdir storage/bdb/os_win32/os_sleep.c: mvdir storage/bdb/os_win32/os_spin.c: mvdir storage/bdb/os_win32/os_stat.c: mvdir storage/bdb/os_win32/os_type.c: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.pm: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.pod: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB.xs: mvdir storage/bdb/perl/BerkeleyDB/Changes: mvdir storage/bdb/perl/BerkeleyDB/MANIFEST: mvdir storage/bdb/perl/BerkeleyDB/Makefile.PL: mvdir storage/bdb/perl/BerkeleyDB/README: mvdir storage/bdb/perl/BerkeleyDB/Todo: mvdir storage/bdb/perl/BerkeleyDB/config.in: mvdir storage/bdb/perl/BerkeleyDB/constants.h: mvdir storage/bdb/perl/BerkeleyDB/constants.xs: mvdir storage/bdb/perl/BerkeleyDB/dbinfo: mvdir storage/bdb/perl/BerkeleyDB/mkconsts: mvdir storage/bdb/perl/BerkeleyDB/mkpod: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm: mvdir storage/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm: mvdir storage/bdb/perl/BerkeleyDB/hints/dec_osf.pl: mvdir storage/bdb/perl/BerkeleyDB/hints/irix_6_5.pl: mvdir storage/bdb/perl/BerkeleyDB/hints/solaris.pl: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_01: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_02: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_03: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_04: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004_05: mvdir storage/bdb/perl/BerkeleyDB/patches/5.004: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_01: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_02: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005_03: mvdir storage/bdb/perl/BerkeleyDB/patches/5.005: mvdir storage/bdb/perl/BerkeleyDB/patches/5.6.0: mvdir storage/bdb/perl/BerkeleyDB/ppport.h: mvdir storage/bdb/perl/BerkeleyDB/scan: mvdir storage/bdb/perl/BerkeleyDB/t/btree.t: mvdir storage/bdb/perl/BerkeleyDB/t/destroy.t: mvdir storage/bdb/perl/BerkeleyDB/t/env.t: mvdir storage/bdb/perl/BerkeleyDB/t/examples.t.T: mvdir storage/bdb/perl/BerkeleyDB/t/examples.t: mvdir storage/bdb/perl/BerkeleyDB/t/examples3.t.T: mvdir storage/bdb/perl/BerkeleyDB/t/examples3.t: mvdir storage/bdb/perl/BerkeleyDB/t/filter.t: mvdir storage/bdb/perl/BerkeleyDB/t/hash.t: mvdir storage/bdb/perl/BerkeleyDB/t/join.t: mvdir storage/bdb/perl/BerkeleyDB/t/mldbm.t: mvdir storage/bdb/perl/BerkeleyDB/t/queue.t: mvdir storage/bdb/perl/BerkeleyDB/t/recno.t: mvdir storage/bdb/perl/BerkeleyDB/t/strict.t: mvdir storage/bdb/perl/BerkeleyDB/t/subdb.t: mvdir storage/bdb/perl/BerkeleyDB/t/txn.t: mvdir storage/bdb/perl/BerkeleyDB/typemap: mvdir storage/bdb/perl/BerkeleyDB/t/unknown.t: mvdir storage/bdb/perl/BerkeleyDB/t/util.pm: mvdir storage/bdb/perl/DB_File/Changes: mvdir storage/bdb/perl/DB_File/DB_File.pm: mvdir storage/bdb/perl/DB_File/DB_File.xs: mvdir 
storage/bdb/perl/DB_File/DB_File_BS: mvdir storage/bdb/perl/DB_File/MANIFEST: mvdir storage/bdb/perl/DB_File/Makefile.PL: mvdir storage/bdb/perl/DB_File/README: mvdir storage/bdb/perl/DB_File/config.in: mvdir storage/bdb/perl/DB_File/dbinfo: mvdir storage/bdb/perl/DB_File/fallback.h: mvdir storage/bdb/perl/DB_File/fallback.xs: mvdir storage/bdb/perl/DB_File/hints/dynixptx.pl: mvdir storage/bdb/perl/DB_File/hints/sco.pl: mvdir storage/bdb/perl/DB_File/patches/5.004_01: mvdir storage/bdb/perl/DB_File/patches/5.004_02: mvdir storage/bdb/perl/DB_File/patches/5.004_03: mvdir storage/bdb/perl/DB_File/patches/5.004_04: mvdir storage/bdb/perl/DB_File/patches/5.004_05: mvdir storage/bdb/perl/DB_File/patches/5.004: mvdir storage/bdb/perl/DB_File/patches/5.005_01: mvdir storage/bdb/perl/DB_File/patches/5.005_02: mvdir storage/bdb/perl/DB_File/patches/5.005_03: mvdir storage/bdb/perl/DB_File/patches/5.005: mvdir storage/bdb/perl/DB_File/patches/5.6.0: mvdir storage/bdb/perl/DB_File/ppport.h: mvdir storage/bdb/perl/DB_File/t/db-btree.t: mvdir storage/bdb/perl/DB_File/t/db-hash.t: mvdir storage/bdb/perl/DB_File/t/db-recno.t: mvdir storage/bdb/perl/DB_File/typemap: mvdir storage/bdb/perl/DB_File/version.c: mvdir storage/bdb/qam/qam.c: mvdir storage/bdb/qam/qam.src: mvdir storage/bdb/qam/qam_conv.c: mvdir storage/bdb/qam/qam_files.c: mvdir storage/bdb/qam/qam_method.c: mvdir storage/bdb/qam/qam_open.c: mvdir storage/bdb/qam/qam_rec.c: mvdir storage/bdb/qam/qam_stat.c: mvdir storage/bdb/qam/qam_upgrade.c: mvdir storage/bdb/qam/qam_verify.c: mvdir storage/bdb/rep/rep_method.c: mvdir storage/bdb/rep/rep_record.c: mvdir storage/bdb/rep/rep_region.c: mvdir storage/bdb/rep/rep_util.c: mvdir storage/bdb/rpc_client/client.c: mvdir storage/bdb/rpc_client/gen_client_ret.c: mvdir storage/bdb/rpc_server/c/db_server_proc.c.in: mvdir storage/bdb/rpc_server/c/db_server_util.c: mvdir storage/bdb/rpc_server/clsrv.html: mvdir storage/bdb/rpc_server/cxx/db_server_cxxproc.cpp: mvdir storage/bdb/rpc_server/cxx/db_server_cxxutil.cpp: mvdir storage/bdb/rpc_server/java/DbDispatcher.java: mvdir storage/bdb/rpc_server/java/DbServer.java: mvdir storage/bdb/rpc_server/java/FreeList.java: mvdir storage/bdb/rpc_server/java/LocalIterator.java: mvdir storage/bdb/rpc_server/java/README: mvdir storage/bdb/rpc_server/java/RpcDb.java: mvdir storage/bdb/rpc_server/java/RpcDbEnv.java: mvdir storage/bdb/rpc_server/java/RpcDbTxn.java: mvdir storage/bdb/rpc_server/java/RpcDbc.java: mvdir storage/bdb/rpc_server/java/Timer.java: mvdir storage/bdb/rpc_server/java/jrpcgen.jar: mvdir storage/bdb/rpc_server/java/oncrpc.jar: mvdir storage/bdb/rpc_server/rpc.src: mvdir storage/bdb/rpc_server/java/gen/DbServerStub.java: mvdir storage/bdb/rpc_server/java/gen/__db_associate_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_associate_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_create_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_create_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_cursor_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_cursor_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_del_msg.java: mvdir 
storage/bdb/rpc_server/java/gen/__db_del_reply.java: mvdir storage/bdb/rpc_server/java/s_jrpcgen: mvdir storage/bdb/rpc_server/java/gen/__db_encrypt_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_encrypt_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_extentsize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_extentsize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_flags_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_flags_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_get_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_get_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_nelem_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_h_nelem_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_join_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_join_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_key_range_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_key_range_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_lorder_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_lorder_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_open_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_open_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_pagesize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_pagesize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_pget_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_pget_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_put_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_put_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_delim_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_delim_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_len_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_len_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_pad_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_re_pad_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_remove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_remove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_rename_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_rename_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_stat_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_stat_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_sync_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_sync_reply.java: mvdir storage/bdb/rpc_server/java/gen/__db_truncate_msg.java: mvdir storage/bdb/rpc_server/java/gen/__db_truncate_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_count_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_count_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_del_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_del_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_dup_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_dup_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_get_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_get_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_pget_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_pget_reply.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_put_msg.java: mvdir storage/bdb/rpc_server/java/gen/__dbc_put_reply.java: mvdir 
storage/bdb/rpc_server/java/gen/__env_cachesize_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_cachesize_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_close_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_close_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_create_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_create_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbremove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbremove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbrename_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_dbrename_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_encrypt_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_encrypt_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_flags_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_flags_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_open_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_open_reply.java: mvdir storage/bdb/rpc_server/java/gen/__env_remove_msg.java: mvdir storage/bdb/rpc_server/java/gen/__env_remove_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_abort_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_abort_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_begin_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_begin_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_commit_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_commit_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_discard_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_discard_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_prepare_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_prepare_reply.java: mvdir storage/bdb/rpc_server/java/gen/__txn_recover_msg.java: mvdir storage/bdb/rpc_server/java/gen/__txn_recover_reply.java: mvdir storage/bdb/rpc_server/java/gen/db_server.java: mvdir storage/bdb/tcl/tcl_compat.c: mvdir storage/bdb/tcl/tcl_db.c: mvdir storage/bdb/tcl/tcl_db_pkg.c: mvdir storage/bdb/tcl/docs/db.html: mvdir storage/bdb/tcl/docs/env.html: mvdir storage/bdb/tcl/docs/historic.html: mvdir storage/bdb/tcl/docs/index.html: mvdir storage/bdb/tcl/docs/library.html: mvdir storage/bdb/tcl/docs/lock.html: mvdir storage/bdb/tcl/docs/log.html: mvdir storage/bdb/tcl/docs/mpool.html: mvdir storage/bdb/tcl/docs/rep.html: mvdir storage/bdb/tcl/docs/test.html: mvdir storage/bdb/tcl/docs/txn.html: mvdir storage/bdb/tcl/tcl_dbcursor.c: mvdir storage/bdb/tcl/tcl_env.c: mvdir storage/bdb/tcl/tcl_internal.c: mvdir storage/bdb/tcl/tcl_lock.c: mvdir storage/bdb/tcl/tcl_log.c: mvdir storage/bdb/tcl/tcl_mp.c: mvdir storage/bdb/tcl/tcl_rep.c: mvdir storage/bdb/tcl/tcl_txn.c: mvdir storage/bdb/tcl/tcl_util.c: mvdir storage/bdb/test/archive.tcl: mvdir storage/bdb/test/bigfile001.tcl: mvdir storage/bdb/test/bigfile002.tcl: mvdir storage/bdb/test/byteorder.tcl: mvdir storage/bdb/test/conscript.tcl: mvdir storage/bdb/test/dbm.tcl: mvdir storage/bdb/test/dbscript.tcl: mvdir storage/bdb/test/ddoyscript.tcl: mvdir storage/bdb/test/ddscript.tcl: mvdir storage/bdb/test/dead001.tcl: mvdir storage/bdb/test/dead002.tcl: mvdir storage/bdb/test/dead003.tcl: mvdir storage/bdb/test/dead004.tcl: mvdir storage/bdb/test/dead005.tcl: mvdir storage/bdb/test/dead006.tcl: mvdir storage/bdb/test/dead007.tcl: mvdir storage/bdb/test/env001.tcl: mvdir storage/bdb/test/env002.tcl: mvdir storage/bdb/test/env003.tcl: mvdir storage/bdb/test/env004.tcl: mvdir storage/bdb/test/env005.tcl: mvdir storage/bdb/test/env006.tcl: mvdir 
storage/bdb/test/env007.tcl: mvdir storage/bdb/test/env008.tcl: mvdir storage/bdb/test/env009.tcl: mvdir storage/bdb/test/env010.tcl: mvdir storage/bdb/test/env011.tcl: mvdir storage/bdb/test/hsearch.tcl: mvdir storage/bdb/test/join.tcl: mvdir storage/bdb/test/lock001.tcl: mvdir storage/bdb/test/lock002.tcl: mvdir storage/bdb/test/lock003.tcl: mvdir storage/bdb/test/lock004.tcl: mvdir storage/bdb/test/lock005.tcl: mvdir storage/bdb/test/lockscript.tcl: mvdir storage/bdb/test/log001.tcl: mvdir storage/bdb/test/log002.tcl: mvdir storage/bdb/test/log003.tcl: mvdir storage/bdb/test/log004.tcl: mvdir storage/bdb/test/log005.tcl: mvdir storage/bdb/test/logtrack.tcl: mvdir storage/bdb/test/mdbscript.tcl: mvdir storage/bdb/test/memp001.tcl: mvdir storage/bdb/test/memp002.tcl: mvdir storage/bdb/test/memp003.tcl: mvdir storage/bdb/test/mpoolscript.tcl: mvdir storage/bdb/test/mutex001.tcl: mvdir storage/bdb/test/mutex002.tcl: mvdir storage/bdb/test/mutex003.tcl: mvdir storage/bdb/test/mutexscript.tcl: mvdir storage/bdb/test/ndbm.tcl: mvdir storage/bdb/test/parallel.tcl: mvdir storage/bdb/test/recd001.tcl: mvdir storage/bdb/test/recd002.tcl: mvdir storage/bdb/test/recd003.tcl: mvdir storage/bdb/test/recd004.tcl: mvdir storage/bdb/test/recd005.tcl: mvdir storage/bdb/test/recd006.tcl: mvdir storage/bdb/test/recd007.tcl: mvdir storage/bdb/test/recd008.tcl: mvdir storage/bdb/test/recd009.tcl: mvdir storage/bdb/test/recd010.tcl: mvdir storage/bdb/test/recd011.tcl: mvdir storage/bdb/test/recd012.tcl: mvdir storage/bdb/test/recd013.tcl: mvdir storage/bdb/test/recd014.tcl: mvdir storage/bdb/test/recd015.tcl: mvdir storage/bdb/test/recd016.tcl: mvdir storage/bdb/test/recd017.tcl: mvdir storage/bdb/test/recd018.tcl: mvdir storage/bdb/test/recd019.tcl: mvdir storage/bdb/test/recd020.tcl: mvdir storage/bdb/test/recd15scr.tcl: mvdir storage/bdb/test/recdscript.tcl: mvdir storage/bdb/test/rep001.tcl: mvdir storage/bdb/test/rep002.tcl: mvdir storage/bdb/test/rep003.tcl: mvdir storage/bdb/test/rep004.tcl: mvdir storage/bdb/test/rep005.tcl: mvdir storage/bdb/test/reputils.tcl: mvdir storage/bdb/test/rpc001.tcl: mvdir storage/bdb/test/rpc002.tcl: mvdir storage/bdb/test/rpc003.tcl: mvdir storage/bdb/test/rpc004.tcl: mvdir storage/bdb/test/rpc005.tcl: mvdir storage/bdb/test/rsrc001.tcl: mvdir storage/bdb/test/rsrc002.tcl: mvdir storage/bdb/test/rsrc003.tcl: mvdir storage/bdb/test/rsrc004.tcl: mvdir storage/bdb/test/sdb001.tcl: mvdir storage/bdb/test/sdb002.tcl: mvdir storage/bdb/test/sdb003.tcl: mvdir storage/bdb/test/sdb004.tcl: mvdir storage/bdb/test/sdb005.tcl: mvdir storage/bdb/test/sdb006.tcl: mvdir storage/bdb/test/sdb007.tcl: mvdir storage/bdb/test/sdb008.tcl: mvdir storage/bdb/test/sdb009.tcl: mvdir storage/bdb/test/sdb010.tcl: mvdir storage/bdb/test/sdb011.tcl: mvdir storage/bdb/test/sdb012.tcl: mvdir storage/bdb/test/sdbscript.tcl: mvdir storage/bdb/test/sdbtest001.tcl: mvdir storage/bdb/test/sdbtest002.tcl: mvdir storage/bdb/test/sdbutils.tcl: mvdir storage/bdb/test/sec001.tcl: mvdir storage/bdb/test/sec002.tcl: mvdir storage/bdb/test/shelltest.tcl: mvdir storage/bdb/test/si001.tcl: mvdir storage/bdb/test/si002.tcl: mvdir storage/bdb/test/si003.tcl: mvdir storage/bdb/test/si004.tcl: mvdir storage/bdb/test/si005.tcl: mvdir storage/bdb/test/si006.tcl: mvdir storage/bdb/test/sindex.tcl: mvdir storage/bdb/test/sysscript.tcl: mvdir storage/bdb/test/test.tcl: mvdir storage/bdb/test/test001.tcl: mvdir storage/bdb/test/test002.tcl: mvdir storage/bdb/test/test003.tcl: mvdir storage/bdb/test/test004.tcl: mvdir 
storage/bdb/test/test005.tcl: mvdir storage/bdb/test/test006.tcl: mvdir storage/bdb/test/test007.tcl: mvdir storage/bdb/test/test008.tcl: mvdir storage/bdb/test/test009.tcl: mvdir storage/bdb/test/test010.tcl: mvdir storage/bdb/test/test011.tcl: mvdir storage/bdb/test/test012.tcl: mvdir storage/bdb/test/test013.tcl: mvdir storage/bdb/test/test014.tcl: mvdir storage/bdb/test/test015.tcl: mvdir storage/bdb/test/test016.tcl: mvdir storage/bdb/test/test017.tcl: mvdir storage/bdb/test/test018.tcl: mvdir storage/bdb/test/test019.tcl: mvdir storage/bdb/test/test020.tcl: mvdir storage/bdb/test/test021.tcl: mvdir storage/bdb/test/test022.tcl: mvdir storage/bdb/test/test023.tcl: mvdir storage/bdb/test/test024.tcl: mvdir storage/bdb/test/test025.tcl: mvdir storage/bdb/test/test026.tcl: mvdir storage/bdb/test/test027.tcl: mvdir storage/bdb/test/test028.tcl: mvdir storage/bdb/test/test029.tcl: mvdir storage/bdb/test/test030.tcl: mvdir storage/bdb/test/test031.tcl: mvdir storage/bdb/test/test032.tcl: mvdir storage/bdb/test/test033.tcl: mvdir storage/bdb/test/test034.tcl: mvdir storage/bdb/test/test035.tcl: mvdir storage/bdb/test/test036.tcl: mvdir storage/bdb/test/test037.tcl: mvdir storage/bdb/test/test038.tcl: mvdir storage/bdb/test/test039.tcl: mvdir storage/bdb/test/test040.tcl: mvdir storage/bdb/test/test041.tcl: mvdir storage/bdb/test/test042.tcl: mvdir storage/bdb/test/test043.tcl: mvdir storage/bdb/test/test044.tcl: mvdir storage/bdb/test/test045.tcl: mvdir storage/bdb/test/test046.tcl: mvdir storage/bdb/test/test047.tcl: mvdir storage/bdb/test/test048.tcl: mvdir storage/bdb/test/test049.tcl: mvdir storage/bdb/test/test050.tcl: mvdir storage/bdb/test/test051.tcl: mvdir storage/bdb/test/test052.tcl: mvdir storage/bdb/test/test053.tcl: mvdir storage/bdb/test/test054.tcl: mvdir storage/bdb/test/test055.tcl: mvdir storage/bdb/test/test056.tcl: mvdir storage/bdb/test/test057.tcl: mvdir storage/bdb/test/test058.tcl: mvdir storage/bdb/test/test059.tcl: mvdir storage/bdb/test/test060.tcl: mvdir storage/bdb/test/test061.tcl: mvdir storage/bdb/test/test062.tcl: mvdir storage/bdb/test/test063.tcl: mvdir storage/bdb/test/test064.tcl: mvdir storage/bdb/test/test065.tcl: mvdir storage/bdb/test/test066.tcl: mvdir storage/bdb/test/test067.tcl: mvdir storage/bdb/test/test068.tcl: mvdir storage/bdb/test/test069.tcl: mvdir storage/bdb/test/test070.tcl: mvdir storage/bdb/test/test071.tcl: mvdir storage/bdb/test/test072.tcl: mvdir storage/bdb/test/test073.tcl: mvdir storage/bdb/test/test074.tcl: mvdir storage/bdb/test/test075.tcl: mvdir storage/bdb/test/test076.tcl: mvdir storage/bdb/test/test077.tcl: mvdir storage/bdb/test/test078.tcl: mvdir storage/bdb/test/test079.tcl: mvdir storage/bdb/test/test080.tcl: mvdir storage/bdb/test/test081.tcl: mvdir storage/bdb/test/test082.tcl: mvdir storage/bdb/test/test083.tcl: mvdir storage/bdb/test/test084.tcl: mvdir storage/bdb/test/test085.tcl: mvdir storage/bdb/test/test086.tcl: mvdir storage/bdb/test/test087.tcl: mvdir storage/bdb/test/test088.tcl: mvdir storage/bdb/test/test089.tcl: mvdir storage/bdb/test/test090.tcl: mvdir storage/bdb/test/test091.tcl: mvdir storage/bdb/test/test092.tcl: mvdir storage/bdb/test/test093.tcl: mvdir storage/bdb/test/test094.tcl: mvdir storage/bdb/test/test095.tcl: mvdir storage/bdb/test/test096.tcl: mvdir storage/bdb/test/test097.tcl: mvdir storage/bdb/test/test098.tcl: mvdir storage/bdb/test/test099.tcl: mvdir storage/bdb/test/test100.tcl: mvdir storage/bdb/test/test101.tcl: mvdir storage/bdb/test/testparams.tcl: mvdir 
storage/bdb/test/testutils.tcl: mvdir storage/bdb/test/txn001.tcl: mvdir storage/bdb/test/txn002.tcl: mvdir storage/bdb/test/txn003.tcl: mvdir storage/bdb/test/txn004.tcl: mvdir storage/bdb/test/txn005.tcl: mvdir storage/bdb/test/txn006.tcl: mvdir storage/bdb/test/txn007.tcl: mvdir storage/bdb/test/txn008.tcl: mvdir storage/bdb/test/txn009.tcl: mvdir storage/bdb/test/txnscript.tcl: mvdir storage/bdb/test/update.tcl: mvdir storage/bdb/test/scr001/chk.code: mvdir storage/bdb/test/scr002/chk.def: mvdir storage/bdb/test/scr003/chk.define: mvdir storage/bdb/test/scr004/chk.javafiles: mvdir storage/bdb/test/scr005/chk.nl: mvdir storage/bdb/test/scr006/chk.offt: mvdir storage/bdb/test/scr007/chk.proto: mvdir storage/bdb/test/scr008/chk.pubdef: mvdir storage/bdb/test/scr009/chk.srcfiles: mvdir storage/bdb/test/scr010/chk.str: mvdir storage/bdb/test/scr010/spell.ok: mvdir storage/bdb/test/scr011/chk.tags: mvdir storage/bdb/test/scr012/chk.vx_code: mvdir storage/bdb/test/scr013/chk.stats: mvdir storage/bdb/test/scr014/chk.err: mvdir storage/bdb/test/scr015/README: mvdir storage/bdb/test/scr015/TestConstruct01.cpp: mvdir storage/bdb/test/scr015/TestConstruct01.testerr: mvdir storage/bdb/test/scr015/TestConstruct01.testout: mvdir storage/bdb/test/scr015/TestExceptInclude.cpp: mvdir storage/bdb/test/scr015/TestGetSetMethods.cpp: mvdir storage/bdb/test/scr015/TestKeyRange.cpp: mvdir storage/bdb/test/scr015/TestKeyRange.testin: mvdir storage/bdb/test/scr015/TestKeyRange.testout: mvdir storage/bdb/test/upgrade.tcl: mvdir storage/bdb/test/wordlist: mvdir storage/bdb/test/wrap.tcl: mvdir storage/bdb/test/scr015/TestLogc.cpp: mvdir storage/bdb/test/scr015/TestLogc.testout: mvdir storage/bdb/test/scr015/TestSimpleAccess.cpp: mvdir storage/bdb/test/scr015/TestSimpleAccess.testout: mvdir storage/bdb/test/scr015/TestTruncate.cpp: mvdir storage/bdb/test/scr015/TestTruncate.testout: mvdir storage/bdb/test/scr015/chk.cxxtests: mvdir storage/bdb/test/scr015/ignore: mvdir storage/bdb/test/scr015/testall: mvdir storage/bdb/test/scr015/testone: mvdir storage/bdb/test/scr016/CallbackTest.java: mvdir storage/bdb/test/scr016/CallbackTest.testout: mvdir storage/bdb/test/scr016/README: mvdir storage/bdb/test/scr016/TestAppendRecno.java: mvdir storage/bdb/test/scr016/TestAppendRecno.testout: mvdir storage/bdb/test/scr016/TestAssociate.java: mvdir storage/bdb/test/scr016/TestAssociate.testout: mvdir storage/bdb/test/scr016/TestClosedDb.java: mvdir storage/bdb/test/scr016/TestClosedDb.testout: mvdir storage/bdb/test/scr016/TestConstruct01.java: mvdir storage/bdb/test/scr016/TestConstruct01.testerr: mvdir storage/bdb/test/scr016/TestConstruct01.testout: mvdir storage/bdb/test/scr016/TestConstruct02.java: mvdir storage/bdb/test/scr016/TestConstruct02.testout: mvdir storage/bdb/test/scr016/TestDbtFlags.java: mvdir storage/bdb/test/scr016/TestDbtFlags.testerr: mvdir storage/bdb/test/scr016/TestDbtFlags.testout: mvdir storage/bdb/test/scr016/TestGetSetMethods.java: mvdir storage/bdb/test/scr016/TestKeyRange.java: mvdir storage/bdb/test/scr016/TestKeyRange.testout: mvdir storage/bdb/test/scr016/TestLockVec.java: mvdir storage/bdb/test/scr016/TestLockVec.testout: mvdir storage/bdb/test/scr016/TestLogc.java: mvdir storage/bdb/test/scr016/TestLogc.testout: mvdir storage/bdb/test/scr016/TestOpenEmpty.java: mvdir storage/bdb/test/scr016/TestOpenEmpty.testerr: mvdir storage/bdb/test/scr016/TestReplication.java: mvdir storage/bdb/test/scr016/TestRpcServer.java: mvdir storage/bdb/test/scr016/TestSameDbt.java: mvdir 
storage/bdb/test/scr016/TestSameDbt.testout: mvdir storage/bdb/test/scr016/TestSimpleAccess.java: mvdir storage/bdb/test/scr016/TestSimpleAccess.testout: mvdir storage/bdb/test/scr016/TestStat.java: mvdir storage/bdb/test/scr016/TestStat.testout: mvdir storage/bdb/test/scr016/TestTruncate.java: mvdir storage/bdb/test/scr016/TestTruncate.testout: mvdir storage/bdb/test/scr016/TestUtil.java: mvdir storage/bdb/test/scr016/TestXAServlet.java: mvdir storage/bdb/test/scr016/chk.javatests: mvdir storage/bdb/test/scr016/ignore: mvdir storage/bdb/test/scr016/testall: mvdir storage/bdb/test/scr016/testone: mvdir storage/bdb/test/scr017/O.BH: mvdir storage/bdb/test/scr017/O.R: mvdir storage/bdb/test/scr017/chk.db185: mvdir storage/bdb/test/scr017/t.c: mvdir storage/bdb/test/scr018/chk.comma: mvdir storage/bdb/test/scr018/t.c: mvdir storage/bdb/test/scr019/chk.include: mvdir storage/bdb/test/scr020/chk.inc: mvdir storage/bdb/test/scr021/chk.flags: mvdir storage/bdb/test/scr022/chk.rr: mvdir storage/bdb/txn/txn.c: mvdir storage/bdb/txn/txn.src: mvdir storage/bdb/txn/txn_method.c: mvdir storage/bdb/txn/txn_rec.c: mvdir storage/bdb/txn/txn_recover.c: mvdir storage/bdb/txn/txn_region.c: mvdir storage/bdb/txn/txn_stat.c: mvdir storage/bdb/txn/txn_util.c: mvdir storage/bdb/xa/xa.c: mvdir storage/bdb/xa/xa_db.c: mvdir storage/bdb/xa/xa_map.c: mvdir
Diffstat (limited to 'storage/ndb/src/kernel/blocks')
-rw-r--r--  storage/ndb/src/kernel/blocks/ERROR_codes.txt | 451
-rw-r--r--  storage/ndb/src/kernel/blocks/Makefile.am | 19
-rw-r--r--  storage/ndb/src/kernel/blocks/NodeRestart.new.txt | 82
-rw-r--r--  storage/ndb/src/kernel/blocks/NodeRestart.txt | 80
-rw-r--r--  storage/ndb/src/kernel/blocks/Start.txt | 97
-rw-r--r--  storage/ndb/src/kernel/blocks/SystemRestart.new.txt | 61
-rw-r--r--  storage/ndb/src/kernel/blocks/SystemRestart.txt | 61
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.cpp | 4660
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.hpp | 696
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.txt | 343
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp | 149
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupInit.cpp | 211
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp | 343
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Makefile.am | 24
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/read.cpp | 478
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 1393
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 124
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Makefile.am | 24
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 1470
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 269
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 11653
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/Makefile.am | 26
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt | 152
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt | 29
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt | 35
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 11884
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 1990
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt | 88
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/DropTable.txt | 140
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Event.txt | 102
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Makefile.am | 25
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl | 751
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp | 57
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl | 416
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp | 112
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1603
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 319
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 14272
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/LCP.txt | 35
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp | 275
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile | 12
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp | 158
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 2953
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 455
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 18661
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Makefile.am | 25
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile | 9
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp | 312
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp | 235
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp | 464
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1974
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 368
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 13098
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp | 136
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 2409
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 473
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp | 273
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 586
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 411
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 2052
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp | 384
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 1343
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 569
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp | 596
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 711
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp | 370
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 556
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 1185
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp | 230
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp | 1021
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp | 212
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 1150
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp | 284
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Makefile.am | 41
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Notes.txt | 183
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 1291
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 175
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp | 443
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 317
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp | 183
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 513
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 581
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 1041
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 449
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp | 709
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/Makefile.am | 34
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/Times.txt | 151
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html | 120
-rw-r--r--  storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 2589
-rw-r--r--  storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp | 485
-rw-r--r--  storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt | 68
-rw-r--r--  storage/ndb/src/kernel/blocks/dbutil/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/Grep.cpp | 2010
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/Grep.hpp | 535
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/GrepInit.cpp | 164
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/systab_test/Makefile | 12
-rw-r--r--  storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp | 138
-rw-r--r--  storage/ndb/src/kernel/blocks/mutexes.hpp | 39
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am | 26
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 376
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 117
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 2695
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 94
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 1033
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 234
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp | 695
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile | 27
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp | 20
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp | 116
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp | 219
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp | 100
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Makefile.am | 27
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp | 18
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp | 166
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp | 204
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile | 13
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp | 193
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 1018
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp | 127
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp | 114
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp | 261
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp | 200
-rw-r--r--  storage/ndb/src/kernel/blocks/new-block.tar.gz | bin 0 -> 1816 bytes
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/Makefile.am | 25
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 392
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 106
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 3928
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/timer.hpp | 72
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.cpp | 4073
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.hpp | 600
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.txt | 192
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/SumaInit.cpp | 192
-rw-r--r--  storage/ndb/src/kernel/blocks/trix/Makefile.am | 23
-rw-r--r--  storage/ndb/src/kernel/blocks/trix/Trix.cpp | 967
-rw-r--r--  storage/ndb/src/kernel/blocks/trix/Trix.hpp | 191
139 files changed, 139914 insertions, 0 deletions
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
new file mode 100644
index 00000000000..a30021607cc
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -0,0 +1,451 @@
+Next QMGR 1
+Next NDBCNTR 1000
+Next NDBFS 2000
+Next DBACC 3002
+Next DBTUP 4013
+Next DBLQH 5042
+Next DBDICT 6006
+Next DBDIH 7174
+Next DBTC 8035
+Next CMVMI 9000
+Next BACKUP 10022
+Next DBUTIL 11002
+Next DBTUX 12007
+Next SUMA 13001
+
+TESTING NODE FAILURE, ARBITRATION
+---------------------------------
+
+911 - 919:
+Crash president when it starts to run in ArbitState 1-9.
+
+910: Crash new president after node crash
+
+ERROR CODES FOR TESTING NODE FAILURE, GLOBAL CHECKPOINT HANDLING:
+-----------------------------------------------------------------
+
+7000:
+Insert system error in master when global checkpoint is idle.
+
+7001:
+Insert system error in master after receiving GCP_PREPARE from
+all nodes in the cluster.
+
+7002:
+Insert system error in master after receiving GCP_NODEFINISH from
+all nodes in the cluster.
+
+7003:
+Insert system error in master after receiving GCP_SAVECONF from
+all nodes in the cluster.
+
+7004:
+Insert system error in master after completing global checkpoint with
+all nodes in the cluster.
+
+7005:
+Insert system error in GCP participant when receiving GCP_PREPARE.
+
+7006:
+Insert system error in GCP participant when receiving GCP_COMMIT.
+
+7007:
+Insert system error in GCP participant when receiving GCP_TCFINISHED.
+
+7008:
+Insert system error in GCP participant when receiving COPY_GCICONF.
+
+5000:
+Insert system error in GCP participant when receiving GCP_SAVEREQ.
+
+5007:
+Delay GCP_SAVEREQ by 10 secs
+
+ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
+-----------------------------------------------------------------
+
+7009:
+Insert system error in master when local checkpoint is idle.
+
+7010:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = CALCULATE_KEEP_GCI.
+
+7011:
+Stop local checkpoint in the state CALCULATE_KEEP_GCI.
+
+7012:
+Restart local checkpoint after stopping in CALCULATE_KEEP_GCI.
+
+Method:
+1) Error 7011 in master, wait until report of stopped.
+2) Error xxxx in participant to crash it.
+3) Error 7012 in master to start again.
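+
+A driver-side sketch of this method (illustrative only; NdbRestarter is the
+ndb test-suite helper assumed here, and the participant node and its error
+code are placeholders, matching the "xxxx" above):
+
+  NdbRestarter restarter;
+  int master = restarter.getMasterNodeId();
+  restarter.insertErrorInNode(master, 7011);       // 1) stop LCP in CALCULATE_KEEP_GCI
+  restarter.insertErrorInNode(participantNodeId,   // 2) crash the chosen participant
+                              participantErrorCode);
+  restarter.insertErrorInNode(master, 7012);       // 3) let the master start again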
+
+7013:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = COPY_GCI before sending COPY_GCIREQ.
+
+7014:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = TC_CLOPSIZE before sending TC_CLOPSIZEREQ.
+
+7015:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = START_LCP_ROUND before sending START_LCP_ROUND.
+
+7016:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = START_LCP_ROUND after receiving LCP_REPORT.
+
+7017:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = TAB_COMPLETED.
+
+7018:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = TAB_SAVED before sending DIH_LCPCOMPLETE.
+
+7019:
+Insert system error in master when local checkpoint is in the
+state clcpStatus = IDLE before sending CONTINUEB(ZCHECK_TC_COUNTER).
+
+7020:
+Insert system error in local checkpoint participant at reception of
+COPY_GCIREQ.
+
+7075: Master
+Don't send any LCP_FRAG_ORD(last=true)
+And crash when all have "not" been sent
+
+8000: Crash participant when receiving TCGETOPSIZEREQ
+8001: Crash participant when receiving TC_CLOPSIZEREQ
+5010: Crash any when receiving LCP_FRAGORD
+
+7021: Crash in master when receiving START_LCP_REQ
+7022: Crash in !master when receiving START_LCP_REQ
+
+7023: Crash in master when sending START_LCP_CONF
+7024: Crash in !master when sending START_LCP_CONF
+
+7025: Crash in master when receiving LCP_FRAG_REP
+7016: Crash in !master when receiving LCP_FRAG_REP
+
+7026: Crash in master when changing state to LCP_TAB_COMPLETED
+7017: Crash in !master when changing state to LCP_TAB_COMPLETED
+
+7027: Crash in master when changing state to LCP_TAB_SAVED
+7018: Crash in !master when changing state to LCP_TAB_SAVED
+
+ERROR CODES FOR TESTING NODE FAILURE, FAILURE IN COPY FRAGMENT PROCESS:
+-----------------------------------------------------------------------
+
+5002:
+Insert node failure in starting node when receiving a tuple copied from the copy node
+as part of copy fragment process.
+
+5003:
+Insert node failure when receiving ABORT signal.
+
+5004:
+Insert node failure handling when receiving COMMITREQ.
+
+5005:
+Insert node failure handling when receiving COMPLETEREQ.
+
+5006:
+Insert node failure handling when receiving ABORTREQ.
+
+These error codes can be combined with the error codes for testing time-out
+handling in DBTC to ensure that node failures are also handled correctly
+during time-out handling. They can also be used to test handling of
+multiple node failures.
+
+ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBLQH
+-------------------------------------------------
+5011:
+Delay execution of COMMIT signal 2 seconds to generate time-out.
+
+5012 (use 5017):
+First delay execution of COMMIT signal 2 seconds to generate COMMITREQ.
+Delay execution of COMMITREQ signal 2 seconds to generate time-out.
+
+5013:
+Delay execution of COMPLETE signal 2 seconds to generate time-out.
+
+5014 (use 5018):
+First delay execution of COMPLETE signal 2 seconds to generate COMPLETEREQ.
+Delay execution of COMPLETEREQ signal 2 seconds to generate time-out.
+
+5015:
+Delay execution of ABORT signal 2 seconds to generate time-out.
+
+5016: (ABORTREQ only as part of take-over)
+Delay execution of ABORTREQ signal 2 seconds to generate time-out.
+
+5031: lqhKeyRef, ZNO_TC_CONNECT_ERROR
+5032: lqhKeyRef, ZTEMPORARY_REDO_LOG_FAILURE
+5033: lqhKeyRef, ZTAIL_PROBLEM_IN_LOG_ERROR
+
+5034: Don't pop scan queue
+
+5035: Delay ACC_CONTOPCONT
+
+5038: Drop LQHKEYREQ + set 5039
+5039: Drop ABORT + set 5003
+
+8048: Make TC not choose own node for simple/dirty read
+5041: Crash when receiving a simple read from another TC on a different node
+
+8050: Send TCKEYREF if the operation is non-local
+
+ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
+-------------------------------------------------
+8040:
+Delay execution of ABORTED signal 2 seconds to generate time-out.
+
+8041:
+Delay execution of COMMITTED signal 2 seconds to generate time-out.
+
+8042 (use 8046):
+Delay execution of COMMITTED signal 2 seconds to generate COMMITCONF.
+Delay execution of COMMITCONF signal 2 seconds to generate time-out.
+
+8043:
+Delay execution of COMPLETED signal 2 seconds to generate time-out.
+
+8044 (use 8047):
+Delay execution of COMPLETED signal 2 seconds to generate COMPLETECONF.
+Delay execution of COMPLETECONF signal 2 seconds to generate time-out.
+
+8045: (ABORTCONF only as part of take-over)
+Delay execution of ABORTCONF signal 2 seconds to generate time-out.
+
+ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
+-------------------------------------------------
+
+8003: Throw away a LQHKEYCONF in state STARTED
+8004: Throw away a LQHKEYCONF in state RECEIVING
+8005: Throw away a LQHKEYCONF in state REC_COMMITTING
+8006: Throw away a LQHKEYCONF in state START_COMMITTING
+
+8007: Ignore send of LQHKEYREQ in state STARTED
+8008: Ignore send of LQHKEYREQ in state START_COMMITTING
+
+8009: Ignore send of LQHKEYREQ+ATTRINFO in state STARTED
+8010: Ignore send of LQHKEYREQ+ATTRINFO in state START_COMMITTING
+
+8011: Abort at send of CONTINUEB(ZSEND_ATTRINFO) in state STARTED
+8012: Abort at send of CONTINUEB(ZSEND_ATTRINFO) in state START_COMMITTING
+
+8013: Ignore send of CONTINUEB(ZSEND_COMPLETE_LOOP) (should crash eventually)
+8014: Ignore send of CONTINUEB(ZSEND_COMMIT_LOOP) (should crash eventually)
+
+8015: Ignore ATTRINFO signal in DBTC in state REC_COMMITTING
+8016: Ignore ATTRINFO signal in DBTC in state RECEIVING
+
+8017: Return immediately from DIVERIFYCONF (should crash eventually)
+8018: Throw away a COMMITTED signal
+8019: Throw away a COMPLETED signal
+
+TESTING TAKE-OVER FUNCTIONALITY IN DBTC
+---------------------------------------
+
+8002: Crash when sending LQHKEYREQ
+8029: Crash when receiving LQHKEYCONF
+8030: Crash when receiving COMMITTED
+8031: Crash when receiving COMPLETED
+8020: Crash when all COMMITTED has arrived
+8021: Crash when all COMPLETED has arrived
+8022: Crash when all LQHKEYCONF has arrived
+
+COMBINATION OF TIME-OUT + CRASH
+-------------------------------
+
+8023 (use 8024): Ignore LQHKEYCONF and crash when ABORTED signal arrives by setting 8024
+8025 (use 8026): Ignore COMMITTED and crash when COMMITCONF signal arrives by setting 8026
+8027 (use 8028): Ignore COMPLETED and crash when COMPLETECONF signal arrives by setting 8028
+
+ABORT OF TCKEYREQ
+-----------------
+
+8032: No free TC records any more
+
+
+CMVMI
+-----
+9000 Set RestartOnErrorInsert to restart -n
+9998 Enter endless loop (trigger watchdog)
+9999 Crash system immediately
+
+Test Crashes in handling node restarts
+--------------------------------------
+
+7121: Crash after receiving permission to start (START_PERMCONF) in starting
+ node.
+7122: Crash master when receiving request for permission to start (START_PERMREQ).
+7123: Crash any non-starting node when receiving information about a starting node
+ (START_INFOREQ)
+7124: Respond negatively on an info request (START_INFOREQ)
+7125: Stop an invalidate-node-LCP process in the middle, to test that START_INFOREQs
+      stopped by long-running processes are handled correctly.
+7126: Allow node restarts for all nodes (used in conjunction with 7025)
+7127: Crash when receiving an INCL_NODEREQ message.
+7128: Crash master after receiving all INCL_NODECONF from all nodes
+7129: Crash master after receiving all INCL_NODECONF from all nodes and releasing
+ the lock on the dictionary
+7130: Crash starting node after receiving START_MECONF
+7131: Crash when receiving START_COPYREQ in master node
+7132: Crash when receiving START_COPYCONF in starting node
+
+DICT:
+6000 Crash during NR when receiving DICTSTARTREQ
+6001 Crash during NR when receiving SCHEMA_INFO
+6002 Crash during NR soon after sending GET_TABINFO_REQ
+
+LQH:
+5026 Crash when receiving COPY_ACTIVEREQ
+5027 Crash when receiving STAT_RECREQ
+
+Test Crashes in handling take over
+----------------------------------
+
+7133: Crash when receiving START_TOREQ
+7134: Crash master after receiving all START_TOCONF
+7135: Crash master after copying table 0 to starting node
+7136: Crash master after completing copy of tables
+7137: Crash master after adding a fragment before copying it
+7138: Crash when receiving CREATE_FRAGREQ in prepare phase
+7139: Crash when receiving CREATE_FRAGREQ in commit phase
+7140: Crash master when receiving all CREATE_FRAGCONF in prepare phase
+7141: Crash master when receiving all CREATE_FRAGCONF in commit phase
+7142: Crash master when receiving COPY_FRAGCONF
+7143: Crash master when receiving COPY_ACTIVECONF
+7144: Crash when receiving END_TOREQ
+7145: Crash master after receiving first END_TOCONF
+7146: Crash master after receiving all END_TOCONF
+7147: Crash master after receiving first START_TOCONF
+7148: Crash master after receiving first CREATE_FRAGCONF
+7152: Crash master after receiving first UPDATE_TOCONF
+7153: Crash master after receiving all UPDATE_TOCONF
+7154: Crash when receiving UPDATE_TOREQ
+7155: Crash master when completing writing start take over info
+7156: Crash master when completing writing end take over info
+
+Test failures in various states in take over functionality
+----------------------------------------------------------
+7157: Block take over at start take over
+7158: Block take over at sending of START_TOREQ
+7159: Block take over at selecting next fragment
+7160: Block take over at creating new fragment
+7161: Block take over at sending of CREATE_FRAGREQ in prepare phase
+7162: Block take over at sending of CREATE_FRAGREQ in commit phase
+7163: Block take over at sending of UPDATE_TOREQ at end of copy frag
+7164: Block take over at sending of END_TOREQ
+7169: Block take over at sending of UPDATE_TOREQ at end of copy
+
+5008: Crash at reception of EMPTY_LCPREQ (at master take over after NF)
+5009: Crash at sending of EMPTY_LCPCONF (at master take over after NF)
+
+Test Crashes in Handling Graceful Shutdown
+------------------------------------------
+7065: Crash when receiving STOP_PERMREQ in master
+7066: Crash when receiving STOP_PERMREQ in slave
+7067: Crash when receiving DIH_SWITCH_REPLICA_REQ
+7068: Crash when receiving DIH_SWITCH_REPLICA_CONF
+
+
+Backup Stuff:
+------------------------------------------
+10001: Crash on NODE_FAILREP in Backup coordinator
+10002: Crash on NODE_FAILREP when coordinatorTakeOver
+10003: Crash on PREP_CREATE_TRIG_{CONF/REF} (only coordinator)
+10004: Crash on START_BACKUP_{CONF/REF} (only coordinator)
+10005: Crash on CREATE_TRIG_{CONF/REF} (only coordinator)
+10006: Crash on WAIT_GCP_REF (only coordinator)
+10007: Crash on WAIT_GCP_CONF (only coordinator)
+10008: Crash on WAIT_GCP_CONF during start of backup (only coordinator)
+10009: Crash on WAIT_GCP_CONF during stop of backup (only coordinator)
+10010: Crash on BACKUP_FRAGMENT_CONF (only coordinator)
+10011: Crash on BACKUP_FRAGMENT_REF (only coordinator)
+10012: Crash on DROP_TRIG_{CONF/REF} (only coordinator)
+10013: Crash on STOP_BACKUP_{CONF/REF} (only coordinator)
+10014: Crash on DEFINE_BACKUP_REQ (participant)
+10015: Crash on START_BACKUP_REQ (participant)
+10016: Crash on BACKUP_FRAGMENT_REQ (participant)
+10017: Crash on SCAN_FRAGCONF (participant)
+10018: Crash on FSAPPENDCONF (participant)
+10019: Crash on TRIG_ATTRINFO (participant)
+10020: Crash on STOP_BACKUP_REQ (participant)
+10021: Crash on NODE_FAILREP in participant not becoming coordinator
+
+10022: Fake no backup records at DEFINE_BACKUP_REQ (participant)
+10023: Abort backup by error at reception of UTIL_SEQUENCE_CONF (code 300)
+10024: Abort backup by error at reception of DEFINE_BACKUP_CONF (code 301)
+10025: Abort backup by error at reception of CREATE_TRIG_CONF last (code 302)
+10026: Abort backup by error at reception of START_BACKUP_CONF (code 303)
+10027: Abort backup by error at reception of DEFINE_BACKUP_REQ at master (code 304)
+10028: Abort backup by error at reception of BACKUP_FRAGMENT_CONF at master (code 305)
+10029: Abort backup by error at reception of FSAPPENDCONF in slave (FileOrScanError = 5)
+10030: Simulate buffer full from trigger execution => abort backup
+
+11001: Send UTIL_SEQUENCE_REF (in master)
+
+5028: Crash when receiving LQHKEYREQ (in non-master)
+
+Failed Create Table:
+--------------------
+7173: Create table failed due to an insufficient number of fragment or
+      replica records.
+3001: Fail create 1st fragment
+4007 12001: Fail create 1st fragment
+4008 12002: Fail create 2nd fragment
+4009 12003: Fail create 1st attribute in 1st fragment
+4010 12004: Fail create last attribute in 1st fragment
+4011 12005: Fail create 1st attribute in 2nd fragment
+4012 12006: Fail create last attribute in 2nd fragment
+
+Drop Table/Index:
+-----------------
+4001: Crash on REL_TABMEMREQ in TUP
+4002: Crash on DROP_TABFILEREQ in TUP
+4003: Fail next trigger create in TUP
+8033: Fail next trigger create in TC
+8034: Fail next index create in TC
+
+
+
+System Restart:
+---------------
+
+5020: Force system to read pages from file when executing a prepare operation record
+3000: Delay writing of datapages in ACC when LCP is started
+4000: Delay writing of datapages in TUP when LCP is started
+7070: Set TimeBetweenLcp to min value
+7071: Set TimeBetweenLcp to max value
+7072: Split START_FRAGREQ into several log nodes
+7073: Don't include own node in START_FRAGREQ
+7074: 7072 + 7073
+
+Scan:
+------
+
+5021: Crash when receiving SCAN_NEXTREQ if sender is own node
+5022: Crash when receiving SCAN_NEXTREQ if sender is NOT own node
+5023: Drop SCAN_NEXTREQ if sender is own node
+5024: Drop SCAN_NEXTREQ if sender is NOT own node
+5025: Delay SCAN_NEXTREQ 1 second if sender is NOT own node
+5030: Drop all SCAN_NEXTREQ until the node is shut down with SYSTEM_ERROR
+      because of a scan fragment timeout
+
+Test routing of signals:
+-----------------------
+4006: Turn on routing of TRANSID_AI signals from TUP
+5029: Turn on routing of KEYINFO20 signals from LQH
+
+Ordered index:
+--------------
+
+Dbdict:
+-------
+6003 Crash in participant @ CreateTabReq::Prepare
+6004 Crash in participant @ CreateTabReq::Commit
+6005 Crash in participant @ CreateTabReq::CreateDrop
diff --git a/storage/ndb/src/kernel/blocks/Makefile.am b/storage/ndb/src/kernel/blocks/Makefile.am
new file mode 100644
index 00000000000..7ee90e6239f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/Makefile.am
@@ -0,0 +1,19 @@
+SUBDIRS = \
+ cmvmi \
+ dbacc \
+ dbdict \
+ dbdih \
+ dblqh \
+ dbtc \
+ dbtup \
+ ndbfs \
+ ndbcntr \
+ qmgr \
+ trix \
+ backup \
+ dbutil \
+ suma \
+ grep \
+ dbtux
+
+windoze-dsp:
diff --git a/storage/ndb/src/kernel/blocks/NodeRestart.new.txt b/storage/ndb/src/kernel/blocks/NodeRestart.new.txt
new file mode 100644
index 00000000000..00ab8f0c208
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/NodeRestart.new.txt
@@ -0,0 +1,82 @@
+
+Master DIH Starting DIH Starting DICT
+---------------------- ---------------------- ---------------------
+
+ Check for sysfile
+ DIH_RESTARTCONF ->
+
+******************************************************************************
+* NDB_STTOR internal startphase = 1
+******************************************************************************
+
+ Read schema file
+
+******************************************************************************
+* NDB_STTOR internal startphase = 2
+******************************************************************************
+
+ <- START_PERMREQ
+
+XXX
+
+START_PERMCONF ->
+
+******************************************************************************
+* NDB_STTOR internal startphase = 3
+******************************************************************************
+
+ <- START_MEREQ
+
+START_RECREQ -> starting LQH
+ <- START_RECCONF
+
+For each table
+ COPY_TABREQ -> starting DIH
+
+DICTSTARTREQ -> starting DICT
+ GET_SCHEMA_INFOREQ
+ (to master DICT)
+
+ ->SCHEMA_INFO
+ (schema file)
+
+ 1) For each table
+ If TableStatus OK
+ ReadTableFile
+ else
+ GET_TABINFOREQ
+ 2) DIADDTABREQ->DIH
+
+ For each local frag
+ ADD_FRAG_REQ -> local DICT
+ DI_ADD_TAB_CONF
+ <- DICTSTARTCONF
+
+INCL_NODEREQ -> all DIH
+
+START_MECONF -> starting DIH
+ (including sysfile)
+
+******************************************************************************
+* NDB_STTOR internal startphase = 5
+******************************************************************************
+
+ <- START_COPYREQ
+
+START_TOREQ -> all DIH
+
+For each fragment
+ CREATE_FRAGREQ -> all DIH
+
+ COPY-DATA (LQHKEYREQ++)
+
+ UPDATE_TOREQ -> all DIH
+
+ COPY_ACTIVEREQ -> starting LQH
+
+ CREATE_FRAGREQ -> all DIH
+
+START_COPYCONF ->
+
+LOCAL CHECKPOINT
+
diff --git a/storage/ndb/src/kernel/blocks/NodeRestart.txt b/storage/ndb/src/kernel/blocks/NodeRestart.txt
new file mode 100644
index 00000000000..e9f277bb39e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/NodeRestart.txt
@@ -0,0 +1,80 @@
+
+Master DIH Starting DIH Starting DICT
+---------------------- ---------------------- ---------------------
+
+ Check for sysfile
+ DIH_RESTARTCONF ->
+
+******************************************************************************
+* NDB_STTOR internal startphase = 1
+******************************************************************************
+
+ Read schema file
+
+******************************************************************************
+* NDB_STTOR internal startphase = 2
+******************************************************************************
+
+ <- START_PERMREQ
+
+XXX
+
+START_PERMCONF ->
+
+******************************************************************************
+* NDB_STTOR internal startphase = 3
+******************************************************************************
+
+ <- START_MEREQ
+
+START_RECREQ -> starting LQH
+ <- START_RECCONF
+
+DICTSTARTREQ -> starting DICT
+ GET_SCHEMA_INFOREQ
+ (to master DICT)
+
+ ->SCHEMA_INFO
+ (schema file)
+
+ 1) For each table
+ 1) If TableStatus match
+ ReadTableFile
+ else
+ GET_TABINFOREQ
+
+ <- DICTSTARTCONF
+
+For each table
+ COPY_TABREQ -> starting DIH
+
+INCL_NODEREQ -> all DIH
+
+START_MECONF -> starting DIH
+ (including sysfile)
+
+******************************************************************************
+* NDB_STTOR internal startphase = 5
+******************************************************************************
+
+ <- START_COPYREQ
+
+START_TOREQ -> all DIH
+
+For each fragment
+ ADD_FRAG_REQ -> local DICT -> LQHFRAGREQ -> starting LQH
+
+ CREATE_FRAGREQ -> all DIH
+
+ COPY-DATA (LQHKEYREQ++)
+
+ UPDATE_TOREQ -> all DIH
+
+ COPY_ACTIVEREQ -> starting LQH
+
+ CREATE_FRAGREQ -> all DIH
+
+START_COPYCONF ->
+
+LOCAL CHECKPOINT
+
diff --git a/storage/ndb/src/kernel/blocks/Start.txt b/storage/ndb/src/kernel/blocks/Start.txt
new file mode 100644
index 00000000000..3e805ebab55
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/Start.txt
@@ -0,0 +1,97 @@
+
+--- Start phase 1 - Qmgr -------------------------------------------
+
+1) Set timer 1 - TimeToWaitAlive
+
+2) Send CM_REGREQ to all connected(and connecting) nodes
+
+3) Wait until -
+a) The president answers CM_REGCONF
+b) All nodes have answered and I'm the candidate -> election won
+c) 30s has passed and I'm the candidate -> election won
+d) TimeToWaitAlive has passed -> Failure to start
+
+When receiving CM_REGCONF
+4) Send CM_NODEINFOREQ to all connected(and connecting) nodes
+ reported in CM_REGCONF
+
+5) Wait until -
+a) All CM_NODEINFO_CONF has arrived
+b) TimeToWaitAlive has passed -> Failure to start
+
+6) Send CM_ACKADD to president
+
+7) Wait until -
+a) Receive CM_ADD(CommitNew) from president -> I'm in the qmgr cluster
+b) TimeToWaitAlive has passed -> Failure to start
+
+NOTE:
+30s is hardcoded in 3c.
+TimeToWaitAlive should be at least X sec greater than 30s, i.e. 30+X sec
+to support "partial starts"
+
+NOTE:
+In 3b, a more correct number (instead of all) would be
+N-NG+1, where N is #nodes and NG is #node groups (NG = N/R, where R is #replicas)
+But Qmgr has no notion about node groups or replicas
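+
+For example, with N = 4 nodes and R = 2 replicas, NG = N/R = 2, so
+N-NG+1 = 3 answers would be enough instead of all 4.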
+
+--- Start phase X - Qmgr -------------------------------------------
+
+President - When accepting a CM_REGREQ
+1) Send CM_REGCONF to starting node
+2) Send CM_ADD(Prepare) to all started nodes + starting node
+3) Send CM_ADD(AddCommit) to all started nodes
+4) Send CM_ADD(CommitNew) to starting node
+
+Cluster participant -
+1) Wait for both CM_NODEINFOREQ from starting and CM_ADD(Prepare) from pres.
+2) Send CM_ACKADD(Prepare)
+3) Wait for CM_ADD(AddCommit) from president
+4) Send CM_ACKADD(AddCommit)
+
+--- Start phase 2 - NdbCntr ----------------------------------------
+
+- Use same TimeToWaitAliveTimer
+
+1) Check sysfile (DIH_RESTART_REQ)
+2) Read nodes (from Qmgr) P = qmgr president
+
+3) Send CNTR_MASTER_REQ to cntr(P)
+ including info in DIH_RESTART_REF/CONF
+
+4) Wait until -
+a) Receiving CNTR_START_CONF -> continue
+b) Receiving CNTR_START_REF -> P = node specified in REF, goto 3
+c) TimeToWaitAlive has passed -> Failure to start
+
+5) Run ndb-startphase 1
+
+--
+Initial start/System restart NdbCntr (on qmgr president node)
+
+1) Wait until -
+a) Receiving CNTR_START_REQ with GCI > than own GCI
+ send CNTR_START_REF to all waiting nodes
+b) Receiving all CNTR_START_REQ (for all defined nodes)
+c) TimeToWait has passed and partition win
+d) TimeToWait has passed and partitioning
+ and configuration "start with partition" = true
+
+2) Send CNTR_START_CONF to all nodes "with filesystem"
+
+3) Wait until -
+ Receiving CNTR_START_REP for all starting nodes
+
+4) Start waiting nodes (if any)
+
+NOTE:
+1c) Partition win = 1 node in each node group and 1 full node group
+1d) Partitioning = at least 1 node in each node group
+--
+Running NdbCntr
+
+When receiving CNTR_MASTER_REQ
+1) If I'm not master send CNTR_MASTER_REF (including master node id)
+2) If I'm master
+   Coordinate parallel node restarts
+ send CNTR_MASTER_CONF (node restart)
diff --git a/storage/ndb/src/kernel/blocks/SystemRestart.new.txt b/storage/ndb/src/kernel/blocks/SystemRestart.new.txt
new file mode 100644
index 00000000000..3738de28df8
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/SystemRestart.new.txt
@@ -0,0 +1,61 @@
+
+DIH DICT CNTR
+---------------------- ---------------------- ---------------------
+ <- DIHRESTARTREQ
+Check for sysfile
+DIH_RESTARTCONF ->
+
+NDB_STTORY -> DICT (sp=1)
+ Read schema file
+
+******************************************************************************
+* Elect master
+******************************************************************************
+
+-- Master DIH --
+
+Read sysfile
+
+COPY_GCIREQ -> all DIHs
+
+DICTSTARTREQ -> local DICT (master)
+
+ master
+ ======
+ For each table (that should be started)
+ 1) ReadTableFile
+ 2) DI_ADD_TAB_REQ -> local DIH
+
+1) ReadTableFile (DIH)
+2) COPY_TABREQ -> all DIH (but self)
+3) For each local frag
+ ADD_FRAG_REQ -> local DICT
+4) DI_ADD_TAB_CONF
+
+ SCHEMA_INFO -> all DICTs
+ Info = schema file
+
+ Participant
+ ===========
+ 1) For each table
+ 1) If TableStatus match
+ ReadTableFile
+ else
+ GET_TABINFOREQ
+ 2) WriteTableFile
+ 3) Parse Table Data
+ 4) DI_ADD_TAB_REQ -> local DIH
+
+ <- SCHEMA_INFOCONF
+
+
+ <- DICTSTARTCONF
+
+For each fragment
+ IF Fragment is logged
+ START_FRAGREQ -> LQH x
+
+ START_RECREQ -> all LQH
+ Note does not wait for START_FRAGCONF
+
+NDB_STARTCONF ->
diff --git a/storage/ndb/src/kernel/blocks/SystemRestart.txt b/storage/ndb/src/kernel/blocks/SystemRestart.txt
new file mode 100644
index 00000000000..235dfb968fa
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/SystemRestart.txt
@@ -0,0 +1,61 @@
+
+NDBCNTR DIH DICT
+---------------------- ---------------------- ---------------
+DIH_RESTARTREQ -> DIH
+ Check for sysfile
+ <- DIH_RESTARTCONF
+
+NDB_STTORY -> DICT
+sp = 1
+ Read schema file
+
+---- Master
+
+NDB_STARTREQ -> DIH
+ Read sysfile
+
+ COPY_GCIREQ -> all DIHs
+
+ DICTSTARTREQ -> local DICT
+ local
+ ======
+ SCHEMA_INFO -> all DICTs
+ Info = schema file
+
+ Participant
+ ===========
+ 1) For each table
+ If TableStatus match
+ ReadTableFile
+ else
+ GET_TABINFOREQ
+
+ <- SCHEMA_INFOCONF
+
+ local
+ ======
+ For each table
+ DIHSTARTTABREQ -> DIH
+
+ <- DICTSTARTCONF
+
+ For each table (STARTED)
+ Read table description
+ from disk
+
+ For each fragment
+      IF Fragment doesn't have LCP
+ ADD_FRAGREQ -> local DICT
+ 1) LQHFRAGREQ -> LQH x
+ 2) For each attribute
+ LQHADDATTREQ
+ IF Fragment is logged
+ START_FRAGREQ -> LQH x
+
+ START_RECREQ -> all LQH
+ Note does not wait for START_FRAGCONF
+
+ For each table
+ COPY_TABREQ -> all DIH (but self)
+
+ <- NDB_STARTCONF
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
new file mode 100644
index 00000000000..840466460cb
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -0,0 +1,4660 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Backup.hpp"
+
+#include <ndb_version.h>
+
+#include <NdbTCP.h>
+#include <Bitmask.hpp>
+
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+
+#include <signaldata/ScanFrag.hpp>
+
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/ListTables.hpp>
+
+#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/FsAppendReq.hpp>
+#include <signaldata/FsCloseReq.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+
+#include <signaldata/BackupImpl.hpp>
+#include <signaldata/BackupSignalData.hpp>
+#include <signaldata/BackupContinueB.hpp>
+#include <signaldata/EventReport.hpp>
+
+#include <signaldata/UtilSequence.hpp>
+
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <AttributeHeader.hpp>
+
+#include <signaldata/WaitGCP.hpp>
+
+#include <NdbTick.h>
+
+static NDB_TICKS startTime;
+
+static const Uint32 BACKUP_SEQUENCE = 0x1F000000;
+
+#ifdef VM_TRACE
+#define DEBUG_OUT(x) ndbout << x << endl
+#else
+#define DEBUG_OUT(x)
+#endif
+
+//#define DEBUG_ABORT
+
+//---------------------------------------------------------
+// Ignore this since a completed abort could have preceded
+// this message.
+//---------------------------------------------------------
+#define slaveAbortCheck() \
+if ((ptr.p->backupId != backupId) || \
+ (ptr.p->slaveState.getState() == ABORTING)) { \
+ jam(); \
+ return; \
+}
+
+#define masterAbortCheck() \
+if ((ptr.p->backupId != backupId) || \
+ (ptr.p->masterData.state.getState() == ABORTING)) { \
+ jam(); \
+ return; \
+}
+
+#define defineSlaveAbortCheck() \
+ if (ptr.p->slaveState.getState() == ABORTING) { \
+ jam(); \
+ closeFiles(signal, ptr); \
+ return; \
+ }
+
+static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE;
+
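+/**
+ * STTOR: in start phase 3 the node list is requested from NDBCNTR
+ * (READ_NODESREQ); in start phase 7, on an initial start and only on the
+ * master node, the backup id sequence is created in DBUTIL. All other
+ * phases simply reply with STTORRY.
+ */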
+void
+Backup::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 startphase = signal->theData[1];
+ const Uint32 typeOfStart = signal->theData[7];
+
+ if (startphase == 3) {
+ jam();
+ g_TypeOfStart = typeOfStart;
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+ }//if
+
+ if(startphase == 7 && g_TypeOfStart == NodeState::ST_INITIAL_START &&
+ c_masterNodeId == getOwnNodeId()){
+ jam();
+ createSequence(signal);
+ return;
+ }//if
+
+ sendSTTORRY(signal);
+ return;
+}//Backup::execSTTOR()
+
+void
+Backup::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+ ReadNodesConf * conf = (ReadNodesConf *)signal->getDataPtr();
+
+ c_aliveNodes.clear();
+
+ Uint32 count = 0;
+ for (Uint32 i = 0; i<MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(conf->allNodes, i)){
+ jam();
+ count++;
+
+ NodePtr node;
+ ndbrequire(c_nodes.seize(node));
+
+ node.p->nodeId = i;
+ if(NodeBitmask::get(conf->inactiveNodes, i)) {
+ jam();
+ node.p->alive = 0;
+ } else {
+ jam();
+ node.p->alive = 1;
+ c_aliveNodes.set(i);
+ }//if
+ }//if
+ }//for
+ c_masterNodeId = conf->masterNodeId;
+ ndbrequire(count == conf->noOfNodes);
+ sendSTTORRY(signal);
+}
+
+void
+Backup::sendSTTORRY(Signal* signal)
+{
+ signal->theData[0] = 0;
+ signal->theData[3] = 1;
+ signal->theData[4] = 3;
+ signal->theData[5] = 7;
+ signal->theData[6] = 255; // No more start phases from missra
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB);
+}
+
+void
+Backup::createSequence(Signal* signal)
+{
+ UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend();
+
+ req->senderData = RNIL;
+ req->sequenceId = BACKUP_SEQUENCE;
+ req->requestType = UtilSequenceReq::Create;
+
+ sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+ signal, UtilSequenceReq::SignalLength, JBB);
+}
+
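+/**
+ * CONTINUEB drives the internal polling loops: START_FILE_THREAD and
+ * BUFFER_UNDERFLOW re-check a backup file, BUFFER_FULL_SCAN re-checks the
+ * scan, BUFFER_FULL_FRAG_COMPLETE retries fragment completion, and
+ * BUFFER_FULL_META retries fetching table meta data from DBDICT once the
+ * control file buffer has drained.
+ */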
+void
+Backup::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ const Uint32 Tdata0 = signal->theData[0];
+ const Uint32 Tdata1 = signal->theData[1];
+ const Uint32 Tdata2 = signal->theData[2];
+
+ switch(Tdata0) {
+ case BackupContinueB::START_FILE_THREAD:
+ case BackupContinueB::BUFFER_UNDERFLOW:
+ {
+ jam();
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, Tdata1);
+ checkFile(signal, filePtr);
+ return;
+ }
+ break;
+ case BackupContinueB::BUFFER_FULL_SCAN:
+ {
+ jam();
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, Tdata1);
+ checkScan(signal, filePtr);
+ return;
+ }
+ break;
+ case BackupContinueB::BUFFER_FULL_FRAG_COMPLETE:
+ {
+ jam();
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, Tdata1);
+ fragmentCompleted(signal, filePtr);
+ return;
+ }
+ break;
+ case BackupContinueB::BUFFER_FULL_META:
+ {
+ jam();
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, Tdata1);
+
+ if (ptr.p->slaveState.getState() == ABORTING) {
+ jam();
+ closeFiles(signal, ptr);
+ return;
+ }//if
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+ FsBuffer & buf = filePtr.p->operation.dataBuffer;
+
+ if(buf.getFreeSize() + buf.getMinRead() < buf.getUsableSize()) {
+ jam();
+ TablePtr tabPtr;
+ c_tablePool.getPtr(tabPtr, Tdata2);
+
+ DEBUG_OUT("Backup - Buffer full - " << buf.getFreeSize()
+ << " + " << buf.getMinRead()
+ << " < " << buf.getUsableSize()
+ << " - tableId = " << tabPtr.p->tableId);
+
+ signal->theData[0] = BackupContinueB::BUFFER_FULL_META;
+ signal->theData[1] = Tdata1;
+ signal->theData[2] = Tdata2;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3);
+ return;
+ }//if
+
+ TablePtr tabPtr;
+ c_tablePool.getPtr(tabPtr, Tdata2);
+ GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = ptr.i;
+ req->requestType = GetTabInfoReq::RequestById |
+ GetTabInfoReq::LongSignalConf;
+ req->tableId = tabPtr.p->tableId;
+ sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+ return;
+ }
+ default:
+ ndbrequire(0);
+ }//switch
+}
+
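+/**
+ * DUMP_STATE_ORD codes handled here:
+ *  20 - set data/log buffer sizes and min/max write sizes
+ *  21 - start a backup (for testing) and record the start time
+ *  22 - remove the files belonging to a given backup sequence
+ *  23 - print all backup records and their files
+ *  24 - print the sizes of the internal pools
+ */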
+void
+Backup::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+
+ if(signal->theData[0] == 20){
+ if(signal->length() > 1){
+ c_defaults.m_dataBufferSize = (signal->theData[1] * 1024 * 1024);
+ }
+ if(signal->length() > 2){
+ c_defaults.m_logBufferSize = (signal->theData[2] * 1024 * 1024);
+ }
+ if(signal->length() > 3){
+ c_defaults.m_minWriteSize = signal->theData[3] * 1024;
+ }
+ if(signal->length() > 4){
+ c_defaults.m_maxWriteSize = signal->theData[4] * 1024;
+ }
+
+ infoEvent("Backup: data: %d log: %d min: %d max: %d",
+ c_defaults.m_dataBufferSize,
+ c_defaults.m_logBufferSize,
+ c_defaults.m_minWriteSize,
+ c_defaults.m_maxWriteSize);
+ return;
+ }
+ if(signal->theData[0] == 21){
+ BackupReq * req = (BackupReq*)signal->getDataPtrSend();
+ req->senderData = 23;
+ req->backupDataLen = 0;
+ sendSignal(BACKUP_REF, GSN_BACKUP_REQ,signal,BackupReq::SignalLength, JBB);
+ startTime = NdbTick_CurrentMillisecond();
+ return;
+ }
+
+ if(signal->theData[0] == 22){
+ const Uint32 seq = signal->theData[1];
+ FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend();
+ req->userReference = reference();
+ req->userPointer = 23;
+ req->directory = 1;
+ req->ownDirectory = 1;
+ FsOpenReq::setVersion(req->fileNumber, 2);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL);
+ FsOpenReq::v2_setSequence(req->fileNumber, seq);
+ FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId());
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
+ FsRemoveReq::SignalLength, JBA);
+ return;
+ }
+
+ if(signal->theData[0] == 23){
+ /**
+ * Print records
+ */
+ BackupRecordPtr ptr;
+ for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){
+ infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x",
+ ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
+ if(ptr.p->masterRef == reference()){
+ infoEvent(" MasterState: %d State: %d",
+ ptr.p->masterData.state.getState(),
+ ptr.p->slaveState.getState());
+ } else {
+ infoEvent(" State: %d", ptr.p->slaveState.getState());
+ }
+ BackupFilePtr filePtr;
+ for(ptr.p->files.first(filePtr); filePtr.i != RNIL;
+ ptr.p->files.next(filePtr)){
+ jam();
+ infoEvent(" file %d: type: %d open: %d running: %d done: %d scan: %d",
+ filePtr.i, filePtr.p->fileType, filePtr.p->fileOpened,
+ filePtr.p->fileRunning,
+ filePtr.p->fileDone, filePtr.p->scanRunning);
+ }
+ }
+ }
+ if(signal->theData[0] == 24){
+ /**
+ * Print size of records etc.
+ */
+ infoEvent("Backup - dump pool sizes");
+ infoEvent("BackupPool: %d BackupFilePool: %d TablePool: %d",
+ c_backupPool.getSize(), c_backupFilePool.getSize(),
+ c_tablePool.getSize());
+ infoEvent("AttrPool: %d TriggerPool: %d FragmentPool: %d",
+ c_backupPool.getSize(), c_backupFilePool.getSize(),
+ c_tablePool.getSize());
+ infoEvent("PagePool: %d",
+ c_pagePool.getSize());
+
+ }
+}
+
+bool
+Backup::findTable(const BackupRecordPtr & ptr,
+ TablePtr & tabPtr, Uint32 tableId) const
+{
+ for(ptr.p->tables.first(tabPtr);
+ tabPtr.i != RNIL;
+ ptr.p->tables.next(tabPtr)) {
+ jam();
+ if(tabPtr.p->tableId == tableId){
+ jam();
+ return true;
+ }//if
+ }//for
+ tabPtr.i = RNIL;
+ tabPtr.p = 0;
+ return false;
+}
+
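+/**
+ * Convert a count x accumulated over ms milliseconds into an approximate,
+ * roughly rounded per-second rate; returns 0 if either argument is 0.
+ * Used below when reporting rows/s and bytes/s for a completed backup.
+ */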
+static Uint32 xps(Uint32 x, Uint64 ms)
+{
+ float fx = x;
+ float fs = ms;
+
+ if(ms == 0 || x == 0) {
+ jam();
+ return 0;
+ }//if
+ jam();
+ return ((Uint32)(1000.0f * (fx + fs/2.1f))) / ((Uint32)fs);
+}
+
+struct Number {
+ Number(Uint32 r) { val = r;}
+ Number & operator=(Uint32 r) { val = r; return * this; }
+ Uint32 val;
+};
+
+NdbOut &
+operator<< (NdbOut & out, const Number & val){
+ char p = 0;
+ Uint32 loop = 1;
+ while(val.val > loop){
+ loop *= 1000;
+ p += 3;
+ }
+ if(loop != 1){
+ p -= 3;
+ loop /= 1000;
+ }
+
+ switch(p){
+ case 0:
+ break;
+ case 3:
+ p = 'k';
+ break;
+ case 6:
+ p = 'M';
+ break;
+ case 9:
+ p = 'G';
+ break;
+ default:
+ p = 0;
+ }
+ char str[2];
+ str[0] = p;
+ str[1] = 0;
+ Uint32 tmp = (val.val + (loop >> 1)) / loop;
+#if 1
+ if(p > 0)
+ out << tmp << str;
+ else
+ out << tmp;
+#else
+ out << val.val;
+#endif
+
+ return out;
+}
+
+void
+Backup::execBACKUP_CONF(Signal* signal)
+{
+ jamEntry();
+ BackupConf * conf = (BackupConf*)signal->getDataPtr();
+
+ ndbout_c("Backup %d has started", conf->backupId);
+}
+
+void
+Backup::execBACKUP_REF(Signal* signal)
+{
+ jamEntry();
+ BackupRef * ref = (BackupRef*)signal->getDataPtr();
+
+ ndbout_c("Backup (%d) has NOT started %d", ref->senderData, ref->errorCode);
+}
+
+void
+Backup::execBACKUP_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+ BackupCompleteRep* rep = (BackupCompleteRep*)signal->getDataPtr();
+
+ startTime = NdbTick_CurrentMillisecond() - startTime;
+
+ ndbout_c("Backup %d has completed", rep->backupId);
+ const Uint32 bytes = rep->noOfBytes;
+ const Uint32 records = rep->noOfRecords;
+
+ Number rps = xps(records, startTime);
+ Number bps = xps(bytes, startTime);
+
+ ndbout << " Data [ "
+ << Number(records) << " rows "
+ << Number(bytes) << " bytes " << startTime << " ms ] "
+ << " => "
+ << rps << " row/s & " << bps << "b/s" << endl;
+
+ bps = xps(rep->noOfLogBytes, startTime);
+ rps = xps(rep->noOfLogRecords, startTime);
+
+ ndbout << " Log [ "
+ << Number(rep->noOfLogRecords) << " log records "
+ << Number(rep->noOfLogBytes) << " bytes " << startTime << " ms ] "
+ << " => "
+ << rps << " records/s & " << bps << "b/s" << endl;
+
+}
+
+void
+Backup::execBACKUP_ABORT_REP(Signal* signal)
+{
+ jamEntry();
+ BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtr();
+
+ ndbout_c("Backup %d has been aborted %d", rep->backupId, rep->reason);
+}
+
+const TriggerEvent::Value triggerEventValues[] = {
+ TriggerEvent::TE_INSERT,
+ TriggerEvent::TE_UPDATE,
+ TriggerEvent::TE_DELETE
+};
+
+const char* triggerNameFormat[] = {
+ "NDB$BACKUP_%d_%d_INSERT",
+ "NDB$BACKUP_%d_%d_UPDATE",
+ "NDB$BACKUP_%d_%d_DELETE"
+};
+
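+/**
+ * The master and slave state machines are encoded as flat arrays of
+ * (from, to) pairs; CompoundState::setState() scans the pairs two entries
+ * at a time and requires that the requested transition is listed.
+ */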
+const Backup::State
+Backup::validMasterTransitions[] = {
+ INITIAL, DEFINING,
+ DEFINING, DEFINED,
+ DEFINED, STARTED,
+ STARTED, SCANNING,
+ SCANNING, STOPPING,
+ STOPPING, INITIAL,
+
+ DEFINING, ABORTING,
+ DEFINED, ABORTING,
+ STARTED, ABORTING,
+ SCANNING, ABORTING,
+ STOPPING, ABORTING,
+ ABORTING, ABORTING,
+
+ DEFINING, INITIAL,
+ ABORTING, INITIAL,
+ INITIAL, INITIAL
+};
+
+const Backup::State
+Backup::validSlaveTransitions[] = {
+ INITIAL, DEFINING,
+ DEFINING, DEFINED,
+ DEFINED, STARTED,
+  STARTED, STARTED, // Several START_BACKUP_REQs are sent
+ STARTED, SCANNING,
+ SCANNING, STARTED,
+ STARTED, STOPPING,
+ STOPPING, CLEANING,
+ CLEANING, INITIAL,
+
+ INITIAL, ABORTING, // Node fail
+ DEFINING, ABORTING,
+ DEFINED, ABORTING,
+ STARTED, ABORTING,
+ SCANNING, ABORTING,
+ STOPPING, ABORTING,
+ CLEANING, ABORTING, // Node fail w/ master takeover
+ ABORTING, ABORTING, // Slave who initiates ABORT should have this transition
+
+ ABORTING, INITIAL,
+ INITIAL, INITIAL
+};
+
+const Uint32
+Backup::validSlaveTransitionsCount =
+sizeof(Backup::validSlaveTransitions) / sizeof(Backup::State);
+
+const Uint32
+Backup::validMasterTransitionsCount =
+sizeof(Backup::validMasterTransitions) / sizeof(Backup::State);
+
+void
+Backup::CompoundState::setState(State newState){
+ bool found = false;
+ const State currState = state;
+ for(unsigned i = 0; i<noOfValidTransitions; i+= 2) {
+ jam();
+ if(validTransitions[i] == currState &&
+ validTransitions[i+1] == newState){
+ jam();
+ found = true;
+ break;
+ }
+ }
+ ndbrequire(found);
+
+ if (newState == INITIAL)
+ abortState = INITIAL;
+ if(newState == ABORTING && currState != ABORTING) {
+ jam();
+ abortState = currState;
+ }
+ state = newState;
+#ifdef DEBUG_ABORT
+ if (newState != currState) {
+ ndbout_c("%u: Old state = %u, new state = %u, abort state = %u",
+ id, currState, newState, abortState);
+ }
+#endif
+}
+
+void
+Backup::CompoundState::forceState(State newState)
+{
+ const State currState = state;
+ if (newState == INITIAL)
+ abortState = INITIAL;
+ if(newState == ABORTING && currState != ABORTING) {
+ jam();
+ abortState = currState;
+ }
+ state = newState;
+#ifdef DEBUG_ABORT
+ if (newState != currState) {
+ ndbout_c("%u: FORCE: Old state = %u, new state = %u, abort state = %u",
+ id, currState, newState, abortState);
+ }
+#endif
+}
+
+Backup::Table::Table(ArrayPool<Attribute> & ah,
+ ArrayPool<Fragment> & fh)
+ : attributes(ah), fragments(fh)
+{
+ triggerIds[0] = ILLEGAL_TRIGGER_ID;
+ triggerIds[1] = ILLEGAL_TRIGGER_ID;
+ triggerIds[2] = ILLEGAL_TRIGGER_ID;
+ triggerAllocated[0] = false;
+ triggerAllocated[1] = false;
+ triggerAllocated[2] = false;
+}
+
+/*****************************************************************************
+ *
+ * Node state handling
+ *
+ *****************************************************************************/
+void
+Backup::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+
+ NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+
+ bool doStuff = false;
+ /*
+ Start by saving important signal data which will be destroyed before the
+ process is completed.
+ */
+ NodeId new_master_node_id = rep->masterNodeId;
+ Uint32 theFailedNodes[NodeBitmask::Size];
+ for (Uint32 i = 0; i < NodeBitmask::Size; i++)
+ theFailedNodes[i] = rep->theNodes[i];
+
+// NodeId old_master_node_id = getMasterNodeId();
+ c_masterNodeId = new_master_node_id;
+
+ NodePtr nodePtr;
+ for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)) {
+ jam();
+ if(NodeBitmask::get(theFailedNodes, nodePtr.p->nodeId)){
+ if(nodePtr.p->alive){
+ jam();
+ ndbrequire(c_aliveNodes.get(nodePtr.p->nodeId));
+ doStuff = true;
+ } else {
+ jam();
+ ndbrequire(!c_aliveNodes.get(nodePtr.p->nodeId));
+ }//if
+ nodePtr.p->alive = 0;
+ c_aliveNodes.clear(nodePtr.p->nodeId);
+ }//if
+ }//for
+
+ if(!doStuff){
+ jam();
+ return;
+ }//if
+
+#ifdef DEBUG_ABORT
+ ndbout_c("****************** Node fail rep ******************");
+#endif
+
+ NodeId newCoordinator = c_masterNodeId;
+ BackupRecordPtr ptr;
+ for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) {
+ jam();
+ checkNodeFail(signal, ptr, newCoordinator, theFailedNodes);
+ }
+}
+
+bool
+Backup::verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask)
+{
+ for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(aNodeBitMask.get(i)) {
+ if(!c_aliveNodes.get(i)){
+ jam();
+ return false;
+ }//if
+ }//if
+ }//for
+ return true;
+}
+
+void
+Backup::checkNodeFail(Signal* signal,
+ BackupRecordPtr ptr,
+ NodeId newCoord,
+ Uint32 theFailedNodes[NodeBitmask::Size])
+{
+ ndbrequire( ptr.p->nodes.get(newCoord)); /* just to make sure newCoord
+ * is part of the backup
+ */
+ /* Update ptr.p->nodes to be up to date with current alive nodes
+ */
+ NodePtr nodePtr;
+ bool found = false;
+ for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)) {
+ jam();
+ if(NodeBitmask::get(theFailedNodes, nodePtr.p->nodeId)) {
+ jam();
+ if (ptr.p->nodes.get(nodePtr.p->nodeId)) {
+ jam();
+ ptr.p->nodes.clear(nodePtr.p->nodeId);
+ found = true;
+ }
+ }//if
+ }//for
+
+ if(!found) {
+ jam();
+ return; // failed node is not part of backup process, safe to continue
+ }
+
+ bool doMasterTakeover = false;
+ if(NodeBitmask::get(theFailedNodes, refToNode(ptr.p->masterRef))){
+ jam();
+ doMasterTakeover = true;
+ };
+
+ if (newCoord == getOwnNodeId()){
+ jam();
+ if (doMasterTakeover) {
+ /**
+ * I'm new master
+ */
+ CRASH_INSERTION((10002));
+#ifdef DEBUG_ABORT
+ ndbout_c("**** Master Takeover: Node failed: Master id = %u",
+ refToNode(ptr.p->masterRef));
+#endif
+ masterTakeOver(signal, ptr);
+ return;
+ }//if
+ /**
+ * I'm master for this backup
+ */
+ jam();
+ CRASH_INSERTION((10001));
+#ifdef DEBUG_ABORT
+ ndbout_c("**** Master: Node failed: Master id = %u",
+ refToNode(ptr.p->masterRef));
+#endif
+ masterAbort(signal, ptr, false);
+ return;
+ }//if
+
+ /**
+   * The new master is not me,
+   * but remember who it is
+ */
+ ptr.p->masterRef = calcBackupBlockRef(newCoord);
+#ifdef DEBUG_ABORT
+ ndbout_c("**** Slave: Node failed: Master id = %u",
+ refToNode(ptr.p->masterRef));
+#endif
+ /**
+ * I abort myself as slave if not master
+ */
+ CRASH_INSERTION((10021));
+ // slaveAbort(signal, ptr);
+}
+
+void
+Backup::masterTakeOver(Signal* signal, BackupRecordPtr ptr)
+{
+ ptr.p->masterRef = reference();
+ ptr.p->masterData.gsn = MAX_GSN + 1;
+
+ switch(ptr.p->slaveState.getState()){
+ case INITIAL:
+ jam();
+ ptr.p->masterData.state.forceState(INITIAL);
+ break;
+ case ABORTING:
+ jam();
+ case DEFINING:
+ jam();
+ case DEFINED:
+ jam();
+ case STARTED:
+ jam();
+ case SCANNING:
+ jam();
+ ptr.p->masterData.state.forceState(STARTED);
+ break;
+ case STOPPING:
+ jam();
+ case CLEANING:
+ jam();
+ ptr.p->masterData.state.forceState(STOPPING);
+ break;
+ default:
+ ndbrequire(false);
+ }
+ masterAbort(signal, ptr, false);
+}
+
+void
+Backup::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 senderRef = signal->theData[0];
+ const Uint32 inclNode = signal->theData[1];
+
+ NodePtr node;
+ for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)) {
+ jam();
+ const Uint32 nodeId = node.p->nodeId;
+ if(inclNode == nodeId){
+ jam();
+
+ ndbrequire(node.p->alive == 0);
+ ndbrequire(!c_aliveNodes.get(nodeId));
+
+ node.p->alive = 1;
+ c_aliveNodes.set(nodeId);
+
+ break;
+ }//if
+ }//for
+ signal->theData[0] = reference();
+ sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Define backup
+ *
+ *****************************************************************************/
+
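+/**
+ * Define-backup flow on the master: BACKUP_REQ is validated (must be the
+ * master node, not diskless, no inline backup data), a backup record is
+ * seized, a backup id is fetched from the DBUTIL sequence, the define and
+ * dict-commit mutexes are locked, and DEFINE_BACKUP_REQ is then sent to all
+ * participants.
+ */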
+void
+Backup::execBACKUP_REQ(Signal* signal)
+{
+ jamEntry();
+ BackupReq * req = (BackupReq*)signal->getDataPtr();
+
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = signal->senderBlockRef();
+ const Uint32 dataLen32 = req->backupDataLen; // In 32 bit words
+
+ if(getOwnNodeId() != getMasterNodeId()) {
+ jam();
+ sendBackupRef(senderRef, signal, senderData, BackupRef::IAmNotMaster);
+ return;
+ }//if
+
+ if (m_diskless)
+ {
+ sendBackupRef(senderRef, signal, senderData,
+ BackupRef::CannotBackupDiskless);
+ return;
+ }
+
+ if(dataLen32 != 0) {
+ jam();
+ sendBackupRef(senderRef, signal, senderData,
+ BackupRef::BackupDefinitionNotImplemented);
+ return;
+ }//if
+
+#ifdef DEBUG_ABORT
+ dumpUsedResources();
+#endif
+ /**
+ * Seize a backup record
+ */
+ BackupRecordPtr ptr;
+ c_backups.seize(ptr);
+ if(ptr.i == RNIL) {
+ jam();
+ sendBackupRef(senderRef, signal, senderData, BackupRef::OutOfBackupRecord);
+ return;
+ }//if
+
+ ndbrequire(ptr.p->pages.empty());
+ ndbrequire(ptr.p->tables.isEmpty());
+
+ ptr.p->masterData.state.forceState(INITIAL);
+ ptr.p->masterData.state.setState(DEFINING);
+ ptr.p->clientRef = senderRef;
+ ptr.p->clientData = senderData;
+ ptr.p->masterRef = reference();
+ ptr.p->nodes = c_aliveNodes;
+ ptr.p->backupId = 0;
+ ptr.p->backupKey[0] = 0;
+ ptr.p->backupKey[1] = 0;
+ ptr.p->backupDataLen = 0;
+ ptr.p->masterData.dropTrig.tableId = RNIL;
+ ptr.p->masterData.alterTrig.tableId = RNIL;
+
+ UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
+
+ ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ;
+ utilReq->senderData = ptr.i;
+ utilReq->sequenceId = BACKUP_SEQUENCE;
+ utilReq->requestType = UtilSequenceReq::NextVal;
+ sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+ signal, UtilSequenceReq::SignalLength, JBB);
+}
+
+void
+Backup::execUTIL_SEQUENCE_REF(Signal* signal)
+{
+ BackupRecordPtr ptr;
+ jamEntry();
+ UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr();
+ ptr.i = utilRef->senderData;
+  ndbrequire(ptr.i != RNIL);
+ c_backupPool.getPtr(ptr);
+ ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ);
+ ptr.p->masterData.gsn = 0;
+ sendBackupRef(signal, ptr, BackupRef::SequenceFailure);
+}//execUTIL_SEQUENCE_REF()
+
+
+void
+Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode)
+{
+ jam();
+ sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData, errorCode);
+ // ptr.p->masterData.state.setState(INITIAL);
+ cleanupSlaveResources(ptr);
+}
+
+void
+Backup::sendBackupRef(BlockReference senderRef, Signal *signal,
+ Uint32 senderData, Uint32 errorCode)
+{
+ jam();
+ BackupRef* ref = (BackupRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->errorCode = errorCode;
+ ref->masterRef = numberToRef(BACKUP, getMasterNodeId());
+ sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);
+
+ if(errorCode != BackupRef::IAmNotMaster){
+ signal->theData[0] = NDB_LE_BackupFailedToStart;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
+}
+
+void
+Backup::execUTIL_SEQUENCE_CONF(Signal* signal)
+{
+ jamEntry();
+
+ UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr();
+
+ if(conf->requestType == UtilSequenceReq::Create) {
+ jam();
+ sendSTTORRY(signal); // At startup in NDB
+ return;
+ }
+
+ BackupRecordPtr ptr;
+ ptr.i = conf->senderData;
+ c_backupPool.getPtr(ptr);
+
+ ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ);
+ ptr.p->masterData.gsn = 0;
+ if (ptr.p->masterData.state.getState() == ABORTING) {
+ jam();
+ sendBackupRef(signal, ptr, ptr.p->errorCode);
+ return;
+ }//if
+ if (ERROR_INSERTED(10023)) {
+ ptr.p->masterData.state.setState(ABORTING);
+ sendBackupRef(signal, ptr, 323);
+ return;
+ }//if
+ ndbrequire(ptr.p->masterData.state.getState() == DEFINING);
+
+ {
+ Uint64 backupId;
+ memcpy(&backupId,conf->sequenceValue,8);
+ ptr.p->backupId= (Uint32)backupId;
+ }
+ ptr.p->backupKey[0] = (getOwnNodeId() << 16) | (ptr.p->backupId & 0xFFFF);
+ ptr.p->backupKey[1] = NdbTick_CurrentMillisecond();
+
+ ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ;
+ Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex);
+ Callback c = { safe_cast(&Backup::defineBackupMutex_locked), ptr.i };
+ ndbrequire(mutex.lock(c));
+
+ return;
+}
+
+void
+Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ BackupRecordPtr ptr;
+ ptr.i = ptrI;
+ c_backupPool.getPtr(ptr);
+
+ ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ);
+ ptr.p->masterData.gsn = 0;
+
+ ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ;
+ Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex);
+ Callback c = { safe_cast(&Backup::dictCommitTableMutex_locked), ptr.i };
+ ndbrequire(mutex.lock(c));
+}
+
+void
+Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal)
+{
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ /**
+ * We now have both the mutexes
+ */
+ BackupRecordPtr ptr;
+ ptr.i = ptrI;
+ c_backupPool.getPtr(ptr);
+
+ ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ);
+ ptr.p->masterData.gsn = 0;
+
+ if (ERROR_INSERTED(10031)) {
+ ptr.p->masterData.state.setState(ABORTING);
+ ptr.p->setErrorCode(331);
+ }//if
+
+ if (ptr.p->masterData.state.getState() == ABORTING) {
+ jam();
+
+ /**
+ * Unlock mutexes
+ */
+ jam();
+ Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex);
+ jam();
+ mutex1.unlock(); // ignore response
+
+ jam();
+ Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex);
+ jam();
+ mutex2.unlock(); // ignore response
+
+ sendBackupRef(signal, ptr, ptr.p->errorCode);
+ return;
+ }//if
+
+ ndbrequire(ptr.p->masterData.state.getState() == DEFINING);
+
+ sendDefineBackupReq(signal, ptr);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Define backup cont'd (from now on all slaves are in)
+ *
+ *****************************************************************************/
+
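+/**
+ * Send a signal to every alive node participating in this backup and record
+ * each receiver in masterData.sendCounter; haveAllSignals() below clears the
+ * counter as replies arrive. With executeDirect the signal is executed
+ * directly on this node rather than sent to it.
+ */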
+void
+Backup::sendSignalAllWait(BackupRecordPtr ptr, Uint32 gsn, Signal *signal,
+ Uint32 signalLength, bool executeDirect)
+{
+ jam();
+ ptr.p->masterData.gsn = gsn;
+ ptr.p->masterData.sendCounter.clearWaitingFor();
+ NodePtr node;
+ for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){
+ jam();
+ const Uint32 nodeId = node.p->nodeId;
+ if(node.p->alive && ptr.p->nodes.get(nodeId)){
+ jam();
+
+ ptr.p->masterData.sendCounter.setWaitingFor(nodeId);
+
+ const BlockReference ref = numberToRef(BACKUP, nodeId);
+ if (!executeDirect || ref != reference()) {
+ sendSignal(ref, gsn, signal, signalLength, JBB);
+ }//if
+ }//if
+ }//for
+ if (executeDirect) {
+ EXECUTE_DIRECT(BACKUP, gsn, signal, signalLength);
+ }
+}
+
+bool
+Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId)
+{
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == gsn);
+ ndbrequire(!ptr.p->masterData.sendCounter.done());
+ ndbrequire(ptr.p->masterData.sendCounter.isWaitingFor(nodeId));
+
+ ptr.p->masterData.sendCounter.clearWaitingFor(nodeId);
+
+ if (ptr.p->masterData.sendCounter.done())
+ ptr.p->masterData.gsn = 0;
+
+ return ptr.p->masterData.sendCounter.done();
+}
+
+void
+Backup::sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr)
+{
+ /**
+ * Sending define backup to all participants
+ */
+ DefineBackupReq * req = (DefineBackupReq*)signal->getDataPtrSend();
+ req->backupId = ptr.p->backupId;
+ req->clientRef = ptr.p->clientRef;
+ req->clientData = ptr.p->clientData;
+ req->senderRef = reference();
+ req->backupPtr = ptr.i;
+ req->backupKey[0] = ptr.p->backupKey[0];
+ req->backupKey[1] = ptr.p->backupKey[1];
+ req->nodes = ptr.p->nodes;
+ req->backupDataLen = ptr.p->backupDataLen;
+
+ ptr.p->masterData.errorCode = 0;
+ ptr.p->okToCleanMaster = false; // master must wait with cleaning to last
+ sendSignalAllWait(ptr, GSN_DEFINE_BACKUP_REQ, signal,
+ DefineBackupReq::SignalLength,
+ true /* do execute direct on oneself */);
+ /**
+ * Now send backup data
+ */
+ const Uint32 len = ptr.p->backupDataLen;
+ if(len == 0){
+ /**
+ * No data to send
+ */
+ jam();
+ return;
+ }//if
+
+ /**
+ * Not implemented
+ */
+ ndbrequire(0);
+}
+
+void
+Backup::execDEFINE_BACKUP_REF(Signal* signal)
+{
+ jamEntry();
+
+ DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtr();
+
+ const Uint32 ptrI = ref->backupPtr;
+ const Uint32 backupId = ref->backupId;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->masterData.errorCode = ref->errorCode;
+ defineBackupReply(signal, ptr, nodeId);
+}
+
+void
+Backup::execDEFINE_BACKUP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ DefineBackupConf* conf = (DefineBackupConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->backupPtr;
+ const Uint32 backupId = conf->backupId;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ if (ERROR_INSERTED(10024)) {
+ ptr.p->masterData.errorCode = 324;
+ }//if
+
+ defineBackupReply(signal, ptr, nodeId);
+}
+
+void
+Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
+{
+ if (!haveAllSignals(ptr, GSN_DEFINE_BACKUP_REQ, nodeId)) {
+ jam();
+ return;
+ }
+ /**
+ * Unlock mutexes
+ */
+ jam();
+ Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex);
+ jam();
+ mutex1.unlock(); // ignore response
+
+ jam();
+ Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex);
+ jam();
+ mutex2.unlock(); // ignore response
+
+ if(ptr.p->errorCode) {
+ jam();
+ ptr.p->masterData.errorCode = ptr.p->errorCode;
+ }
+
+ if(ptr.p->masterData.errorCode){
+ jam();
+ ptr.p->setErrorCode(ptr.p->masterData.errorCode);
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean);
+ masterSendAbortBackup(signal, ptr);
+ return;
+ }
+
+ /**
+ * Reply to client
+ */
+ BackupConf * conf = (BackupConf*)signal->getDataPtrSend();
+ conf->backupId = ptr.p->backupId;
+ conf->senderData = ptr.p->clientData;
+ conf->nodes = ptr.p->nodes;
+ sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal,
+ BackupConf::SignalLength, JBB);
+
+ signal->theData[0] = NDB_LE_BackupStarted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);
+
+ ptr.p->masterData.state.setState(DEFINED);
+ /**
+ * Prepare Trig
+ */
+ TablePtr tabPtr;
+ ndbrequire(ptr.p->tables.first(tabPtr));
+ sendCreateTrig(signal, ptr, tabPtr);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Prepare triggers
+ *
+ *****************************************************************************/
+void
+Backup::createAttributeMask(TablePtr tabPtr,
+ Bitmask<MAXNROFATTRIBUTESINWORDS> & mask)
+{
+ mask.clear();
+ Table & table = * tabPtr.p;
+ for(Uint32 i = 0; i<table.noOfAttributes; i++) {
+ jam();
+ AttributePtr attr;
+ table.attributes.getPtr(attr, i);
+ mask.set(i);
+ }
+}
+
+void
+Backup::sendCreateTrig(Signal* signal,
+ BackupRecordPtr ptr, TablePtr tabPtr)
+{
+ CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend();
+
+ ptr.p->errorCode = 0;
+ ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ;
+ ptr.p->masterData.sendCounter = 3;
+ ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId;
+
+ req->setUserRef(reference());
+ req->setConnectionPtr(ptr.i);
+ req->setRequestType(CreateTrigReq::RT_USER);
+
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attrMask;
+ createAttributeMask(tabPtr, attrMask);
+ req->setAttributeMask(attrMask);
+ req->setTableId(tabPtr.p->tableId);
+ req->setIndexId(RNIL); // not used
+ req->setTriggerId(RNIL); // to be created
+ req->setTriggerType(TriggerType::SUBSCRIPTION);
+ req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
+ req->setMonitorReplicas(true);
+ req->setMonitorAllAttributes(false);
+ req->setOnline(false); // leave trigger offline
+
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+
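+ /**
+ * One CREATE_TRIG_REQ is sent to DICT per trigger event in
+ * triggerEventValues (presumably insert/update/delete, matching
+ * sendCounter = 3 above). The trigger name is built from
+ * triggerNameFormat and passed as a linear section.
+ */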
+ for (int i=0; i < 3; i++) {
+ req->setTriggerEvent(triggerEventValues[i]);
+ BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
+ ptr.p->backupId, tabPtr.p->tableId);
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = nameBuffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(DBDICT_REF, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ }
+}
+
+void
+Backup::execCREATE_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ CreateTrigConf * conf = (CreateTrigConf*)signal->getDataPtr();
+
+ const Uint32 ptrI = conf->getConnectionPtr();
+ const Uint32 tableId = conf->getTableId();
+ const TriggerEvent::Value type = conf->getTriggerEvent();
+ const Uint32 triggerId = conf->getTriggerId();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ /**
+ * Verify that I'm waiting for this conf
+ */
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
+ ndbrequire(ptr.p->masterData.sendCounter.done() == false);
+ ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tableId));
+ ndbrequire(type < 3); // in case someone decides to change the enums
+
+ ndbrequire(tabPtr.p->triggerIds[type] == ILLEGAL_TRIGGER_ID);
+ tabPtr.p->triggerIds[type] = triggerId;
+
+ createTrigReply(signal, ptr);
+}
+
+void
+Backup::execCREATE_TRIG_REF(Signal* signal)
+{
+ CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtr();
+
+ const Uint32 ptrI = ref->getConnectionPtr();
+ const Uint32 tableId = ref->getTableId();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ /**
+ * Verify that I'm waiting for this ref
+ */
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
+ ndbrequire(ptr.p->masterData.sendCounter.done() == false);
+ ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
+
+ ptr.p->setErrorCode(ref->getErrorCode());
+
+ createTrigReply(signal, ptr);
+}
+
+void
+Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
+{
+ CRASH_INSERTION(10003);
+
+ /**
+ * Check whether we are finished with this table
+ */
+ ptr.p->masterData.sendCounter--;
+ if(ptr.p->masterData.sendCounter.done() == false){
+ jam();
+ return;
+ }//if
+
+ ptr.p->masterData.gsn = 0;
+
+ if(ptr.p->checkError()) {
+ jam();
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+
+ if (ERROR_INSERTED(10025)) {
+ ptr.p->errorCode = 325;
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.createTrig.tableId));
+
+ /**
+ * Next table
+ */
+ ptr.p->tables.next(tabPtr);
+ if(tabPtr.i != RNIL){
+ jam();
+ sendCreateTrig(signal, ptr, tabPtr);
+ return;
+ }//if
+
+ /**
+ * Finished with all tables, send StartBackupReq
+ */
+ ptr.p->masterData.state.setState(STARTED);
+
+ ptr.p->tables.first(tabPtr);
+ ptr.p->errorCode = 0;
+ ptr.p->masterData.startBackup.signalNo = 0;
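+ // Tables are sent in batches of StartBackupReq::MaxTableTriggers,
+ // so the number of START_BACKUP_REQ rounds is the ceiling of
+ // noOfTables / MaxTableTriggers (computed below by adding
+ // MaxTableTriggers - 1 before the integer division).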
+ ptr.p->masterData.startBackup.noOfSignals =
+ (ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) /
+ StartBackupReq::MaxTableTriggers;
+ sendStartBackup(signal, ptr, tabPtr);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Start backup
+ *
+ *****************************************************************************/
+void
+Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
+{
+
+ ptr.p->masterData.startBackup.tablePtr = tabPtr.i;
+
+ StartBackupReq* req = (StartBackupReq*)signal->getDataPtrSend();
+ req->backupId = ptr.p->backupId;
+ req->backupPtr = ptr.i;
+ req->signalNo = ptr.p->masterData.startBackup.signalNo;
+ req->noOfSignals = ptr.p->masterData.startBackup.noOfSignals;
+ Uint32 i;
+ for(i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
+ jam();
+ req->tableTriggers[i].tableId = tabPtr.p->tableId;
+ req->tableTriggers[i].triggerIds[0] = tabPtr.p->triggerIds[0];
+ req->tableTriggers[i].triggerIds[1] = tabPtr.p->triggerIds[1];
+ req->tableTriggers[i].triggerIds[2] = tabPtr.p->triggerIds[2];
+ if(!ptr.p->tables.next(tabPtr)){
+ jam();
+ i++;
+ break;
+ }//if
+ }//for
+ req->noOfTableTriggers = i;
+
+ sendSignalAllWait(ptr, GSN_START_BACKUP_REQ, signal,
+ StartBackupReq::HeaderLength +
+ (i * StartBackupReq::TableTriggerLength));
+}
+
+void
+Backup::execSTART_BACKUP_REF(Signal* signal)
+{
+ jamEntry();
+
+ StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr();
+ const Uint32 ptrI = ref->backupPtr;
+ const Uint32 backupId = ref->backupId;
+ const Uint32 signalNo = ref->signalNo;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->setErrorCode(ref->errorCode);
+ startBackupReply(signal, ptr, nodeId, signalNo);
+}
+
+void
+Backup::execSTART_BACKUP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->backupPtr;
+ const Uint32 backupId = conf->backupId;
+ const Uint32 signalNo = conf->signalNo;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ startBackupReply(signal, ptr, nodeId, signalNo);
+}
+
+void
+Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr,
+ Uint32 nodeId, Uint32 signalNo)
+{
+
+ CRASH_INSERTION((10004));
+
+ ndbrequire(ptr.p->masterData.startBackup.signalNo == signalNo);
+ if (!haveAllSignals(ptr, GSN_START_BACKUP_REQ, nodeId)) {
+ jam();
+ return;
+ }
+
+ if(ptr.p->checkError()){
+ jam();
+ masterAbort(signal, ptr, true);
+ return;
+ }
+
+ if (ERROR_INSERTED(10026)) {
+ ptr.p->errorCode = 326;
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+
+ TablePtr tabPtr;
+ c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr);
+ for(Uint32 i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
+ jam();
+ if(!ptr.p->tables.next(tabPtr)) {
+ jam();
+ break;
+ }//if
+ }//for
+
+ if(tabPtr.i != RNIL) {
+ jam();
+ ptr.p->masterData.startBackup.signalNo++;
+ sendStartBackup(signal, ptr, tabPtr);
+ return;
+ }
+
+ sendAlterTrig(signal, ptr);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Activate triggers
+ *
+ *****************************************************************************/
+void
+Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr)
+{
+ AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend();
+
+ ptr.p->errorCode = 0;
+ ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ;
+ ptr.p->masterData.sendCounter = 0;
+
+ req->setUserRef(reference());
+ req->setConnectionPtr(ptr.i);
+ req->setRequestType(AlterTrigReq::RT_USER);
+ req->setTriggerInfo(0); // not used on ALTER via DICT
+ req->setOnline(true);
+ req->setReceiverRef(reference());
+
+ TablePtr tabPtr;
+
+ if (ptr.p->masterData.alterTrig.tableId == RNIL) {
+ jam();
+ ptr.p->tables.first(tabPtr);
+ } else {
+ jam();
+ ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.alterTrig.tableId));
+ ptr.p->tables.next(tabPtr);
+ }//if
+ if (tabPtr.i != RNIL) {
+ jam();
+ ptr.p->masterData.alterTrig.tableId = tabPtr.p->tableId;
+ req->setTableId(tabPtr.p->tableId);
+
+ req->setTriggerId(tabPtr.p->triggerIds[0]);
+ sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+
+ req->setTriggerId(tabPtr.p->triggerIds[1]);
+ sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+
+ req->setTriggerId(tabPtr.p->triggerIds[2]);
+ sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+
+ ptr.p->masterData.sendCounter += 3;
+ return;
+ }//if
+ ptr.p->masterData.alterTrig.tableId = RNIL;
+ /**
+ * Finished with all tables
+ */
+ ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ;
+ ptr.p->masterData.waitGCP.startBackup = true;
+
+ WaitGCPReq * waitGCPReq = (WaitGCPReq*)signal->getDataPtrSend();
+ waitGCPReq->senderRef = reference();
+ waitGCPReq->senderData = ptr.i;
+ waitGCPReq->requestType = WaitGCPReq::CompleteForceStart;
+ sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength,JBB);
+}
+
+void
+Backup::execALTER_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+
+ AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->getConnectionPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ alterTrigReply(signal, ptr);
+}
+
+void
+Backup::execALTER_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+
+ AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtr();
+ const Uint32 ptrI = ref->getConnectionPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ ptr.p->setErrorCode(ref->getErrorCode());
+
+ alterTrigReply(signal, ptr);
+}
+
+void
+Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr)
+{
+
+ CRASH_INSERTION((10005));
+
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ);
+ ndbrequire(ptr.p->masterData.sendCounter.done() == false);
+
+ ptr.p->masterData.sendCounter--;
+
+ if(ptr.p->masterData.sendCounter.done() == false){
+ jam();
+ return;
+ }//if
+
+ ptr.p->masterData.gsn = 0;
+
+ if(ptr.p->checkError()){
+ jam();
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+
+ sendAlterTrig(signal, ptr);
+}
+
+void
+Backup::execWAIT_GCP_REF(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10006));
+
+ WaitGCPRef * ref = (WaitGCPRef*)signal->getDataPtr();
+ const Uint32 ptrI = ref->senderData;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ);
+
+ WaitGCPReq * req = (WaitGCPReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = ptr.i;
+ req->requestType = WaitGCPReq::CompleteForceStart;
+ sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength,JBB);
+}
+
+void
+Backup::execWAIT_GCP_CONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION((10007));
+
+ WaitGCPConf * conf = (WaitGCPConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->senderData;
+ const Uint32 gcp = conf->gcp;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ);
+ ptr.p->masterData.gsn = 0;
+
+ if(ptr.p->checkError()) {
+ jam();
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+
+ if(ptr.p->masterData.waitGCP.startBackup) {
+ jam();
+ CRASH_INSERTION((10008));
+ ptr.p->startGCP = gcp;
+ ptr.p->masterData.state.setState(SCANNING);
+ nextFragment(signal, ptr);
+ } else {
+ jam();
+ CRASH_INSERTION((10009));
+ ptr.p->stopGCP = gcp;
+ ptr.p->masterData.state.setState(STOPPING);
+ sendDropTrig(signal, ptr); // regular dropping of triggers
+ }//if
+}
+/*****************************************************************************
+ *
+ * Master functionality - Backup fragment
+ *
+ *****************************************************************************/
+void
+Backup::nextFragment(Signal* signal, BackupRecordPtr ptr)
+{
+ jam();
+
+ BackupFragmentReq* req = (BackupFragmentReq*)signal->getDataPtrSend();
+ req->backupPtr = ptr.i;
+ req->backupId = ptr.p->backupId;
+
+ NodeBitmask nodes = ptr.p->nodes;
+ Uint32 idleNodes = nodes.count();
+ Uint32 saveIdleNodes = idleNodes;
+ ndbrequire(idleNodes > 0);
+
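+ /**
+ * Walk all tables and fragments: nodes already scanning a fragment
+ * are removed from the idle set, and each remaining idle node is
+ * handed at most one unscanned fragment (on its primary replica
+ * node). If nothing was in progress and nothing new could be started
+ * (idleNodes == saveIdleNodes), every fragment has been scanned and
+ * we ask DIH for the stop GCP instead.
+ */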
+ TablePtr tabPtr;
+ ptr.p->tables.first(tabPtr);
+ for(; tabPtr.i != RNIL && idleNodes > 0; ptr.p->tables.next(tabPtr)) {
+ jam();
+ FragmentPtr fragPtr;
+ Array<Fragment> & frags = tabPtr.p->fragments;
+ const Uint32 fragCount = frags.getSize();
+
+ for(Uint32 i = 0; i<fragCount && idleNodes > 0; i++) {
+ jam();
+ tabPtr.p->fragments.getPtr(fragPtr, i);
+ const Uint32 nodeId = fragPtr.p->node;
+ if(fragPtr.p->scanning != 0) {
+ jam();
+ ndbrequire(nodes.get(nodeId));
+ nodes.clear(nodeId);
+ idleNodes--;
+ } else if(fragPtr.p->scanned == 0 && nodes.get(nodeId)){
+ jam();
+ fragPtr.p->scanning = 1;
+ nodes.clear(nodeId);
+ idleNodes--;
+
+ req->tableId = tabPtr.p->tableId;
+ req->fragmentNo = i;
+ req->count = 0;
+
+ const BlockReference ref = numberToRef(BACKUP, nodeId);
+ sendSignal(ref, GSN_BACKUP_FRAGMENT_REQ, signal,
+ BackupFragmentReq::SignalLength, JBB);
+ }//if
+ }//for
+ }//for
+
+ if(idleNodes != saveIdleNodes){
+ jam();
+ return;
+ }//if
+
+ /**
+ * Finished with all tables
+ */
+ {
+ ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ;
+ ptr.p->masterData.waitGCP.startBackup = false;
+
+ WaitGCPReq * req = (WaitGCPReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = ptr.i;
+ req->requestType = WaitGCPReq::CompleteForceStart;
+ sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength, JBB);
+ }
+}
+
+void
+Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10010));
+
+ BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->backupPtr;
+ const Uint32 backupId = conf->backupId;
+ const Uint32 tableId = conf->tableId;
+ const Uint32 fragmentNo = conf->fragmentNo;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+ const Uint32 noOfBytes = conf->noOfBytes;
+ const Uint32 noOfRecords = conf->noOfRecords;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->noOfBytes += noOfBytes;
+ ptr.p->noOfRecords += noOfRecords;
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tableId));
+
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragmentNo);
+
+ ndbrequire(fragPtr.p->scanned == 0);
+ ndbrequire(fragPtr.p->scanning == 1);
+ ndbrequire(fragPtr.p->node == nodeId);
+
+ fragPtr.p->scanned = 1;
+ fragPtr.p->scanning = 0;
+
+ if(ptr.p->checkError()) {
+ jam();
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+ if (ERROR_INSERTED(10028)) {
+ ptr.p->errorCode = 328;
+ masterAbort(signal, ptr, true);
+ return;
+ }//if
+ nextFragment(signal, ptr);
+}
+
+void
+Backup::execBACKUP_FRAGMENT_REF(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10011));
+
+ BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr();
+ const Uint32 ptrI = ref->backupPtr;
+ const Uint32 backupId = ref->backupId;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->setErrorCode(ref->errorCode);
+ masterAbort(signal, ptr, true);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Drop triggers
+ *
+ *****************************************************************************/
+
+void
+Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr)
+{
+ TablePtr tabPtr;
+ if (ptr.p->masterData.dropTrig.tableId == RNIL) {
+ jam();
+ ptr.p->tables.first(tabPtr);
+ } else {
+ jam();
+ ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.dropTrig.tableId));
+ ptr.p->tables.next(tabPtr);
+ }//if
+ if (tabPtr.i != RNIL) {
+ jam();
+ sendDropTrig(signal, ptr, tabPtr);
+ } else {
+ jam();
+ ptr.p->masterData.dropTrig.tableId = RNIL;
+
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean);
+
+ if(ptr.p->masterData.state.getState() == STOPPING) {
+ jam();
+ sendStopBackup(signal, ptr);
+ return;
+ }//if
+ ndbrequire(ptr.p->masterData.state.getState() == ABORTING);
+ masterSendAbortBackup(signal, ptr);
+ }//if
+}
+
+void
+Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
+{
+ jam();
+ DropTrigReq * req = (DropTrigReq *)signal->getDataPtrSend();
+
+ ptr.p->masterData.gsn = GSN_DROP_TRIG_REQ;
+ ptr.p->masterData.sendCounter = 0;
+
+ req->setConnectionPtr(ptr.i);
+ req->setUserRef(reference()); // Sending to myself
+ req->setRequestType(DropTrigReq::RT_USER);
+ req->setIndexId(RNIL);
+ req->setTriggerInfo(0); // not used on DROP via DICT
+
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+
+ ptr.p->masterData.dropTrig.tableId = tabPtr.p->tableId;
+ req->setTableId(tabPtr.p->tableId);
+
+ for (int i = 0; i < 3; i++) {
+ Uint32 id = tabPtr.p->triggerIds[i];
+ req->setTriggerId(id);
+ if (id != ILLEGAL_TRIGGER_ID) {
+ sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ } else {
+ BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
+ ptr.p->backupId, tabPtr.p->tableId);
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = nameBuffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB, lsPtr, 1);
+ }
+ ptr.p->masterData.sendCounter ++;
+ }
+}
+
+void
+Backup::execDROP_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+
+ DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr();
+ const Uint32 ptrI = ref->getConnectionPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ //ndbrequire(ref->getErrorCode() == DropTrigRef::NoSuchTrigger);
+ dropTrigReply(signal, ptr);
+}
+
+void
+Backup::execDROP_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+
+ DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->getConnectionPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ dropTrigReply(signal, ptr);
+}
+
+void
+Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr)
+{
+
+ CRASH_INSERTION((10012));
+
+ ndbrequire(ptr.p->masterRef == reference());
+ ndbrequire(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ);
+ ndbrequire(ptr.p->masterData.sendCounter.done() == false);
+
+ ptr.p->masterData.sendCounter--;
+ if(ptr.p->masterData.sendCounter.done() == false){
+ jam();
+ return;
+ }//if
+
+ ptr.p->masterData.gsn = 0;
+ sendDropTrig(signal, ptr); // recursive next
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Stop backup
+ *
+ *****************************************************************************/
+void
+Backup::execSTOP_BACKUP_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(0);
+}
+
+void
+Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr)
+{
+ jam();
+ ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ;
+
+ StopBackupReq* stop = (StopBackupReq*)signal->getDataPtrSend();
+ stop->backupPtr = ptr.i;
+ stop->backupId = ptr.p->backupId;
+ stop->startGCP = ptr.p->startGCP;
+ stop->stopGCP = ptr.p->stopGCP;
+
+ sendSignalAllWait(ptr, GSN_STOP_BACKUP_REQ, signal,
+ StopBackupReq::SignalLength);
+}
+
+void
+Backup::execSTOP_BACKUP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ StopBackupConf* conf = (StopBackupConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->backupPtr;
+ const Uint32 backupId = conf->backupId;
+ const Uint32 nodeId = refToNode(signal->senderBlockRef());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ masterAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->noOfLogBytes += conf->noOfLogBytes;
+ ptr.p->noOfLogRecords += conf->noOfLogRecords;
+
+ stopBackupReply(signal, ptr, nodeId);
+}
+
+void
+Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
+{
+ CRASH_INSERTION((10013));
+
+ if (!haveAllSignals(ptr, GSN_STOP_BACKUP_REQ, nodeId)) {
+ jam();
+ return;
+ }
+
+ // ptr.p->masterData.state.setState(INITIAL);
+
+ // send backup complete to the slaves first, so that they know the backup is done
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete);
+
+ BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend();
+ rep->backupId = ptr.p->backupId;
+ rep->senderData = ptr.p->clientData;
+ rep->startGCP = ptr.p->startGCP;
+ rep->stopGCP = ptr.p->stopGCP;
+ rep->noOfBytes = ptr.p->noOfBytes;
+ rep->noOfRecords = ptr.p->noOfRecords;
+ rep->noOfLogBytes = ptr.p->noOfLogBytes;
+ rep->noOfLogRecords = ptr.p->noOfLogRecords;
+ rep->nodes = ptr.p->nodes;
+ sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal,
+ BackupCompleteRep::SignalLength, JBB);
+
+ signal->theData[0] = NDB_LE_BackupCompleted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->startGCP;
+ signal->theData[4] = ptr.p->stopGCP;
+ signal->theData[5] = ptr.p->noOfBytes;
+ signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[7] = ptr.p->noOfLogBytes;
+ signal->theData[8] = ptr.p->noOfLogRecords;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
+}
+
+/*****************************************************************************
+ *
+ * Master functionality - Abort backup
+ *
+ *****************************************************************************/
+void
+Backup::masterAbort(Signal* signal, BackupRecordPtr ptr, bool controlledAbort)
+{
+ if(ptr.p->masterData.state.getState() == ABORTING) {
+#ifdef DEBUG_ABORT
+ ndbout_c("---- Master already aborting");
+#endif
+ jam();
+ return;
+ }
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("************ masterAbort");
+#endif
+
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure);
+ if (!ptr.p->checkError())
+ ptr.p->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail;
+
+ const State s = ptr.p->masterData.state.getState();
+
+ ptr.p->masterData.state.setState(ABORTING);
+
+ ndbrequire(s == INITIAL ||
+ s == STARTED ||
+ s == DEFINING ||
+ s == DEFINED ||
+ s == SCANNING ||
+ s == STOPPING ||
+ s == ABORTING);
+ if(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ) {
+ jam();
+ DEBUG_OUT("masterAbort: gsn = GSN_UTIL_SEQUENCE_REQ");
+ //-------------------------------------------------------
+ // We are waiting for UTIL_SEQUENCE response. We rely on
+ // this to arrive and check for ABORTING in response.
+ // No slaves are involved at this point and ABORT simply
+ // results in BACKUP_REF to client
+ //-------------------------------------------------------
+ /**
+ * Waiting for Sequence Id
+ * @see execUTIL_SEQUENCE_CONF
+ */
+ return;
+ }//if
+
+ if(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ) {
+ jam();
+ DEBUG_OUT("masterAbort: gsn = GSN_UTIL_LOCK_REQ");
+ //-------------------------------------------------------
+ // We are waiting for UTIL_LOCK response (mutex). We rely on
+ // this to arrive and check for ABORTING in response.
+ // No slaves are involved at this point and ABORT simply
+ // results in BACKUP_REF to client
+ //-------------------------------------------------------
+ /**
+ * Waiting for lock
+ * @see execUTIL_LOCK_CONF
+ */
+ return;
+ }//if
+
+ /**
+ * Unlock mutexes only at master
+ */
+ jam();
+ Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex);
+ jam();
+ mutex1.unlock(); // ignore response
+
+ jam();
+ Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex);
+ jam();
+ mutex2.unlock(); // ignore response
+
+ if (!controlledAbort) {
+ jam();
+ if (s == DEFINING) {
+ jam();
+//-------------------------------------------------------
+// If we are in the defining phase all work is done by
+// slaves. No triggers have been allocated thus slaves
+// may free all "Master" resources, let them know...
+//-------------------------------------------------------
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean);
+ return;
+ }//if
+ if (s == DEFINED) {
+ jam();
+//-------------------------------------------------------
+// DEFINED is the state when triggers are created. We rely
+// on that DICT will report create trigger failure in case
+// of node failure. Thus no special action is needed here.
+// We will check for errorCode != 0 when receiving
+// replies on create trigger.
+//-------------------------------------------------------
+ return;
+ }//if
+ if(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ) {
+ jam();
+ DEBUG_OUT("masterAbort: gsn = GSN_WAIT_GCP_REQ");
+//-------------------------------------------------------
+// We are waiting for WAIT_GCP response. We rely on
+// this to arrive and check for ABORTING in response.
+//-------------------------------------------------------
+
+ /**
+ * Waiting for GCP
+ * @see execWAIT_GCP_CONF
+ */
+ return;
+ }//if
+
+ if(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ) {
+ jam();
+ DEBUG_OUT("masterAbort: gsn = GSN_ALTER_TRIG_REQ");
+//-------------------------------------------------------
+// We are waiting for ALTER_TRIG response. We rely on
+// this to arrive and check for ABORTING in response.
+//-------------------------------------------------------
+
+ /**
+ * All triggers haven't been created yet
+ */
+ return;
+ }//if
+
+ if(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ) {
+ jam();
+ DEBUG_OUT("masterAbort: gsn = GSN_DROP_TRIG_REQ");
+//-------------------------------------------------------
+// We are waiting for DROP_TRIG response. We rely on
+// this to arrive and will continue dropping triggers
+// until completed.
+//-------------------------------------------------------
+
+ /**
+ * I'm currently dropping the trigger
+ */
+ return;
+ }//if
+ }//if
+
+//-------------------------------------------------------
+// If we are waiting for START_BACKUP responses we can
+// safely start dropping triggers (state == STARTED).
+// We will ignore any START_BACKUP responses after this.
+//-------------------------------------------------------
+ DEBUG_OUT("masterAbort: sendDropTrig");
+ sendDropTrig(signal, ptr); // dropping due to error
+}
+
+void
+Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr)
+{
+ if (ptr.p->masterData.state.getState() != ABORTING) {
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure);
+ ptr.p->masterData.state.setState(ABORTING);
+ }
+ const State s = ptr.p->masterData.state.getAbortState();
+
+ /**
+ * First inform to client
+ */
+ if(s == DEFINING) {
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("** Abort: sending BACKUP_REF to mgmtsrvr");
+#endif
+ sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData,
+ ptr.p->errorCode);
+
+ } else {
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("** Abort: sending BACKUP_ABORT_REP to mgmtsrvr");
+#endif
+ BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend();
+ rep->backupId = ptr.p->backupId;
+ rep->senderData = ptr.p->clientData;
+ rep->reason = ptr.p->errorCode;
+ sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal,
+ BackupAbortRep::SignalLength, JBB);
+
+ signal->theData[0] = NDB_LE_BackupAborted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+ }//if
+
+ // ptr.p->masterData.state.setState(INITIAL);
+
+ sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure);
+}
+
+/*****************************************************************************
+ *
+ * Slave functionality: Define Backup
+ *
+ *****************************************************************************/
+void
+Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode)
+{
+ if (ptr.p->slaveState.getState() == ABORTING) {
+ jam();
+ return;
+ }
+ ptr.p->slaveState.setState(ABORTING);
+
+ if (errCode != 0) {
+ jam();
+ ptr.p->setErrorCode(errCode);
+ }//if
+ ndbrequire(ptr.p->errorCode != 0);
+
+ DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend();
+ ref->backupId = ptr.p->backupId;
+ ref->backupPtr = ptr.i;
+ ref->errorCode = ptr.p->errorCode;
+ sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal,
+ DefineBackupRef::SignalLength, JBB);
+
+ closeFiles(signal, ptr);
+}
+
+void
+Backup::execDEFINE_BACKUP_REQ(Signal* signal)
+{
+ jamEntry();
+
+ DefineBackupReq* req = (DefineBackupReq*)signal->getDataPtr();
+
+ BackupRecordPtr ptr;
+ const Uint32 ptrI = req->backupPtr;
+ const Uint32 backupId = req->backupId;
+ const BlockReference senderRef = req->senderRef;
+
+ if(senderRef == reference()){
+ /**
+ * Signal sent from myself -> record already seized
+ */
+ jam();
+ c_backupPool.getPtr(ptr, ptrI);
+ } else { // from other node
+ jam();
+#ifdef DEBUG_ABORT
+ dumpUsedResources();
+#endif
+ if(!c_backups.seizeId(ptr, ptrI)) {
+ jam();
+ ndbrequire(false); // If master has succeeded slave should succeed
+ }//if
+ }//if
+
+ CRASH_INSERTION((10014));
+
+ ptr.p->slaveState.forceState(INITIAL);
+ ptr.p->slaveState.setState(DEFINING);
+ ptr.p->errorCode = 0;
+ ptr.p->clientRef = req->clientRef;
+ ptr.p->clientData = req->clientData;
+ ptr.p->masterRef = senderRef;
+ ptr.p->nodes = req->nodes;
+ ptr.p->backupId = backupId;
+ ptr.p->backupKey[0] = req->backupKey[0];
+ ptr.p->backupKey[1] = req->backupKey[1];
+ ptr.p->backupDataLen = req->backupDataLen;
+ ptr.p->masterData.dropTrig.tableId = RNIL;
+ ptr.p->masterData.alterTrig.tableId = RNIL;
+ ptr.p->noOfBytes = 0;
+ ptr.p->noOfRecords = 0;
+ ptr.p->noOfLogBytes = 0;
+ ptr.p->noOfLogRecords = 0;
+ ptr.p->currGCP = 0;
+
+ /**
+ * Allocate files
+ */
+ BackupFilePtr files[3];
+ Uint32 noOfPages[] = {
+ NO_OF_PAGES_META_FILE,
+ 2, // 32k
+ 0 // 3M
+ };
+ const Uint32 maxInsert[] = {
+ 2048, // Temporarily to solve TR515
+ //25, // 100 bytes
+ 2048, // 4k
+ 16*3000, // Max 16 tuples
+ };
+ Uint32 minWrite[] = {
+ 8192,
+ 8192,
+ 32768
+ };
+ Uint32 maxWrite[] = {
+ 8192,
+ 8192,
+ 32768
+ };
+
+ minWrite[1] = c_defaults.m_minWriteSize;
+ maxWrite[1] = c_defaults.m_maxWriteSize;
+ noOfPages[1] = (c_defaults.m_logBufferSize + sizeof(Page32) - 1) /
+ sizeof(Page32);
+ minWrite[2] = c_defaults.m_minWriteSize;
+ maxWrite[2] = c_defaults.m_maxWriteSize;
+ noOfPages[2] = (c_defaults.m_dataBufferSize + sizeof(Page32) - 1) /
+ sizeof(Page32);
+
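+ /**
+ * Index 0/1/2 of the arrays above correspond to the CTL, LOG and
+ * DATA files (see the fileType assignments below); buffer and write
+ * sizes for the LOG and DATA files come from the configured defaults.
+ */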
+ for(Uint32 i = 0; i<3; i++) {
+ jam();
+ if(!ptr.p->files.seize(files[i])) {
+ jam();
+ defineBackupRef(signal, ptr,
+ DefineBackupRef::FailedToAllocateFileRecord);
+ return;
+ }//if
+
+ files[i].p->tableId = RNIL;
+ files[i].p->backupPtr = ptr.i;
+ files[i].p->filePointer = RNIL;
+ files[i].p->fileDone = 0;
+ files[i].p->fileOpened = 0;
+ files[i].p->fileRunning = 0;
+ files[i].p->scanRunning = 0;
+ files[i].p->errorCode = 0;
+
+ if(files[i].p->pages.seize(noOfPages[i]) == false) {
+ jam();
+ DEBUG_OUT("Failed to seize " << noOfPages[i] << " pages");
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedToAllocateBuffers);
+ return;
+ }//if
+ Page32Ptr pagePtr;
+ files[i].p->pages.getPtr(pagePtr, 0);
+
+ const char * msg = files[i].p->
+ operation.dataBuffer.setup((Uint32*)pagePtr.p,
+ noOfPages[i] * (sizeof(Page32) >> 2),
+ 128,
+ minWrite[i] >> 2,
+ maxWrite[i] >> 2,
+ maxInsert[i]);
+ if(msg != 0) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedToSetupFsBuffers);
+ return;
+ }//if
+ }//for
+ files[0].p->fileType = BackupFormat::CTL_FILE;
+ files[1].p->fileType = BackupFormat::LOG_FILE;
+ files[2].p->fileType = BackupFormat::DATA_FILE;
+
+ ptr.p->ctlFilePtr = files[0].i;
+ ptr.p->logFilePtr = files[1].i;
+ ptr.p->dataFilePtr = files[2].i;
+
+ if (!verifyNodesAlive(ptr.p->nodes)) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::Undefined);
+ // sendBackupRef(signal, ptr,
+ // ptr.p->errorCode?ptr.p->errorCode:BackupRef::Undefined);
+ return;
+ }//if
+ if (ERROR_INSERTED(10027)) {
+ jam();
+ defineBackupRef(signal, ptr, 327);
+ // sendBackupRef(signal, ptr, 327);
+ return;
+ }//if
+
+ if(ptr.p->backupDataLen == 0) {
+ jam();
+ backupAllData(signal, ptr);
+ return;
+ }//if
+
+ /**
+ * Not implemented
+ */
+ ndbrequire(0);
+}
+
+void
+Backup::backupAllData(Signal* signal, BackupRecordPtr ptr)
+{
+ /**
+ * Get all tables from dict
+ */
+ ListTablesReq * req = (ListTablesReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = ptr.i;
+ req->requestData = 0;
+ sendSignal(DBDICT_REF, GSN_LIST_TABLES_REQ, signal,
+ ListTablesReq::SignalLength, JBB);
+}
+
+void
+Backup::execLIST_TABLES_CONF(Signal* signal)
+{
+ jamEntry();
+
+ ListTablesConf* conf = (ListTablesConf*)signal->getDataPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, conf->senderData);
+
+ const Uint32 len = signal->length() - ListTablesConf::HeaderLength;
+ for(unsigned int i = 0; i<len; i++) {
+ jam();
+ Uint32 tableId = ListTablesConf::getTableId(conf->tableData[i]);
+ Uint32 tableType = ListTablesConf::getTableType(conf->tableData[i]);
+ if (!DictTabInfo::isTable(tableType) && !DictTabInfo::isIndex(tableType)){
+ jam();
+ continue;
+ }//if
+ TablePtr tabPtr;
+ ptr.p->tables.seize(tabPtr);
+ if(tabPtr.i == RNIL) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedToAllocateTables);
+ return;
+ }//if
+ tabPtr.p->tableId = tableId;
+ tabPtr.p->tableType = tableType;
+ }//for
+
+ if(len == ListTablesConf::DataLength) {
+ jam();
+ /**
+ * Not finished...
+ */
+ return;
+ }//if
+
+ defineSlaveAbortCheck();
+
+ /**
+ * All tables fetched
+ */
+ openFiles(signal, ptr);
+}
+
+void
+Backup::openFiles(Signal* signal, BackupRecordPtr ptr)
+{
+ jam();
+
+ BackupFilePtr filePtr;
+
+ FsOpenReq * req = (FsOpenReq *)signal->getDataPtrSend();
+ req->userReference = reference();
+ req->fileFlags =
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE |
+ FsOpenReq::OM_APPEND |
+ FsOpenReq::OM_SYNC;
+ FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF);
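+ /**
+ * The same FsOpenReq is reused for all three files; only userPointer
+ * and the file number fields differ. The version-2 file number
+ * appears to encode the suffix (ctl/log/data), the backup id and the
+ * own node id, from which NDBFS derives the actual file name.
+ */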
+
+ /**
+ * Ctl file
+ */
+ c_backupFilePool.getPtr(filePtr, ptr.p->ctlFilePtr);
+ ndbrequire(filePtr.p->fileRunning == 0);
+ filePtr.p->fileRunning = 1;
+
+ req->userPointer = filePtr.i;
+ FsOpenReq::setVersion(req->fileNumber, 2);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL);
+ FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId);
+ FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId());
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+
+ /**
+ * Log file
+ */
+ c_backupFilePool.getPtr(filePtr, ptr.p->logFilePtr);
+ ndbrequire(filePtr.p->fileRunning == 0);
+ filePtr.p->fileRunning = 1;
+
+ req->userPointer = filePtr.i;
+ FsOpenReq::setVersion(req->fileNumber, 2);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_LOG);
+ FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId);
+ FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId());
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+
+ /**
+ * Data file
+ */
+ c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
+ ndbrequire(filePtr.p->fileRunning == 0);
+ filePtr.p->fileRunning = 1;
+
+ req->userPointer = filePtr.i;
+ FsOpenReq::setVersion(req->fileNumber, 2);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
+ FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId);
+ FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId());
+ FsOpenReq::v2_setCount(req->fileNumber, 0);
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+}
+
+void
+Backup::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+
+ FsRef * ref = (FsRef *)signal->getDataPtr();
+
+ const Uint32 userPtr = ref->userPointer;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, userPtr);
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+ ptr.p->setErrorCode(ref->errorCode);
+ openFilesReply(signal, ptr, filePtr);
+}
+
+void
+Backup::execFSOPENCONF(Signal* signal)
+{
+ jamEntry();
+
+ FsConf * conf = (FsConf *)signal->getDataPtr();
+
+ const Uint32 userPtr = conf->userPointer;
+ const Uint32 filePointer = conf->filePointer;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, userPtr);
+ filePtr.p->filePointer = filePointer;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ ndbrequire(filePtr.p->fileOpened == 0);
+ filePtr.p->fileOpened = 1;
+ openFilesReply(signal, ptr, filePtr);
+}
+
+void
+Backup::openFilesReply(Signal* signal,
+ BackupRecordPtr ptr, BackupFilePtr filePtr)
+{
+ jam();
+
+ /**
+ * Mark files as "opened"
+ */
+ ndbrequire(filePtr.p->fileRunning == 1);
+ filePtr.p->fileRunning = 0;
+
+ /**
+ * Check if all files have received open_reply
+ */
+ for(ptr.p->files.first(filePtr); filePtr.i!=RNIL;ptr.p->files.next(filePtr))
+ {
+ jam();
+ if(filePtr.p->fileRunning == 1) {
+ jam();
+ return;
+ }//if
+ }//for
+
+ defineSlaveAbortCheck();
+
+ /**
+ * Did open succeed for all files
+ */
+ if(ptr.p->checkError()) {
+ jam();
+ defineBackupRef(signal, ptr);
+ return;
+ }//if
+
+ /**
+ * Insert file headers
+ */
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+ if(!insertFileHeader(BackupFormat::CTL_FILE, ptr.p, filePtr.p)) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader);
+ return;
+ }//if
+
+ ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
+ if(!insertFileHeader(BackupFormat::LOG_FILE, ptr.p, filePtr.p)) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader);
+ return;
+ }//if
+
+ ptr.p->files.getPtr(filePtr, ptr.p->dataFilePtr);
+ if(!insertFileHeader(BackupFormat::DATA_FILE, ptr.p, filePtr.p)) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader);
+ return;
+ }//if
+
+ /**
+ * Start CTL file thread
+ */
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+ filePtr.p->fileRunning = 1;
+
+ signal->theData[0] = BackupContinueB::START_FILE_THREAD;
+ signal->theData[1] = ptr.p->ctlFilePtr;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2);
+
+ /**
+ * Insert table list in ctl file
+ */
+ FsBuffer & buf = filePtr.p->operation.dataBuffer;
+
+ const Uint32 sz =
+ (sizeof(BackupFormat::CtlFile::TableList) >> 2) +
+ ptr.p->tables.noOfElements() - 1;
+
+ Uint32 * dst;
+ ndbrequire(sz < buf.getMaxWrite());
+ if(!buf.getWritePtr(&dst, sz)) {
+ jam();
+ defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertTableList);
+ return;
+ }//if
+
+ BackupFormat::CtlFile::TableList* tl =
+ (BackupFormat::CtlFile::TableList*)dst;
+ tl->SectionType = htonl(BackupFormat::TABLE_LIST);
+ tl->SectionLength = htonl(sz);
+
+ TablePtr tabPtr;
+ Uint32 count = 0;
+ for(ptr.p->tables.first(tabPtr);
+ tabPtr.i != RNIL;
+ ptr.p->tables.next(tabPtr)){
+ jam();
+ tl->TableIds[count] = htonl(tabPtr.p->tableId);
+ count++;
+ }//for
+
+ buf.updateWritePtr(sz);
+
+ /**
+ * Start getting table definition data
+ */
+ ndbrequire(ptr.p->tables.first(tabPtr));
+
+ signal->theData[0] = BackupContinueB::BUFFER_FULL_META;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3);
+ return;
+}
+
+bool
+Backup::insertFileHeader(BackupFormat::FileType ft,
+ BackupRecord * ptrP,
+ BackupFile * filePtrP){
+ FsBuffer & buf = filePtrP->operation.dataBuffer;
+
+ const Uint32 sz = sizeof(BackupFormat::FileHeader) >> 2;
+
+ Uint32 * dst;
+ ndbrequire(sz < buf.getMaxWrite());
+ if(!buf.getWritePtr(&dst, sz)) {
+ jam();
+ return false;
+ }//if
+
+ BackupFormat::FileHeader* header = (BackupFormat::FileHeader*)dst;
+ ndbrequire(sizeof(header->Magic) == sizeof(BACKUP_MAGIC));
+ memcpy(header->Magic, BACKUP_MAGIC, sizeof(BACKUP_MAGIC));
+ header->NdbVersion = htonl(NDB_VERSION);
+ header->SectionType = htonl(BackupFormat::FILE_HEADER);
+ header->SectionLength = htonl(sz - 3);
+ header->FileType = htonl(ft);
+ header->BackupId = htonl(ptrP->backupId);
+ header->BackupKey_0 = htonl(ptrP->backupKey[0]);
+ header->BackupKey_1 = htonl(ptrP->backupKey[1]);
+ header->ByteOrder = 0x12345678;
+
+ buf.updateWritePtr(sz);
+ return true;
+}
+
+void
+Backup::execGET_TABINFOREF(Signal* signal)
+{
+ GetTabInfoRef * ref = (GetTabInfoRef*)signal->getDataPtr();
+
+ const Uint32 senderData = ref->senderData;
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, senderData);
+
+ defineSlaveAbortCheck();
+
+ defineBackupRef(signal, ptr, ref->errorCode);
+}
+
+void
+Backup::execGET_TABINFO_CONF(Signal* signal)
+{
+ jamEntry();
+
+ if(!assembleFragments(signal)) {
+ jam();
+ return;
+ }//if
+
+ GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
+ //const Uint32 senderRef = info->senderRef;
+ const Uint32 len = conf->totalLen;
+ const Uint32 senderData = conf->senderData;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, senderData);
+
+ defineSlaveAbortCheck();
+
+ SegmentedSectionPtr dictTabInfoPtr;
+ signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
+ ndbrequire(dictTabInfoPtr.sz == len);
+
+ /**
+ * No of pages needed
+ */
+ const Uint32 noPages = (len + sizeof(Page32) - 1) / sizeof(Page32);
+ if(ptr.p->pages.getSize() < noPages) {
+ jam();
+ ptr.p->pages.release();
+ if(ptr.p->pages.seize(noPages) == false) {
+ jam();
+ ptr.p->setErrorCode(DefineBackupRef::FailedAllocateTableMem);
+ ndbrequire(false);
+ releaseSections(signal);
+ defineBackupRef(signal, ptr);
+ return;
+ }//if
+ }//if
+
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+ FsBuffer & buf = filePtr.p->operation.dataBuffer;
+ { // Write into ctl file
+ Uint32* dst, dstLen = len + 2;
+ if(!buf.getWritePtr(&dst, dstLen)) {
+ jam();
+ ndbrequire(false);
+ ptr.p->setErrorCode(DefineBackupRef::FailedAllocateTableMem);
+ releaseSections(signal);
+ defineBackupRef(signal, ptr);
+ return;
+ }//if
+ if(dst != 0) {
+ jam();
+
+ BackupFormat::CtlFile::TableDescription * desc =
+ (BackupFormat::CtlFile::TableDescription*)dst;
+ desc->SectionType = htonl(BackupFormat::TABLE_DESCRIPTION);
+ desc->SectionLength = htonl(len + 2);
+ dst += 2;
+
+ copy(dst, dictTabInfoPtr);
+ buf.updateWritePtr(dstLen);
+ }//if
+ }
+
+ ndbrequire(ptr.p->pages.getSize() >= noPages);
+ Page32Ptr pagePtr;
+ ptr.p->pages.getPtr(pagePtr, 0);
+ copy(&pagePtr.p->data[0], dictTabInfoPtr);
+ releaseSections(signal);
+
+ if(ptr.p->checkError()) {
+ jam();
+ defineBackupRef(signal, ptr);
+ return;
+ }//if
+
+ TablePtr tabPtr = parseTableDescription(signal, ptr, len);
+ if(tabPtr.i == RNIL) {
+ jam();
+ defineBackupRef(signal, ptr);
+ return;
+ }//if
+
+ TablePtr tmp = tabPtr;
+ ptr.p->tables.next(tabPtr);
+ if(DictTabInfo::isIndex(tmp.p->tableType)){
+ ptr.p->tables.release(tmp);
+ }
+
+ if(tabPtr.i == RNIL) {
+ jam();
+
+ ptr.p->pages.release();
+
+ ndbrequire(ptr.p->tables.first(tabPtr));
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabPtr.p->tableId;
+ signal->theData[2] = ptr.i;
+ sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB);
+ return;
+ }//if
+
+ signal->theData[0] = BackupContinueB::BUFFER_FULL_META;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3);
+ return;
+}
+
+Backup::TablePtr
+Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
+{
+
+ Page32Ptr pagePtr;
+ ptr.p->pages.getPtr(pagePtr, 0);
+
+ SimplePropertiesLinearReader it(&pagePtr.p->data[0], len);
+
+ it.first();
+
+ DictTabInfo::Table tmpTab; tmpTab.init();
+ SimpleProperties::UnpackStatus stat;
+ stat = SimpleProperties::unpack(it, &tmpTab,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+ ndbrequire(stat == SimpleProperties::Break);
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tmpTab.TableId));
+ if(DictTabInfo::isIndex(tabPtr.p->tableType)){
+ jam();
+ return tabPtr;
+ }
+
+ /**
+ * Initialize table object
+ */
+ tabPtr.p->frag_mask = RNIL;
+
+ tabPtr.p->schemaVersion = tmpTab.TableVersion;
+ tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
+ tabPtr.p->noOfNull = 0;
+ tabPtr.p->noOfVariable = 0; // Computed while iterating over attribs
+ tabPtr.p->sz_FixedAttributes = 0; // Computed while iterating over attribs
+ tabPtr.p->triggerIds[0] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->triggerIds[1] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->triggerIds[2] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->triggerAllocated[0] = false;
+ tabPtr.p->triggerAllocated[1] = false;
+ tabPtr.p->triggerAllocated[2] = false;
+
+ if(tabPtr.p->attributes.seize(tabPtr.p->noOfAttributes) == false) {
+ jam();
+ ptr.p->setErrorCode(DefineBackupRef::FailedToAllocateAttributeRecord);
+ tabPtr.i = RNIL;
+ return tabPtr;
+ }//if
+
+ const Uint32 count = tabPtr.p->noOfAttributes;
+ for(Uint32 i = 0; i<count; i++) {
+ jam();
+ DictTabInfo::Attribute tmp; tmp.init();
+ stat = SimpleProperties::unpack(it, &tmp,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+
+ ndbrequire(stat == SimpleProperties::Break);
+
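+ // AttributeSize appears to be the log2 of the element size in bits,
+ // so sz32 below is the attribute's total size rounded up to whole
+ // 32-bit words: ceil(arraySize * 2^AttributeSize / 32).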
+ const Uint32 arr = tmp.AttributeArraySize;
+ const Uint32 sz = 1 << tmp.AttributeSize;
+ const Uint32 sz32 = (sz * arr + 31) >> 5;
+
+ AttributePtr attrPtr;
+ tabPtr.p->attributes.getPtr(attrPtr, tmp.AttributeId);
+
+ attrPtr.p->data.nullable = tmp.AttributeNullableFlag;
+ attrPtr.p->data.fixed = (tmp.AttributeArraySize != 0);
+ attrPtr.p->data.sz32 = sz32;
+
+ /**
+ * Either
+ * 1) Fixed
+ * 2) Nullable
+ * 3) Variable
+ */
+ if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == false) {
+ jam();
+ attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes;
+ tabPtr.p->sz_FixedAttributes += sz32;
+ }//if
+
+ if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == true) {
+ jam();
+ attrPtr.p->data.offset = 0;
+
+ attrPtr.p->data.offsetNull = tabPtr.p->noOfNull;
+ tabPtr.p->noOfNull++;
+ tabPtr.p->noOfVariable++;
+ }//if
+
+ if(attrPtr.p->data.fixed == false) {
+ jam();
+ tabPtr.p->noOfVariable++;
+ ndbrequire(0);
+ }//if
+
+ it.next(); // Move Past EndOfAttribute
+ }//for
+ return tabPtr;
+}
+
+void
+Backup::execDI_FCOUNTCONF(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 userPtr = signal->theData[0];
+ const Uint32 fragCount = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+ const Uint32 senderData = signal->theData[3];
+
+ ndbrequire(userPtr == RNIL && signal->length() == 5);
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, senderData);
+
+ defineSlaveAbortCheck();
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tableId));
+
+ ndbrequire(tabPtr.p->fragments.seize(fragCount) != false);
+ tabPtr.p->frag_mask = calculate_frag_mask(fragCount);
+ for(Uint32 i = 0; i<fragCount; i++) {
+ jam();
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, i);
+ fragPtr.p->scanned = 0;
+ fragPtr.p->scanning = 0;
+ fragPtr.p->tableId = tableId;
+ fragPtr.p->node = RNIL;
+ }//for
+
+ /**
+ * Next table
+ */
+ if(ptr.p->tables.next(tabPtr)) {
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabPtr.p->tableId;
+ signal->theData[2] = ptr.i;
+ sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB);
+ return;
+ }//if
+
+ ptr.p->tables.first(tabPtr);
+ getFragmentInfo(signal, ptr, tabPtr, 0);
+}
+
+void
+Backup::getFragmentInfo(Signal* signal,
+ BackupRecordPtr ptr, TablePtr tabPtr, Uint32 fragNo)
+{
+ jam();
+
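+ /**
+ * For every fragment not yet scanned, ask DIH (DIGETPRIMREQ) which
+ * node holds its primary replica; execDIGETPRIMCONF records the node
+ * in the fragment record and re-enters this loop at the next fragment.
+ */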
+ for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) {
+ jam();
+ const Uint32 fragCount = tabPtr.p->fragments.getSize();
+ for(; fragNo < fragCount; fragNo ++) {
+ jam();
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragNo);
+
+ if(fragPtr.p->scanned == 0 && fragPtr.p->scanning == 0) {
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.p->tableId;
+ signal->theData[3] = fragNo;
+ sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ return;
+ }//if
+ }//for
+ fragNo = 0;
+ }//for
+
+ getFragmentInfoDone(signal, ptr);
+}
+
+void
+Backup::execDIGETPRIMCONF(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 userPtr = signal->theData[0];
+ const Uint32 senderData = signal->theData[1];
+ const Uint32 nodeCount = signal->theData[6];
+ const Uint32 tableId = signal->theData[7];
+ const Uint32 fragNo = signal->theData[8];
+
+ ndbrequire(userPtr == RNIL && signal->length() == 9);
+ ndbrequire(nodeCount > 0 && nodeCount <= MAX_REPLICAS);
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, senderData);
+
+ defineSlaveAbortCheck();
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tableId));
+
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragNo);
+
+ fragPtr.p->node = signal->theData[2];
+
+ getFragmentInfo(signal, ptr, tabPtr, fragNo + 1);
+}
+
+void
+Backup::getFragmentInfoDone(Signal* signal, BackupRecordPtr ptr)
+{
+ // Slave must now hold on to master data until
+ // AbortBackupOrd::OkToClean signal
+ ptr.p->okToCleanMaster = false;
+ ptr.p->slaveState.setState(DEFINED);
+ DefineBackupConf * conf = (DefineBackupConf*)signal->getDataPtr();
+ conf->backupPtr = ptr.i;
+ conf->backupId = ptr.p->backupId;
+ sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_CONF, signal,
+ DefineBackupConf::SignalLength, JBB);
+}
+
+
+/*****************************************************************************
+ *
+ * Slave functionality: Start backup
+ *
+ *****************************************************************************/
+void
+Backup::execSTART_BACKUP_REQ(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10015));
+
+ StartBackupReq* req = (StartBackupReq*)signal->getDataPtr();
+ const Uint32 ptrI = req->backupPtr;
+ const Uint32 backupId = req->backupId;
+ const Uint32 signalNo = req->signalNo;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ slaveAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->slaveState.setState(STARTED);
+
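+ /**
+ * Register the trigger ids created by the master: each trigger record
+ * is seized with its DICT-assigned id (seizeId), presumably so that a
+ * firing trigger can later be looked up directly by trigger id; all
+ * three triggers of a table share the log file's buffer operation.
+ */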
+ for(Uint32 i = 0; i<req->noOfTableTriggers; i++) {
+ jam();
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, req->tableTriggers[i].tableId));
+ for(Uint32 j = 0; j<3; j++) {
+ jam();
+ const Uint32 triggerId = req->tableTriggers[i].triggerIds[j];
+ tabPtr.p->triggerIds[j] = triggerId;
+
+ TriggerPtr trigPtr;
+ if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) {
+ jam();
+ StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
+ ref->backupPtr = ptr.i;
+ ref->backupId = ptr.p->backupId;
+ ref->signalNo = signalNo;
+ ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord;
+ sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
+ StartBackupRef::SignalLength, JBB);
+ return;
+ }//if
+
+ tabPtr.p->triggerAllocated[j] = true;
+ trigPtr.p->backupPtr = ptr.i;
+ trigPtr.p->tableId = tabPtr.p->tableId;
+ trigPtr.p->tab_ptr_i = tabPtr.i;
+ trigPtr.p->logEntry = 0;
+ trigPtr.p->event = j;
+ trigPtr.p->maxRecordSize = 2048;
+ trigPtr.p->operation =
+ &ptr.p->files.getPtr(ptr.p->logFilePtr)->operation;
+ trigPtr.p->operation->noOfBytes = 0;
+ trigPtr.p->operation->noOfRecords = 0;
+ trigPtr.p->errorCode = 0;
+ }//for
+ }//for
+
+ /**
+ * Start file threads...
+ */
+ BackupFilePtr filePtr;
+ for(ptr.p->files.first(filePtr);
+ filePtr.i!=RNIL;
+ ptr.p->files.next(filePtr)){
+ jam();
+ if(filePtr.p->fileRunning == 0) {
+ jam();
+ filePtr.p->fileRunning = 1;
+ signal->theData[0] = BackupContinueB::START_FILE_THREAD;
+ signal->theData[1] = filePtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2);
+ }//if
+ }//for
+
+ StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend();
+ conf->backupPtr = ptr.i;
+ conf->backupId = ptr.p->backupId;
+ conf->signalNo = signalNo;
+ sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal,
+ StartBackupConf::SignalLength, JBB);
+}
+
+/*****************************************************************************
+ *
+ * Slave functionality: Backup fragment
+ *
+ *****************************************************************************/
+void
+Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
+{
+ jamEntry();
+ BackupFragmentReq* req = (BackupFragmentReq*)signal->getDataPtr();
+
+ CRASH_INSERTION((10016));
+
+ const Uint32 ptrI = req->backupPtr;
+ const Uint32 backupId = req->backupId;
+ const Uint32 tableId = req->tableId;
+ const Uint32 fragNo = req->fragmentNo;
+ const Uint32 count = req->count;
+
+ /**
+ * Get backup record
+ */
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ slaveAbortCheck(); // macro will do return if ABORTING
+
+ ptr.p->slaveState.setState(SCANNING);
+
+ /**
+ * Get file
+ */
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
+
+ ndbrequire(filePtr.p->backupPtr == ptrI);
+ ndbrequire(filePtr.p->fileOpened == 1);
+ ndbrequire(filePtr.p->fileRunning == 1);
+ ndbrequire(filePtr.p->scanRunning == 0);
+ ndbrequire(filePtr.p->fileDone == 0);
+
+ /**
+ * Get table
+ */
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, tableId));
+
+ /**
+ * Get fragment
+ */
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragNo);
+
+ ndbrequire(fragPtr.p->scanned == 0);
+ ndbrequire(fragPtr.p->scanning == 0 ||
+ refToNode(ptr.p->masterRef) == getOwnNodeId());
+
+ /**
+ * Init operation
+ */
+ if(filePtr.p->tableId != tableId) {
+ jam();
+ filePtr.p->operation.init(tabPtr);
+ filePtr.p->tableId = tableId;
+ }//if
+
+ /**
+ * Check for space in buffer
+ */
+ if(!filePtr.p->operation.newFragment(tableId, fragNo)) {
+ jam();
+ req->count = count + 1;
+ sendSignalWithDelay(BACKUP_REF, GSN_BACKUP_FRAGMENT_REQ, signal, 50,
+ signal->length());
+ ptr.p->slaveState.setState(STARTED);
+ return;
+ }//if
+
+ /**
+ * Mark things as "in use"
+ */
+ fragPtr.p->scanning = 1;
+ filePtr.p->fragmentNo = fragNo;
+
+ /**
+ * Start scan
+ */
+ {
+ filePtr.p->scanRunning = 1;
+
+ Table & table = * tabPtr.p;
+ ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend();
+ const Uint32 parallelism = 16;
+ const Uint32 attrLen = 5 + table.noOfAttributes;
+
+ req->senderData = filePtr.i;
+ req->resultRef = reference();
+ req->schemaVersion = table.schemaVersion;
+ req->fragmentNoKeyLen = fragNo;
+ req->requestInfo = 0;
+ req->savePointId = 0;
+ req->tableId = table.tableId;
+ ScanFragReq::setLockMode(req->requestInfo, 0);
+ ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
+ ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
+ ScanFragReq::setAttrLen(req->requestInfo,attrLen);
+ req->transId1 = 0;
+ req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
+ req->clientOpPtr= filePtr.i;
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
+ sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
+
+ signal->theData[0] = filePtr.i;
+ signal->theData[1] = 0;
+ signal->theData[2] = (BACKUP << 20) + (getOwnNodeId() << 8);
+
+ // Return all
+ signal->theData[3] = table.noOfAttributes;
+ signal->theData[4] = 0;
+ signal->theData[5] = 0;
+ signal->theData[6] = 0;
+ signal->theData[7] = 0;
+
+ Uint32 dataPos = 8;
+ Uint32 i;
+ for(i = 0; i<table.noOfAttributes; i++) {
+ jam();
+ AttributePtr attr;
+ table.attributes.getPtr(attr, i);
+
+ AttributeHeader::init(&signal->theData[dataPos], i, 0);
+ dataPos++;
+ if(dataPos == 25) {
+ jam();
+ sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, 25, JBB);
+ dataPos = 3;
+ }//if
+ }//for
+ if(dataPos != 3) {
+ jam();
+ sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, dataPos, JBB);
+ }//if
+ }
+}
+
+void
+Backup::execSCAN_HBREP(Signal* signal)
+{
+ jamEntry();
+}
+
+void
+Backup::execTRANSID_AI(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 filePtrI = signal->theData[0];
+ //const Uint32 transId1 = signal->theData[1];
+ //const Uint32 transId2 = signal->theData[2];
+ const Uint32 dataLen = signal->length() - 3;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ OperationRecord & op = filePtr.p->operation;
+
+ TablePtr tabPtr;
+ c_tablePool.getPtr(tabPtr, op.tablePtr);
+
+ Table & table = * tabPtr.p;
+
+ /**
+ * Unpack data
+ */
+ op.attrSzTotal += dataLen;
+
+ Uint32 srcSz = dataLen;
+ const Uint32 * src = &signal->theData[3];
+
+ Uint32 * dst = op.dst;
+ Uint32 dstSz = op.attrSzLeft;
+
+ while(srcSz > 0) {
+ jam();
+
+ if(dstSz == 0) {
+ jam();
+
+ /**
+ * Finished with one attribute, now find the next
+ */
+ const AttributeHeader attrHead(* src);
+ const Uint32 attrId = attrHead.getAttributeId();
+ const bool null = attrHead.isNULL();
+ const Attribute::Data attr = table.attributes.getPtr(attrId)->data;
+
+ srcSz -= attrHead.getHeaderSize();
+ src += attrHead.getHeaderSize();
+
+ if(null) {
+ jam();
+ ndbrequire(attr.nullable);
+ op.nullAttribute(attr.offsetNull);
+ dstSz = 0;
+ continue;
+ }//if
+
+ dstSz = attrHead.getDataSize();
+ ndbrequire(dstSz == attr.sz32);
+ if(attr.fixed && ! attr.nullable) {
+ jam();
+ dst = op.newAttrib(attr.offset, dstSz);
+ } else if (attr.fixed && attr.nullable) {
+ jam();
+ dst = op.newNullable(attrId, dstSz);
+ } else {
+ ndbrequire(false);
+ //dst = op.newVariable(attrId, attrSize);
+ }//if
+ }//if
+
+ const Uint32 szCopy = (dstSz > srcSz) ? srcSz : dstSz;
+ memcpy(dst, src, (szCopy << 2));
+
+ srcSz -= szCopy;
+ dstSz -= szCopy;
+ src += szCopy;
+ dst += szCopy;
+ }//while
+ op.dst = dst;
+ op.attrSzLeft = dstSz;
+
+ if(op.finished()){
+ jam();
+ op.newRecord(op.dst);
+ }
+}
+
+void
+Backup::OperationRecord::init(const TablePtr & ptr)
+{
+
+ tablePtr = ptr.i;
+ noOfAttributes = ptr.p->noOfAttributes;
+
+ sz_Bitmask = (ptr.p->noOfNull + 31) >> 5;
+ sz_FixedAttribs = ptr.p->sz_FixedAttributes;
+
+ if(ptr.p->noOfVariable == 0) {
+ jam();
+ maxRecordSize = 1 + sz_Bitmask + sz_FixedAttribs;
+ } else {
+ jam();
+ maxRecordSize =
+ 1 + sz_Bitmask + 2048 /* Max tuple size */ + 2 * ptr.p->noOfVariable;
+ }//if
+}
+
+bool
+Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo)
+{
+ Uint32 * tmp;
+ const Uint32 headSz = (sizeof(BackupFormat::DataFile::FragmentHeader) >> 2);
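+ // Reserve the fragment header plus room for one scan batch of 16
+ // records (16 matches batch_size_rows in the SCAN_FRAGREQ sent by
+ // execBACKUP_FRAGMENT_REQ).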
+ const Uint32 sz = headSz + 16 * maxRecordSize;
+
+ ndbrequire(sz < dataBuffer.getMaxWrite());
+ if(dataBuffer.getWritePtr(&tmp, sz)) {
+ jam();
+ BackupFormat::DataFile::FragmentHeader * head =
+ (BackupFormat::DataFile::FragmentHeader*)tmp;
+
+ head->SectionType = htonl(BackupFormat::FRAGMENT_HEADER);
+ head->SectionLength = htonl(headSz);
+ head->TableId = htonl(tableId);
+ head->FragmentNo = htonl(fragNo);
+ head->ChecksumType = htonl(0);
+
+ opNoDone = opNoConf = opLen = 0;
+ newRecord(tmp + headSz);
+ scanStart = tmp;
+ scanStop = (tmp + headSz);
+
+ noOfRecords = 0;
+ noOfBytes = 0;
+ return true;
+ }//if
+ return false;
+}
+
+bool
+Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo)
+{
+ Uint32 * tmp;
+ const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2;
+
+ if(dataBuffer.getWritePtr(&tmp, footSz + 1)) {
+ jam();
+ * tmp = 0; // Finish record stream
+ tmp++;
+ BackupFormat::DataFile::FragmentFooter * foot =
+ (BackupFormat::DataFile::FragmentFooter*)tmp;
+ foot->SectionType = htonl(BackupFormat::FRAGMENT_FOOTER);
+ foot->SectionLength = htonl(footSz);
+ foot->TableId = htonl(tableId);
+ foot->FragmentNo = htonl(fragNo);
+ foot->NoOfRecords = htonl(noOfRecords);
+ foot->Checksum = htonl(0);
+ dataBuffer.updateWritePtr(footSz + 1);
+ return true;
+ }//if
+ return false;
+}
+
+bool
+Backup::OperationRecord::newScan()
+{
+ Uint32 * tmp;
+ ndbrequire(16 * maxRecordSize < dataBuffer.getMaxWrite());
+ if(dataBuffer.getWritePtr(&tmp, 16 * maxRecordSize)) {
+ jam();
+ opNoDone = opNoConf = opLen = 0;
+ newRecord(tmp);
+ scanStart = tmp;
+ scanStop = tmp;
+ return true;
+ }//if
+ return false;
+}
+
+bool
+Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 total_len)
+{
+ const Uint32 done = opNoDone-opNoConf;
+
+ ndbrequire(noOfOps == done);
+ ndbrequire(opLen == total_len);
+ opNoConf = opNoDone;
+
+ const Uint32 len = (scanStop - scanStart);
+ ndbrequire(len < dataBuffer.getMaxWrite());
+ dataBuffer.updateWritePtr(len);
+ noOfBytes += (len << 2);
+ return true;
+}
+
+void
+Backup::execSCAN_FRAGREF(Signal* signal)
+{
+ jamEntry();
+
+ ScanFragRef * ref = (ScanFragRef*)signal->getDataPtr();
+
+ const Uint32 filePtrI = ref->senderData;
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ filePtr.p->errorCode = ref->errorCode;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ abortFile(signal, ptr, filePtr);
+}
+
+void
+Backup::execSCAN_FRAGCONF(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10017));
+
+ ScanFragConf * conf = (ScanFragConf*)signal->getDataPtr();
+
+ const Uint32 filePtrI = conf->senderData;
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ OperationRecord & op = filePtr.p->operation;
+
+ op.scanConf(conf->completedOps, conf->total_len);
+ const Uint32 completed = conf->fragmentCompleted;
+ if(completed != 2) {
+ jam();
+
+ checkScan(signal, filePtr);
+ return;
+ }//if
+
+ fragmentCompleted(signal, filePtr);
+}
+
+void
+Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
+{
+ jam();
+
+ if(filePtr.p->errorCode != 0){
+ jam();
+ abortFileHook(signal, filePtr, true); // Scan completed
+ return;
+ }//if
+
+ OperationRecord & op = filePtr.p->operation;
+ if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo)) {
+ jam();
+ signal->theData[0] = BackupContinueB::BUFFER_FULL_FRAG_COMPLETE;
+ signal->theData[1] = filePtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 50, 2);
+ return;
+ }//if
+
+ filePtr.p->scanRunning = 0;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtrSend();
+ conf->backupId = ptr.p->backupId;
+ conf->backupPtr = ptr.i;
+ conf->tableId = filePtr.p->tableId;
+ conf->fragmentNo = filePtr.p->fragmentNo;
+ conf->noOfRecords = op.noOfRecords;
+ conf->noOfBytes = op.noOfBytes;
+ sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal,
+ BackupFragmentConf::SignalLength, JBB);
+
+ ptr.p->slaveState.setState(STARTED);
+ return;
+}
+
+void
+Backup::checkScan(Signal* signal, BackupFilePtr filePtr)
+{
+ if(filePtr.p->errorCode != 0){
+ jam();
+ abortFileHook(signal, filePtr, false); // Scan not completed
+ return;
+ }//if
+
+ OperationRecord & op = filePtr.p->operation;
+ if(op.newScan()) {
+ jam();
+
+ ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend();
+ req->senderData = filePtr.i;
+ req->closeFlag = 0;
+ req->transId1 = 0;
+ req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
+ sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ return;
+ }//if
+
+ signal->theData[0] = BackupContinueB::BUFFER_FULL_SCAN;
+ signal->theData[1] = filePtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 50, 2);
+}
+
+void
+Backup::execFSAPPENDREF(Signal* signal)
+{
+ jamEntry();
+
+ FsRef * ref = (FsRef *)signal->getDataPtr();
+
+ const Uint32 filePtrI = ref->userPointer;
+ const Uint32 errCode = ref->errorCode;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ filePtr.p->fileRunning = 0;
+ filePtr.p->errorCode = errCode;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ abortFile(signal, ptr, filePtr);
+}
+
+void
+Backup::execFSAPPENDCONF(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION((10018));
+
+ //FsConf * conf = (FsConf*)signal->getDataPtr();
+ const Uint32 filePtrI = signal->theData[0]; //conf->userPointer;
+ const Uint32 bytes = signal->theData[1]; //conf->bytes;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ if (ERROR_INSERTED(10029)) {
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+ abortFile(signal, ptr, filePtr);
+ }//if
+
+ OperationRecord & op = filePtr.p->operation;
+
+ op.dataBuffer.updateReadPtr(bytes >> 2);
+
+ checkFile(signal, filePtr);
+}
+
+void
+Backup::checkFile(Signal* signal, BackupFilePtr filePtr)
+{
+
+#ifdef DEBUG_ABORT
+ // ndbout_c("---- check file filePtr.i = %u", filePtr.i);
+#endif
+
+ OperationRecord & op = filePtr.p->operation;
+
+ Uint32 * tmp, sz; bool eof;
+ if(op.dataBuffer.getReadPtr(&tmp, &sz, &eof)) {
+ jam();
+
+ if(filePtr.p->errorCode == 0) {
+ jam();
+ FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend();
+ req->filePointer = filePtr.p->filePointer;
+ req->userPointer = filePtr.i;
+ req->userReference = reference();
+ req->varIndex = 0;
+ req->offset = tmp - c_startOfPages;
+ req->size = sz;
+
+ sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal,
+ FsAppendReq::SignalLength, JBA);
+ return;
+ } else {
+ jam();
+ if (filePtr.p->scanRunning == 1)
+ eof = false;
+ }//if
+ }//if
+
+ if(!eof) {
+ jam();
+ signal->theData[0] = BackupContinueB::BUFFER_UNDERFLOW;
+ signal->theData[1] = filePtr.i;
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 50, 2);
+ return;
+ }//if
+
+ ndbrequire(filePtr.p->fileDone == 1);
+
+ if(sz > 0 && filePtr.p->errorCode == 0) {
+ jam();
+ FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend();
+ req->filePointer = filePtr.p->filePointer;
+ req->userPointer = filePtr.i;
+ req->userReference = reference();
+ req->varIndex = 0;
+ req->offset = tmp - c_startOfPages;
+ req->size = sz; // Round up
+
+ sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal,
+ FsAppendReq::SignalLength, JBA);
+ return;
+ }//if
+
+ filePtr.p->fileRunning = 0;
+
+ FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend();
+ req->filePointer = filePtr.p->filePointer;
+ req->userPointer = filePtr.i;
+ req->userReference = reference();
+ req->fileFlag = 0;
+#ifdef DEBUG_ABORT
+ ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i);
+#endif
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
+}
+
+void
+Backup::abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr)
+{
+ jam();
+
+ if(ptr.p->slaveState.getState() != ABORTING) {
+ /**
+ * Inform master of failure
+ */
+ jam();
+ ptr.p->slaveState.setState(ABORTING);
+ ptr.p->setErrorCode(AbortBackupOrd::FileOrScanError);
+ sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::FileOrScanError);
+ return;
+ }//if
+
+
+ for(ptr.p->files.first(filePtr);
+ filePtr.i!=RNIL;
+ ptr.p->files.next(filePtr)){
+ jam();
+ filePtr.p->errorCode = 1;
+ }//for
+
+ closeFiles(signal, ptr);
+}
+
+void
+Backup::abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanComplete)
+{
+ jam();
+
+ if(!scanComplete) {
+ jam();
+
+ ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend();
+ req->senderData = filePtr.i;
+ req->closeFlag = 1;
+ req->transId1 = 0;
+ req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
+ sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ return;
+ }//if
+
+ filePtr.p->scanRunning = 0;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ filePtr.i = RNIL;
+ abortFile(signal, ptr, filePtr);
+}
+
+/****************************************************************************
+ *
+ * Slave functionality: Perform logging
+ *
+ ****************************************************************************/
+Uint32
+Backup::calculate_frag_mask(Uint32 count)
+{
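+ // Returns the smallest 2^n - 1 that covers 'count' fragments; used by
+ // execBACKUP_TRIG_REQ to mask away the extra bit that ACC may set in
+ // the fragment identity.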
+ Uint32 mask = 1;
+ while (mask < count) mask <<= 1;
+ mask -= 1;
+ return mask;
+}
+
+void
+Backup::execBACKUP_TRIG_REQ(Signal* signal)
+{
+ /*
+ TUP asks if this trigger is to be fired on this node.
+ */
+ TriggerPtr trigPtr;
+ TablePtr tabPtr;
+ FragmentPtr fragPtr;
+ Uint32 trigger_id = signal->theData[0];
+ Uint32 frag_id = signal->theData[1];
+ Uint32 result;
+
+ jamEntry();
+ c_triggerPool.getPtr(trigPtr, trigger_id);
+ c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i);
+ frag_id = frag_id & tabPtr.p->frag_mask;
+ /*
+ At the moment the fragment identity known by TUP is the actual
+ fragment id, but possibly with an extra bit set. This is because
+ ACC splits the fragment, so fragment id 5 can arrive here as either
+ 5 or 13. Masking with 2 ** n - 1, where the number of fragments is
+ <= 2 ** n, therefore always yields the correct fragment id.
+ */
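+ // Example: with 8 fragments frag_mask is 0x7, so a fragment identity
+ // of 13 (5 + 8) is mapped back to fragment id 5.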
+ tabPtr.p->fragments.getPtr(fragPtr, frag_id);
+ if (fragPtr.p->node != getOwnNodeId()) {
+ jam();
+ result = ZFALSE;
+ } else {
+ jam();
+ result = ZTRUE;
+ }//if
+ signal->theData[0] = result;
+}
+
+void
+Backup::execTRIG_ATTRINFO(Signal* signal) {
+ jamEntry();
+
+ CRASH_INSERTION((10019));
+
+ TrigAttrInfo * trg = (TrigAttrInfo*)signal->getDataPtr();
+
+ TriggerPtr trigPtr;
+ c_triggerPool.getPtr(trigPtr, trg->getTriggerId());
+ ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online...
+
+ if(trigPtr.p->errorCode != 0) {
+ jam();
+ return;
+ }//if
+
+ if(trg->getAttrInfoType() == TrigAttrInfo::BEFORE_VALUES) {
+ jam();
+ /**
+ * Backup is doing REDO logging and does not need before values
+ */
+ return;
+ }//if
+
+ BackupFormat::LogFile::LogEntry * logEntry = trigPtr.p->logEntry;
+ if(logEntry == 0) {
+ jam();
+ Uint32 * dst;
+ FsBuffer & buf = trigPtr.p->operation->dataBuffer;
+ ndbrequire(trigPtr.p->maxRecordSize <= buf.getMaxWrite());
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, trigPtr.p->backupPtr);
+ if(!buf.getWritePtr(&dst, trigPtr.p->maxRecordSize)) {
+ jam();
+ trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull;
+ sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull);
+ return;
+ }//if
+ if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) {
+ jam();
+ trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull;
+ sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull);
+ return;
+ }//if
+
+ logEntry = (BackupFormat::LogFile::LogEntry *)dst;
+ trigPtr.p->logEntry = logEntry;
+ logEntry->Length = 0;
+ logEntry->TableId = htonl(trigPtr.p->tableId);
+ logEntry->TriggerEvent = htonl(trigPtr.p->event);
+ } else {
+ ndbrequire(logEntry->TableId == htonl(trigPtr.p->tableId));
+ ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event));
+ }//if
+
+ const Uint32 pos = logEntry->Length;
+ const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength;
+ memcpy(&logEntry->Data[pos], trg->getData(), dataLen << 2);
+
+ logEntry->Length = pos + dataLen;
+}
+
+void
+Backup::execFIRE_TRIG_ORD(Signal* signal)
+{
+ jamEntry();
+ FireTrigOrd* trg = (FireTrigOrd*)signal->getDataPtr();
+
+ const Uint32 gci = trg->getGCI();
+ const Uint32 trI = trg->getTriggerId();
+
+ TriggerPtr trigPtr;
+ c_triggerPool.getPtr(trigPtr, trI);
+
+ ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID);
+
+ if(trigPtr.p->errorCode != 0) {
+ jam();
+ return;
+ }//if
+
+ ndbrequire(trigPtr.p->logEntry != 0);
+ Uint32 len = trigPtr.p->logEntry->Length;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, trigPtr.p->backupPtr);
+ if(gci != ptr.p->currGCP) {
+ jam();
+
+ trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000);
+ trigPtr.p->logEntry->Data[len] = htonl(gci);
+ len ++;
+ ptr.p->currGCP = gci;
+ }//if
+
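+ // Add the TableId and TriggerEvent words of the log entry header;
+ // the "- 2" drops the Length word and the Data[1] placeholder that
+ // sizeof(LogEntry) includes.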
+ len += (sizeof(BackupFormat::LogFile::LogEntry) >> 2) - 2;
+ trigPtr.p->logEntry->Length = htonl(len);
+
+ ndbrequire(len + 1 <= trigPtr.p->operation->dataBuffer.getMaxWrite());
+ trigPtr.p->operation->dataBuffer.updateWritePtr(len + 1);
+ trigPtr.p->logEntry = 0;
+
+ trigPtr.p->operation->noOfBytes += (len + 1) << 2;
+ trigPtr.p->operation->noOfRecords += 1;
+}
+
+void
+Backup::sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr,
+ Uint32 requestType)
+{
+ jam();
+ AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend();
+ ord->backupId = ptr.p->backupId;
+ ord->backupPtr = ptr.i;
+ ord->requestType = requestType;
+ ord->senderData= ptr.i;
+ sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal,
+ AbortBackupOrd::SignalLength, JBB);
+}
+
+void
+Backup::sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr,
+ Uint32 requestType)
+{
+ jam();
+ AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend();
+ ord->backupId = ptr.p->backupId;
+ ord->backupPtr = ptr.i;
+ ord->requestType = requestType;
+ ord->senderData= ptr.i;
+ NodePtr node;
+ for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)) {
+ jam();
+ const Uint32 nodeId = node.p->nodeId;
+ if(node.p->alive && ptr.p->nodes.get(nodeId)) {
+ jam();
+ sendSignal(numberToRef(BACKUP, nodeId), GSN_ABORT_BACKUP_ORD, signal,
+ AbortBackupOrd::SignalLength, JBB);
+ }//if
+ }//for
+}
+
+/*****************************************************************************
+ *
+ * Slave functionality: Stop backup
+ *
+ *****************************************************************************/
+void
+Backup::execSTOP_BACKUP_REQ(Signal* signal)
+{
+ jamEntry();
+ StopBackupReq * req = (StopBackupReq*)signal->getDataPtr();
+
+ CRASH_INSERTION((10020));
+
+ const Uint32 ptrI = req->backupPtr;
+ const Uint32 backupId = req->backupId;
+ const Uint32 startGCP = req->startGCP;
+ const Uint32 stopGCP = req->stopGCP;
+
+ /**
+ * At least one GCP must have passed
+ */
+ ndbrequire(stopGCP > startGCP);
+
+ /**
+ * Get backup record
+ */
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ ptr.p->slaveState.setState(STOPPING);
+ slaveAbortCheck(); // macro will do return if ABORTING
+
+ /**
+ * Insert footers
+ */
+ {
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
+ Uint32 * dst;
+ ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1));
+ * dst = 0;
+ filePtr.p->operation.dataBuffer.updateWritePtr(1);
+ }
+
+ {
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+
+ const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2;
+
+ Uint32 * dst;
+ ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz));
+
+ BackupFormat::CtlFile::GCPEntry * gcp =
+ (BackupFormat::CtlFile::GCPEntry*)dst;
+
+ gcp->SectionType = htonl(BackupFormat::GCP_ENTRY);
+ gcp->SectionLength = htonl(gcpSz);
+ gcp->StartGCP = htonl(startGCP);
+ gcp->StopGCP = htonl(stopGCP - 1);
+ filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
+ }
+
+ closeFiles(signal, ptr);
+}
+
+void
+Backup::closeFiles(Signal* sig, BackupRecordPtr ptr)
+{
+ if (ptr.p->closingFiles) {
+ jam();
+ return;
+ }
+ ptr.p->closingFiles = true;
+
+ /**
+ * Close all files
+ */
+ BackupFilePtr filePtr;
+ int openCount = 0;
+ for(ptr.p->files.first(filePtr); filePtr.i!=RNIL; ptr.p->files.next(filePtr))
+ {
+ if(filePtr.p->fileOpened == 0) {
+ jam();
+ continue;
+ }
+
+ jam();
+ openCount++;
+
+ if(filePtr.p->fileDone == 1){
+ jam();
+ continue;
+ }//if
+
+ filePtr.p->fileDone = 1;
+
+ if(filePtr.p->fileRunning == 1){
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("Close files fileRunning == 1, filePtr.i=%u", filePtr.i);
+#endif
+ filePtr.p->operation.dataBuffer.eof();
+ } else {
+ jam();
+
+ FsCloseReq * req = (FsCloseReq *)sig->getDataPtrSend();
+ req->filePointer = filePtr.p->filePointer;
+ req->userPointer = filePtr.i;
+ req->userReference = reference();
+ req->fileFlag = 0;
+#ifdef DEBUG_ABORT
+ ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i);
+#endif
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, sig,
+ FsCloseReq::SignalLength, JBA);
+ }//if
+ }//for
+
+ if(openCount == 0){
+ jam();
+ closeFilesDone(sig, ptr);
+ }//if
+}
+
+void
+Backup::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+
+ FsRef * ref = (FsRef*)signal->getDataPtr();
+ const Uint32 filePtrI = ref->userPointer;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
+ /**
+ * This should only happen during abort of backup
+ */
+ ndbrequire(ptr.p->slaveState.getState() == ABORTING);
+
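+ // Fake a successful close confirmation so the normal FSCLOSECONF path
+ // performs the cleanup for this file.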
+ filePtr.p->fileOpened = 1;
+ FsConf * conf = (FsConf*)signal->getDataPtr();
+ conf->userPointer = filePtrI;
+
+ execFSCLOSECONF(signal);
+}
+
+void
+Backup::execFSCLOSECONF(Signal* signal)
+{
+ jamEntry();
+
+ FsConf * conf = (FsConf*)signal->getDataPtr();
+ const Uint32 filePtrI = conf->userPointer;
+
+ BackupFilePtr filePtr;
+ c_backupFilePool.getPtr(filePtr, filePtrI);
+
+#ifdef DEBUG_ABORT
+ ndbout_c("***** FSCLOSECONF filePtrI = %u", filePtrI);
+#endif
+
+ ndbrequire(filePtr.p->fileDone == 1);
+ ndbrequire(filePtr.p->fileOpened == 1);
+ ndbrequire(filePtr.p->fileRunning == 0);
+ ndbrequire(filePtr.p->scanRunning == 0);
+
+ filePtr.p->fileOpened = 0;
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+ for(ptr.p->files.first(filePtr); filePtr.i!=RNIL;ptr.p->files.next(filePtr))
+ {
+ jam();
+ if(filePtr.p->fileOpened == 1) {
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("waiting for more FSCLOSECONF's filePtr.i = %u", filePtr.i);
+#endif
+ return; // we will be getting more FSCLOSECONF's
+ }//if
+ }//for
+ closeFilesDone(signal, ptr);
+}
+
+void
+Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr)
+{
+ jam();
+
+ if(ptr.p->slaveState.getState() == STOPPING) {
+ jam();
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
+
+ StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend();
+ conf->backupId = ptr.p->backupId;
+ conf->backupPtr = ptr.i;
+ conf->noOfLogBytes = filePtr.p->operation.noOfBytes;
+ conf->noOfLogRecords = filePtr.p->operation.noOfRecords;
+ sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal,
+ StopBackupConf::SignalLength, JBB);
+
+ ptr.p->slaveState.setState(CLEANING);
+ return;
+ }//if
+
+ ndbrequire(ptr.p->slaveState.getState() == ABORTING);
+ removeBackup(signal, ptr);
+}
+
+/*****************************************************************************
+ *
+ * Slave functionality: Abort backup
+ *
+ *****************************************************************************/
+void
+Backup::removeBackup(Signal* signal, BackupRecordPtr ptr)
+{
+ jam();
+
+ FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend();
+ req->userReference = reference();
+ req->userPointer = ptr.i;
+ req->directory = 1;
+ req->ownDirectory = 1;
+ FsOpenReq::setVersion(req->fileNumber, 2);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL);
+ FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId);
+ FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId());
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
+ FsRemoveReq::SignalLength, JBA);
+}
+
+void
+Backup::execFSREMOVEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(0);
+}
+
+void
+Backup::execFSREMOVECONF(Signal* signal){
+ jamEntry();
+
+ FsConf * conf = (FsConf*)signal->getDataPtr();
+ const Uint32 ptrI = conf->userPointer;
+
+ /**
+ * Get backup record
+ */
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptrI);
+
+ ndbrequire(ptr.p->slaveState.getState() == ABORTING);
+ if (ptr.p->masterRef == reference()) {
+ if (ptr.p->masterData.state.getAbortState() == DEFINING) {
+ jam();
+ sendBackupRef(signal, ptr, ptr.p->errorCode);
+ return;
+ } else {
+ jam();
+ }//if
+ }//if
+ cleanupSlaveResources(ptr);
+}
+
+/*****************************************************************************
+ *
+ * Slave functionality: Abort backup
+ *
+ *****************************************************************************/
+void
+Backup::execABORT_BACKUP_ORD(Signal* signal)
+{
+ jamEntry();
+ AbortBackupOrd* ord = (AbortBackupOrd*)signal->getDataPtr();
+
+ const Uint32 backupId = ord->backupId;
+ const AbortBackupOrd::RequestType requestType =
+ (AbortBackupOrd::RequestType)ord->requestType;
+ const Uint32 senderData = ord->senderData;
+
+#ifdef DEBUG_ABORT
+ ndbout_c("******** ABORT_BACKUP_ORD ********* nodeId = %u",
+ refToNode(signal->getSendersBlockRef()));
+ ndbout_c("backupId = %u, requestType = %u, senderData = %u, ",
+ backupId, requestType, senderData);
+ dumpUsedResources();
+#endif
+
+ BackupRecordPtr ptr;
+ if(requestType == AbortBackupOrd::ClientAbort) {
+ if (getOwnNodeId() != getMasterNodeId()) {
+ jam();
+ // forward to master
+#ifdef DEBUG_ABORT
+ ndbout_c("---- Forward to master nodeId = %u", getMasterNodeId());
+#endif
+ sendSignal(calcBackupBlockRef(getMasterNodeId()), GSN_ABORT_BACKUP_ORD,
+ signal, AbortBackupOrd::SignalLength, JBB);
+ return;
+ }
+ jam();
+ for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) {
+ jam();
+ if(ptr.p->backupId == backupId && ptr.p->clientData == senderData) {
+ jam();
+ break;
+ }//if
+ }//for
+ if(ptr.i == RNIL) {
+ jam();
+ return;
+ }//if
+ } else {
+ if (c_backupPool.findId(senderData)) {
+ jam();
+ c_backupPool.getPtr(ptr, senderData);
+ } else { // TODO might be an abort sent to a non-master node,
+ // or the master aborting too early
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("Backup: abort request type=%u on id=%u,%u not found",
+ requestType, backupId, senderData);
+#endif
+ return;
+ }
+ }//if
+
+ const bool isCoordinator = (ptr.p->masterRef == reference());
+
+ bool ok = false;
+ switch(requestType){
+
+ /**
+ * Requests sent to master
+ */
+
+ case AbortBackupOrd::ClientAbort:
+ jam();
+ // fall through
+ case AbortBackupOrd::LogBufferFull:
+ jam();
+ // fall through
+ case AbortBackupOrd::FileOrScanError:
+ jam();
+ if(ptr.p->masterData.state.getState() == ABORTING) {
+#ifdef DEBUG_ABORT
+ ndbout_c("---- Already aborting");
+#endif
+ jam();
+ return;
+ }
+ ptr.p->setErrorCode(requestType);
+ ndbrequire(isCoordinator); // Sent from slave to coordinator
+ masterAbort(signal, ptr, false);
+ return;
+
+ /**
+ * Info sent to slave
+ */
+
+ case AbortBackupOrd::OkToClean:
+ jam();
+ cleanupMasterResources(ptr);
+ return;
+
+ /**
+ * Requests sent to slave
+ */
+
+ case AbortBackupOrd::BackupComplete:
+ jam();
+ if (ptr.p->slaveState.getState() == CLEANING) { // TODO what if state is
+ // not CLEANING?
+ jam();
+ cleanupSlaveResources(ptr);
+ }//if
+ return;
+ break;
+ case AbortBackupOrd::BackupFailureDueToNodeFail:
+ jam();
+ ok = true;
+ if (ptr.p->errorCode != 0)
+ ptr.p->setErrorCode(requestType);
+ break;
+ case AbortBackupOrd::BackupFailure:
+ jam();
+ ok = true;
+ break;
+ }
+ ndbrequire(ok);
+
+ /**
+ * Slave abort
+ */
+ slaveAbort(signal, ptr);
+}
+
+void
+Backup::slaveAbort(Signal* signal, BackupRecordPtr ptr)
+{
+ if(ptr.p->slaveState.getState() == ABORTING) {
+#ifdef DEBUG_ABORT
+ ndbout_c("---- Slave already aborting");
+#endif
+ jam();
+ return;
+ }
+#ifdef DEBUG_ABORT
+ ndbout_c("************* slaveAbort");
+#endif
+
+ State slaveState = ptr.p->slaveState.getState();
+ ptr.p->slaveState.setState(ABORTING);
+ switch(slaveState) {
+ case DEFINING:
+ jam();
+ return;
+//------------------------------------------
+// Will watch for the abort at various places
+// in the defining phase.
+//------------------------------------------
+ case ABORTING:
+ jam();
+ //Fall through
+ case DEFINED:
+ jam();
+ //Fall through
+ case STOPPING:
+ jam();
+ closeFiles(signal, ptr);
+ return;
+ case STARTED:
+ jam();
+ //Fall through
+ case SCANNING:
+ jam();
+ BackupFilePtr filePtr;
+ filePtr.i = RNIL;
+ abortFile(signal, ptr, filePtr);
+ return;
+ case CLEANING:
+ jam();
+ cleanupSlaveResources(ptr);
+ return;
+ case INITIAL:
+ jam();
+ ndbrequire(false);
+ return;
+ }
+}
+
+void
+Backup::dumpUsedResources()
+{
+ jam();
+ BackupRecordPtr ptr;
+
+ for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) {
+ ndbout_c("Backup id=%u, slaveState.getState = %u, errorCode=%u",
+ ptr.p->backupId,
+ ptr.p->slaveState.getState(),
+ ptr.p->errorCode);
+
+ TablePtr tabPtr;
+ for(ptr.p->tables.first(tabPtr);
+ tabPtr.i != RNIL;
+ ptr.p->tables.next(tabPtr)) {
+ jam();
+ for(Uint32 j = 0; j<3; j++) {
+ jam();
+ TriggerPtr trigPtr;
+ if(tabPtr.p->triggerAllocated[j]) {
+ jam();
+ c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]);
+ ndbout_c("Allocated[%u] Triggerid = %u, event = %u",
+ j,
+ tabPtr.p->triggerIds[j],
+ trigPtr.p->event);
+ }//if
+ }//for
+ }//for
+
+ BackupFilePtr filePtr;
+ for(ptr.p->files.first(filePtr);
+ filePtr.i != RNIL;
+ ptr.p->files.next(filePtr)) {
+ jam();
+ ndbout_c("filePtr.i = %u, filePtr.p->fileOpened=%u fileRunning=%u "
+ "scanRunning=%u",
+ filePtr.i,
+ filePtr.p->fileOpened,
+ filePtr.p->fileRunning,
+ filePtr.p->scanRunning);
+ }//for
+ }
+}
+
+void
+Backup::cleanupMasterResources(BackupRecordPtr ptr)
+{
+#ifdef DEBUG_ABORT
+ ndbout_c("******** Cleanup Master Resources *********");
+ ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode);
+#endif
+
+ TablePtr tabPtr;
+ for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;ptr.p->tables.next(tabPtr))
+ {
+ jam();
+ tabPtr.p->attributes.release();
+ tabPtr.p->fragments.release();
+ for(Uint32 j = 0; j<3; j++) {
+ jam();
+ TriggerPtr trigPtr;
+ if(tabPtr.p->triggerAllocated[j]) {
+ jam();
+ c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]);
+ trigPtr.p->event = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->triggerAllocated[j] = false;
+ }//if
+ tabPtr.p->triggerIds[j] = ILLEGAL_TRIGGER_ID;
+ }//for
+ }//for
+ ptr.p->tables.release();
+ ptr.p->triggers.release();
+ ptr.p->okToCleanMaster = true;
+
+ cleanupFinalResources(ptr);
+}
+
+void
+Backup::cleanupSlaveResources(BackupRecordPtr ptr)
+{
+#ifdef DEBUG_ABORT
+ ndbout_c("******** Clean Up Slave Resources*********");
+ ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode);
+#endif
+
+ BackupFilePtr filePtr;
+ for(ptr.p->files.first(filePtr);
+ filePtr.i != RNIL;
+ ptr.p->files.next(filePtr)) {
+ jam();
+ ndbrequire(filePtr.p->fileOpened == 0);
+ ndbrequire(filePtr.p->fileRunning == 0);
+ ndbrequire(filePtr.p->scanRunning == 0);
+ filePtr.p->pages.release();
+ }//for
+ ptr.p->files.release();
+
+ cleanupFinalResources(ptr);
+}
+
+void
+Backup::cleanupFinalResources(BackupRecordPtr ptr)
+{
+#ifdef DEBUG_ABORT
+ ndbout_c("******** Clean Up Final Resources*********");
+ ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode);
+#endif
+
+ // if (!ptr.p->tables.empty() || !ptr.p->files.empty()) {
+ if (!ptr.p->okToCleanMaster || !ptr.p->files.empty()) {
+ jam();
+#ifdef DEBUG_ABORT
+ ndbout_c("******** Waiting to do final cleanup");
+#endif
+ return;
+ }
+ ptr.p->pages.release();
+ ptr.p->masterData.state.setState(INITIAL);
+ ptr.p->slaveState.setState(INITIAL);
+ ptr.p->backupId = 0;
+
+ ptr.p->closingFiles = false;
+ ptr.p->okToCleanMaster = true;
+
+ c_backups.release(ptr);
+ // ndbrequire(false);
+}
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
new file mode 100644
index 00000000000..1a5d6c7a925
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -0,0 +1,696 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef BACKUP_H
+#define BACKUP_H
+
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+
+#include "FsBuffer.hpp"
+#include "BackupFormat.hpp"
+
+#include <NodeBitmask.hpp>
+#include <SimpleProperties.hpp>
+
+#include <SLList.hpp>
+#include <ArrayList.hpp>
+#include <SignalCounter.hpp>
+#include <blocks/mutexes.hpp>
+
+#include <NdbTCP.h>
+
+/**
+ * Backup - This block manages database backup and restore
+ */
+class Backup : public SimulatedBlock
+{
+public:
+ Backup(const Configuration & conf);
+ virtual ~Backup();
+ BLOCK_DEFINES(Backup);
+
+protected:
+
+ void execSTTOR(Signal* signal);
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ /**
+ * Testing
+ */
+ void execBACKUP_REF(Signal* signal);
+ void execBACKUP_CONF(Signal* signal);
+ void execBACKUP_ABORT_REP(Signal* signal);
+ void execBACKUP_COMPLETE_REP(Signal* signal);
+
+ /**
+ * Signals sent from master
+ */
+ void execDEFINE_BACKUP_REQ(Signal* signal);
+ void execBACKUP_DATA(Signal* signal);
+ void execSTART_BACKUP_REQ(Signal* signal);
+ void execBACKUP_FRAGMENT_REQ(Signal* signal);
+ void execSTOP_BACKUP_REQ(Signal* signal);
+ void execBACKUP_STATUS_REQ(Signal* signal);
+ void execABORT_BACKUP_ORD(Signal* signal);
+
+ /**
+ * The actual scan
+ */
+ void execSCAN_HBREP(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execSCAN_FRAGREF(Signal* signal);
+ void execSCAN_FRAGCONF(Signal* signal);
+
+ /**
+ * Trigger logging
+ */
+ void execBACKUP_TRIG_REQ(Signal* signal);
+ void execTRIG_ATTRINFO(Signal* signal);
+ void execFIRE_TRIG_ORD(Signal* signal);
+
+ /**
+ * DICT signals
+ */
+ void execLIST_TABLES_CONF(Signal* signal);
+ void execGET_TABINFOREF(Signal* signal);
+ void execGET_TABINFO_CONF(Signal* signal);
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execALTER_TRIG_REF(Signal* signal);
+ void execALTER_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_CONF(Signal* signal);
+
+ /**
+ * DIH signals
+ */
+ void execDI_FCOUNTCONF(Signal* signal);
+ void execDIGETPRIMCONF(Signal* signal);
+
+ /**
+ * FS signals
+ */
+ void execFSOPENREF(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+
+ void execFSCLOSEREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+
+ void execFSAPPENDREF(Signal* signal);
+ void execFSAPPENDCONF(Signal* signal);
+
+ void execFSREMOVEREF(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+
+ /**
+ * Master functionality
+ */
+ void execBACKUP_REQ(Signal* signal);
+ void execABORT_BACKUP_REQ(Signal* signal);
+
+ void execDEFINE_BACKUP_REF(Signal* signal);
+ void execDEFINE_BACKUP_CONF(Signal* signal);
+
+ void execSTART_BACKUP_REF(Signal* signal);
+ void execSTART_BACKUP_CONF(Signal* signal);
+
+ void execBACKUP_FRAGMENT_REF(Signal* signal);
+ void execBACKUP_FRAGMENT_CONF(Signal* signal);
+
+ void execSTOP_BACKUP_REF(Signal* signal);
+ void execSTOP_BACKUP_CONF(Signal* signal);
+
+ void execBACKUP_STATUS_CONF(Signal* signal);
+
+ void execUTIL_SEQUENCE_REF(Signal* signal);
+ void execUTIL_SEQUENCE_CONF(Signal* signal);
+
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+
+
+private:
+ void defineBackupMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal);
+ void dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal);
+
+public:
+ struct Node {
+ Uint32 nodeId;
+ Uint32 alive;
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+ };
+ typedef Ptr<Node> NodePtr;
+
+#define BACKUP_WORDS_PER_PAGE 8191
+ struct Page32 {
+ Uint32 data[BACKUP_WORDS_PER_PAGE];
+ Uint32 nextPool;
+ };
+ typedef Ptr<Page32> Page32Ptr;
+
+ struct Attribute {
+ struct Data {
+ Uint8 nullable;
+ Uint8 fixed;
+ Uint8 unused;
+ Uint8 unused2;
+ Uint32 sz32; // No of 32 bit words
+ Uint32 offset; // Relative DataFixedAttributes/DataFixedKeys
+ Uint32 offsetNull; // In NullBitmask
+ } data;
+ Uint32 nextPool;
+ };
+ typedef Ptr<Attribute> AttributePtr;
+
+ struct Fragment {
+ Uint32 tableId;
+ Uint32 node;
+ Uint16 scanned; // 0 = not scanned x = scanned by node x
+ Uint16 scanning; // 0 = not scanning x = scanning on node x
+ Uint32 nextPool;
+ };
+ typedef Ptr<Fragment> FragmentPtr;
+
+ struct Table {
+ Table(ArrayPool<Attribute> &, ArrayPool<Fragment> &);
+
+ Uint32 tableId;
+ Uint32 schemaVersion;
+ Uint32 frag_mask;
+ Uint32 tableType;
+ Uint32 noOfNull;
+ Uint32 noOfAttributes;
+ Uint32 noOfVariable;
+ Uint32 sz_FixedAttributes;
+ Uint32 triggerIds[3];
+ bool triggerAllocated[3];
+
+ Array<Attribute> attributes;
+ Array<Fragment> fragments;
+
+ Uint32 nextList;
+ union { Uint32 nextPool; Uint32 prevList; };
+ };
+ typedef Ptr<Table> TablePtr;
+
+ struct OperationRecord {
+ public:
+ OperationRecord(Backup & b) : backup(b) {}
+
+ /**
+ * Once per table
+ */
+ void init(const TablePtr & ptr);
+
+ /**
+ * Once per fragment
+ */
+ bool newFragment(Uint32 tableId, Uint32 fragNo);
+ bool fragComplete(Uint32 tableId, Uint32 fragNo);
+
+ /**
+ * Once per scan frag (next) req/conf
+ */
+ bool newScan();
+ bool scanConf(Uint32 noOfOps, Uint32 opLen);
+
+ /**
+ * Per record
+ */
+ void newRecord(Uint32 * base);
+ bool finished();
+
+ /**
+ * Per attribute
+ */
+ void nullAttribute(Uint32 nullOffset);
+ Uint32 * newNullable(Uint32 attrId, Uint32 sz);
+ Uint32 * newAttrib(Uint32 offset, Uint32 sz);
+ Uint32 * newVariable(Uint32 id, Uint32 sz);
+
+ private:
+ Uint32* base;
+ Uint32* dst_Length;
+ Uint32* dst_Bitmask;
+ Uint32* dst_FixedAttribs;
+ BackupFormat::DataFile::VariableData* dst_VariableData;
+
+ Uint32 noOfAttributes; // No of Attributes
+ Uint32 attrLeft; // No of attributes left
+
+ Uint32 opNoDone;
+ Uint32 opNoConf;
+ Uint32 opLen;
+
+ public:
+ Uint32* dst;
+ Uint32 attrSzLeft; // No of words missing for current attribute
+ Uint32 attrSzTotal; // No of AI words received
+ Uint32 tablePtr; // Ptr.i to current table
+
+ FsBuffer dataBuffer;
+ Uint32 noOfRecords;
+ Uint32 noOfBytes;
+ Uint32 maxRecordSize;
+
+ private:
+ Uint32* scanStart;
+ Uint32* scanStop;
+
+ /**
+ * Sizes of record parts
+ */
+ Uint32 sz_Bitmask;
+ Uint32 sz_FixedAttribs;
+
+ public:
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ private:
+
+ Backup & backup;
+ BlockNumber number() const { return backup.number(); }
+ void progError(int line, int cause, const char * extra) {
+ backup.progError(line, cause, extra);
+ }
+ };
+ friend struct OperationRecord;
+
+ struct TriggerRecord {
+ TriggerRecord() { event = ~0;}
+ OperationRecord * operation;
+ BackupFormat::LogFile::LogEntry * logEntry;
+ Uint32 maxRecordSize;
+ Uint32 tableId;
+ Uint32 tab_ptr_i;
+ Uint32 event;
+ Uint32 backupPtr;
+ Uint32 errorCode;
+ union { Uint32 nextPool; Uint32 nextList; };
+ };
+ typedef Ptr<TriggerRecord> TriggerPtr;
+
+ /**
+ * BackupFile - At least 3 per backup
+ */
+ struct BackupFile {
+ BackupFile(Backup & backup, ArrayPool<Page32> & pp)
+ : operation(backup), pages(pp) {}
+
+ Uint32 backupPtr; // Pointer to backup record
+ Uint32 tableId;
+ Uint32 fragmentNo;
+ Uint32 filePointer;
+ Uint32 errorCode;
+ BackupFormat::FileType fileType;
+ OperationRecord operation;
+
+ Array<Page32> pages;
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+
+ Uint8 fileOpened;
+ Uint8 fileRunning;
+ Uint8 fileDone;
+ Uint8 scanRunning;
+ };
+ typedef Ptr<BackupFile> BackupFilePtr;
+
+
+ /**
+ * State for BackupRecord
+ */
+ enum State {
+ INITIAL,
+ DEFINING, // Defining backup content and parameters
+ DEFINED, // DEFINE_BACKUP_CONF sent in slave, received all in master
+ STARTED, // Creating triggers
+ SCANNING, // Scanning fragments
+ STOPPING, // Closing files
+ CLEANING, // Cleaning resources
+ ABORTING // Aborting backup
+ };
+
+ static const Uint32 validSlaveTransitionsCount;
+ static const Uint32 validMasterTransitionsCount;
+ static const State validSlaveTransitions[];
+ static const State validMasterTransitions[];
+
+ class CompoundState {
+ public:
+ CompoundState(Backup & b,
+ const State valid[],
+ Uint32 count, Uint32 _id)
+ : backup(b)
+ , validTransitions(valid),
+ noOfValidTransitions(count), id(_id)
+ {
+ state = INITIAL;
+ abortState = state;
+ }
+
+ void setState(State s);
+ State getState() const { return state;}
+ State getAbortState() const { return abortState;}
+
+ void forceState(State s);
+
+ BlockNumber number() const { return backup.number(); }
+ void progError(int line, int cause, const char * extra) {
+ backup.progError(line, cause, extra);
+ }
+ private:
+ Backup & backup;
+ State state;
+ State abortState; /**
+ When state == ABORTING, this contains the state
+ when the abort started
+ */
+ const State * validTransitions;
+ const Uint32 noOfValidTransitions;
+ const Uint32 id;
+ };
+ friend class CompoundState;
+
+ /**
+ * Backup record
+ *
+ * One record per backup
+ */
+ struct BackupRecord {
+ BackupRecord(Backup& b, ArrayPool<Page32> & pp,
+ ArrayPool<Table> & tp,
+ ArrayPool<BackupFile> & bp,
+ ArrayPool<TriggerRecord> & trp)
+ : slaveState(b, validSlaveTransitions, validSlaveTransitionsCount,1)
+ , tables(tp), triggers(trp), files(bp), pages(pp)
+ , masterData(b, validMasterTransitions, validMasterTransitionsCount)
+ , backup(b)
+ {
+ closingFiles = false;
+ okToCleanMaster = true;
+ }
+
+ CompoundState slaveState;
+
+ Uint32 clientRef;
+ Uint32 clientData;
+ Uint32 backupId;
+ Uint32 backupKey[2];
+ Uint32 masterRef;
+ Uint32 errorCode;
+ NdbNodeBitmask nodes;
+
+ bool okToCleanMaster;
+ bool closingFiles;
+
+ Uint64 noOfBytes;
+ Uint64 noOfRecords;
+ Uint64 noOfLogBytes;
+ Uint64 noOfLogRecords;
+
+ Uint32 startGCP;
+ Uint32 currGCP;
+ Uint32 stopGCP;
+ DLList<Table> tables;
+ SLList<TriggerRecord> triggers;
+
+ SLList<BackupFile> files;
+ Uint32 ctlFilePtr; // Ptr.i to ctl-file
+ Uint32 logFilePtr; // Ptr.i to log-file
+ Uint32 dataFilePtr; // Ptr.i to first data-file
+
+ Uint32 backupDataLen; // Used for (un)packing backup request
+ Array<Page32> pages; // Used for (un)packing backup request
+ SimpleProperties props;// Used for (un)packing backup request
+
+ struct MasterData {
+ MasterData(Backup & b, const State valid[], Uint32 count)
+ : state(b, valid, count, 0)
+ {
+ }
+ MutexHandle2<BACKUP_DEFINE_MUTEX> m_defineBackupMutex;
+ MutexHandle2<DICT_COMMIT_TABLE_MUTEX> m_dictCommitTableMutex;
+
+ Uint32 gsn;
+ CompoundState state;
+ SignalCounter sendCounter;
+ Uint32 errorCode;
+ struct {
+ Uint32 tableId;
+ } createTrig;
+ struct {
+ Uint32 tableId;
+ } dropTrig;
+ struct {
+ Uint32 tableId;
+ } alterTrig;
+ union {
+ struct {
+ Uint32 startBackup;
+ } waitGCP;
+ struct {
+ Uint32 signalNo;
+ Uint32 noOfSignals;
+ Uint32 tablePtr;
+ } startBackup;
+ struct {
+ Uint32 dummy;
+ } stopBackup;
+ };
+ } masterData;
+
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+
+ void setErrorCode(Uint32 errCode){
+ if(errorCode == 0)
+ errorCode = errCode;
+ }
+
+ bool checkError() const {
+ return errorCode != 0;
+ }
+
+ Backup & backup;
+ BlockNumber number() const { return backup.number(); }
+ void progError(int line, int cause, const char * extra) {
+ backup.progError(line, cause, extra);
+ }
+ };
+ friend struct BackupRecord;
+ typedef Ptr<BackupRecord> BackupRecordPtr;
+
+ struct Config {
+ Uint32 m_dataBufferSize;
+ Uint32 m_logBufferSize;
+ Uint32 m_minWriteSize;
+ Uint32 m_maxWriteSize;
+ };
+
+ /**
+ * Variables
+ */
+ Uint32 * c_startOfPages;
+ NodeId c_masterNodeId;
+ SLList<Node> c_nodes;
+ NdbNodeBitmask c_aliveNodes;
+ DLList<BackupRecord> c_backups;
+ Config c_defaults;
+ Uint32 m_diskless;
+
+ STATIC_CONST(NO_OF_PAGES_META_FILE = 2);
+
+ /**
+ * Pools
+ */
+ ArrayPool<Table> c_tablePool;
+ ArrayPool<Attribute> c_attributePool;
+ ArrayPool<BackupRecord> c_backupPool;
+ ArrayPool<BackupFile> c_backupFilePool;
+ ArrayPool<Page32> c_pagePool;
+ ArrayPool<Fragment> c_fragmentPool;
+ ArrayPool<Node> c_nodePool;
+ ArrayPool<TriggerRecord> c_triggerPool;
+
+ Uint32 calculate_frag_mask(Uint32);
+
+ void checkFile(Signal*, BackupFilePtr);
+ void checkScan(Signal*, BackupFilePtr);
+ void fragmentCompleted(Signal*, BackupFilePtr);
+
+ void backupAllData(Signal* signal, BackupRecordPtr);
+
+ void getFragmentInfo(Signal*, BackupRecordPtr, TablePtr, Uint32 fragNo);
+ void getFragmentInfoDone(Signal*, BackupRecordPtr);
+
+ void openFiles(Signal* signal, BackupRecordPtr ptr);
+ void openFilesReply(Signal*, BackupRecordPtr ptr, BackupFilePtr);
+ void closeFiles(Signal*, BackupRecordPtr ptr);
+ void closeFilesDone(Signal*, BackupRecordPtr ptr);
+
+ void sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr);
+
+ void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
+ void createTrigReply(Signal* signal, BackupRecordPtr ptr);
+ void alterTrigReply(Signal* signal, BackupRecordPtr ptr);
+ void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32, Uint32);
+ void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
+
+ void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0);
+
+ void nextFragment(Signal*, BackupRecordPtr);
+
+ void sendCreateTrig(Signal*, BackupRecordPtr ptr, TablePtr tabPtr);
+ void createAttributeMask(TablePtr tab, Bitmask<MAXNROFATTRIBUTESINWORDS>&);
+ void sendStartBackup(Signal*, BackupRecordPtr, TablePtr);
+ void sendAlterTrig(Signal*, BackupRecordPtr ptr);
+
+ void sendDropTrig(Signal*, BackupRecordPtr ptr);
+ void sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr);
+ void dropTrigReply(Signal*, BackupRecordPtr ptr);
+
+ void sendSignalAllWait(BackupRecordPtr ptr, Uint32 gsn, Signal *signal,
+ Uint32 signalLength,
+ bool executeDirect = false);
+ bool haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId);
+
+ void sendStopBackup(Signal*, BackupRecordPtr ptr);
+ void sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 errCode);
+ void sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr,
+ Uint32 errCode);
+ void masterAbort(Signal*, BackupRecordPtr ptr, bool controlledAbort);
+ void masterSendAbortBackup(Signal*, BackupRecordPtr ptr);
+ void slaveAbort(Signal*, BackupRecordPtr ptr);
+
+ void abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr);
+ void abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanDone);
+
+ bool verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask);
+ bool checkAbort(BackupRecordPtr ptr);
+ void checkNodeFail(Signal* signal,
+ BackupRecordPtr ptr,
+ NodeId newCoord,
+ Uint32 theFailedNodes[NodeBitmask::Size]);
+ void masterTakeOver(Signal* signal, BackupRecordPtr ptr);
+
+
+ NodeId getMasterNodeId() const { return c_masterNodeId; }
+ bool findTable(const BackupRecordPtr &, TablePtr &, Uint32 tableId) const;
+ TablePtr parseTableDescription(Signal*, BackupRecordPtr ptr, Uint32 len);
+
+ bool insertFileHeader(BackupFormat::FileType, BackupRecord*, BackupFile*);
+ void sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode);
+ void sendBackupRef(BlockReference ref, Signal *signal,
+ Uint32 senderData, Uint32 errorCode);
+ void dumpUsedResources();
+ void cleanupMasterResources(BackupRecordPtr ptr);
+ void cleanupSlaveResources(BackupRecordPtr ptr);
+ void cleanupFinalResources(BackupRecordPtr ptr);
+ void removeBackup(Signal*, BackupRecordPtr ptr);
+
+ void sendSTTORRY(Signal*);
+ void createSequence(Signal* signal);
+ void createSequenceReply(Signal*, class UtilSequenceConf *);
+};
+
+inline
+void
+Backup::OperationRecord::newRecord(Uint32 * p){
+ base = p;
+ dst_Length = p; p += 1;
+ dst_Bitmask = p; p += sz_Bitmask;
+ dst_FixedAttribs = p; p += sz_FixedAttribs;
+ dst_VariableData = (BackupFormat::DataFile::VariableData*)p;
+ BitmaskImpl::clear(sz_Bitmask, dst_Bitmask);
+ attrLeft = noOfAttributes;
+ attrSzLeft = attrSzTotal = 0;
+}
+
+inline
+Uint32 *
+Backup::OperationRecord::newAttrib(Uint32 offset, Uint32 sz){
+ attrLeft--;
+ attrSzLeft = sz;
+ dst = dst_FixedAttribs + offset;
+ return dst;
+}
+
+inline
+void
+Backup::OperationRecord::nullAttribute(Uint32 offsetNull){
+ attrLeft --;
+ BitmaskImpl::set(sz_Bitmask, dst_Bitmask, offsetNull);
+}
+
+inline
+Uint32 *
+Backup::OperationRecord::newNullable(Uint32 id, Uint32 sz){
+ attrLeft--;
+ attrSzLeft = sz;
+
+ dst = &dst_VariableData->Data[0];
+ dst_VariableData->Sz = htonl(sz);
+ dst_VariableData->Id = htonl(id);
+
+ dst_VariableData = (BackupFormat::DataFile::VariableData *)(dst + sz);
+
+ // All bits are cleared on newRecord -> don't need to clear this one here
+ // BitmaskImpl::clear(sz_Bitmask, dst_Bitmask, offsetNull);
+ return dst;
+}
+
+inline
+Uint32 *
+Backup::OperationRecord::newVariable(Uint32 id, Uint32 sz){
+ attrLeft--;
+ attrSzLeft = sz;
+
+ dst = &dst_VariableData->Data[0];
+ dst_VariableData->Sz = htonl(sz);
+ dst_VariableData->Id = htonl(id);
+
+ dst_VariableData = (BackupFormat::DataFile::VariableData *)(dst + sz);
+ return dst;
+}
+
+inline
+bool
+Backup::OperationRecord::finished(){
+ if(attrLeft != 0 || attrSzLeft != 0){
+ return false;
+ }
+
+ opLen += attrSzTotal;
+ opNoDone++;
+
+ scanStop = dst = (Uint32 *)dst_VariableData;
+
+ const Uint32 len = (dst - base - 1);
+ * dst_Length = htonl(len);
+
+ noOfRecords++;
+
+ return true;
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.txt b/storage/ndb/src/kernel/blocks/backup/Backup.txt
new file mode 100644
index 00000000000..ee5e02bb549
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.txt
@@ -0,0 +1,343 @@
+-- BACKUP SIGNAL DIAGRAM COMPLEMENT TO BACKUP AMENDMENTS 2003-07-11 --
+
+USER MASTER MASTER SLAVE SLAVE
+---------------------------------------------------------------------
+BACKUP_REQ
+---------------->
+ UTIL_SEQUENCE
+ --------------->
+ <---------------
+ DEFINE_BACKUP
+ ------------------------------> (Local signals)
+ LIST_TABLES
+ --------------->
+ <---------------
+ FSOPEN
+ --------------->
+ GET_TABINFO
+ <---------------
+ DI_FCOUNT
+ --------------->
+ <---------------
+ DI_GETPRIM
+ --------------->
+ <---------------
+ <-------------------------------
+BACKUP_CONF
+<----------------
+ CREATE_TRIG
+ --------------> (If master crashes here -> rogue triggers/memory leak)
+ <--------------
+ START_BACKUP
+ ------------------------------>
+ <------------------------------
+ ALTER_TRIG
+ -------------->
+ <--------------
+ WAIT_GCP
+ -------------->
+ <--------------
+ BACKUP_FRAGMENT
+ ------------------------------>
+ SCAN_FRAG
+ --------------->
+ <---------------
+ <------------------------------
+ WAIT_GCP
+ -------------->
+ <--------------
+ DROP_TRIG
+ -------------->
+ <--------------
+ STOP_BACKUP
+ ------------------------------>
+ <------------------------------
+BACKUP_COMPLETE_REP
+<----------------
+ ABORT_BACKUP
+ ------------------------------>
+
+----------------------------------------------------------------------------
+
+USER BACKUP-MASTER
+
+1) BACKUP_REQ -->
+
+2) To all slaves DEFINE_BACKUP_REQ
+ This signal contains info so that all
+ slaves can take over as master
+ Tomas: Except triggerId info...
+
+3) Wait for conf
+
+4) <-- BACKUP_CONF
+
+5) For Each Table
+ PREP_CREATE_TRIG_REQ
+ Wait for Conf
+
+6) To all slaves START_BACKUP_REQ
+ Include trigger ids
+ Wait for conf
+
+7) For Each Table
+ CREATE_TRIG_REQ
+ Wait for conf
+
+8) Wait for GCP
+
+9) For each table
+ For each fragment
+ BACKUP_FRAGMENT_REQ -->
+ <-- BACKUP_FRAGMENT_CONF
+
+10) Wait for GCP
+
+11) To all slaves STOP_BACKUP_REQ
+ This signal turns off logging
+
+12) Wait for conf
+
+13) <-- BACKUP_COMPLETE_REP
+
+----
+
+Slave: Master Died
+Wait for master take-over, max 30 sec then abort everything
+
+Slave: Master TakeOver
+
+BACKUP_STATUS_REQ --> To all nodes
+<-- BACKUP_STATUS_CONF
+
+BACKUP_STATUS_CONF
+ BACKUP_DEFINED
+ BACKUP_STARTED
+ BACKUP_FRAGMENT
+
+Master: Slave died
+
+-- Define Backup Req --
+
+1) Get backup definition
+ Which tables (all)
+
+2) Open files
+ Write table list to CTL - file
+
+3) Get definitions for all tables in backup
+
+4) Get Fragment info
+
+5) Define Backup Conf
+
+-- Define Backup Req --
+
+-- Abort Backup Req --
+
+1) Report to others
+
+2) Stop logging
+3) Stop file(s)
+4) Stop scan
+
+5) If failure/abort
+ Remove files
+
+6) If XXX
+ Report to user
+7) Clean up records/stuff
+
+-- Abort Backup --
+
+Reasons for aborting:
+
+1a) client abort
+
+1b) slave failure
+
+1c) node failure
+
+Resources to be cleaned up:
+
+Slave responsibility:
+
+2a) Close and remove files
+
+2b) Free allocated resources
+
+Master responsibility:
+
+2c) Drop triggers
+
+USER MASTER MASTER SLAVE SLAVE
+---------------------------------------------------------------------
+ BACKUP_ABORT_ORD:
+ -------------------------(ALL)-->
+ Set Master State ABORTING Set Slave State ABORTING
+ Drop Triggers Close and Remove files
+ CleanupSlaveResources()
+
+ BACKUP_ABORT_ORD:OkToClean
+ -------------------------(ALL)-->
+
+
+ CleanupMasterResources()
+
+BACKUP_ABORT_REP
+<---------------
+
+
+
+State descriptions:
+
+Master - INITIAL
+BACKUP_REQ ->
+Master - DEFINING
+DEFINE_BACKUP_CONF ->
+Master - DEFINED
+CREATE_TRIG_CONF ->
+Master - STARTED
+<--->
+Master - SCANNING
+WAIT_GCP_CONF ->
+Master - STOPPING
+(Master - CLEANING)
+--------
+Master - ABORTING
+
+
+Slave - INITIAL
+DEFINE_BACKUP_REQ ->
+Slave - DEFINING
+ - backupId
+ - tables
+DIGETPRIMCONF ->
+Slave - DEFINED
+START_BACKUP_REQ ->
+Slave - STARTED
+Slave - SCANNING
+STOP_BACKUP_REQ ->
+Slave - STOPPING
+FSCLOSECONF ->
+Slave - CLEANING
+-----
+Slave - ABORTING
+
+
+
+Testcases:
+
+2. Master failure at first START_BACKUP_CONF
+
+<masterId> error 10004
+start backup
+
+- Ok
+
+2. Master failure at first CREATE_TRIG_CONF
+
+<masterId> error 10003
+start backup
+
+- Ok
+
+2. Master failure at first ALTER_TRIG_CONF
+
+<masterId> error 10005
+start backup
+
+- Ok
+
+2. Master failure at WAIT_GCP_CONF
+
+<masterId> error 10007
+start backup
+
+- Ok
+
+2. Master failure at WAIT_GCP_CONF, nextFragment
+
+<masterId> error 10008
+start backup
+
+- Ok
+
+2. Master failure at WAIT_GCP_CONF, stopping
+
+<masterId> error 10009
+start backup
+
+- Ok
+
+2. Master failure at BACKUP_FRAGMENT_CONF
+
+<masterId> error 10010
+start backup
+
+- Ok
+
+2. Master failure at first DROP_TRIG_CONF
+
+<masterId> error 10012
+start backup
+
+- Ok
+
+1. Master failure at first STOP_BACKUP_CONF
+
+<masterId> error 10013
+start backup
+
+- Ok
+
+3. Multiple node failure:
+
+<masterId> error 10001
+<otherId> error 10014
+start backup
+
+- Ok (note, mgmtsrvr gets BACKUP_ABORT_REP but expects BACKUP_REF, hangs...)
+
+4. Multiple node failure:
+
+<masterId> error 10007
+<takeover id> error 10002
+start backup
+
+- Ok
+
+
+
+ ndbrequire(!ERROR_INSERTED(10001));
+ ndbrequire(!ERROR_INSERTED(10002));
+ ndbrequire(!ERROR_INSERTED(10021));
+ ndbrequire(!ERROR_INSERTED(10003));
+ ndbrequire(!ERROR_INSERTED(10004));
+ ndbrequire(!ERROR_INSERTED(10005));
+ ndbrequire(!ERROR_INSERTED(10006));
+ ndbrequire(!ERROR_INSERTED(10007));
+ ndbrequire(!ERROR_INSERTED(10008));
+ ndbrequire(!ERROR_INSERTED(10009));
+ ndbrequire(!ERROR_INSERTED(10010));
+ ndbrequire(!ERROR_INSERTED(10011));
+ ndbrequire(!ERROR_INSERTED(10012));
+ ndbrequire(!ERROR_INSERTED(10013));
+ ndbrequire(!ERROR_INSERTED(10014));
+ ndbrequire(!ERROR_INSERTED(10015));
+ ndbrequire(!ERROR_INSERTED(10016));
+ ndbrequire(!ERROR_INSERTED(10017));
+ ndbrequire(!ERROR_INSERTED(10018));
+ ndbrequire(!ERROR_INSERTED(10019));
+ ndbrequire(!ERROR_INSERTED(10020));
+
+ if (ERROR_INSERTED(10023)) {
+ if (ERROR_INSERTED(10023)) {
+ if (ERROR_INSERTED(10024)) {
+ if (ERROR_INSERTED(10025)) {
+ if (ERROR_INSERTED(10026)) {
+ if (ERROR_INSERTED(10028)) {
+ if (ERROR_INSERTED(10027)) {
+ (ERROR_INSERTED(10022))) {
+ if (ERROR_INSERTED(10029)) {
+ if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) {
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
new file mode 100644
index 00000000000..65dd2ad9053
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -0,0 +1,149 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef BACKUP_FORMAT_HPP
+#define BACKUP_FORMAT_HPP
+
+#include <ndb_types.h>
+
+static const char BACKUP_MAGIC[] = { 'N', 'D', 'B', 'B', 'C', 'K', 'U', 'P' };
+
+struct BackupFormat {
+
+ /**
+ * Section types in file
+ */
+ enum SectionType {
+ FILE_HEADER = 1,
+ FRAGMENT_HEADER = 2,
+ FRAGMENT_FOOTER = 3,
+ TABLE_LIST = 4,
+ TABLE_DESCRIPTION = 5,
+ GCP_ENTRY = 6
+ };
+
+ struct FileHeader {
+ char Magic[8];
+ Uint32 NdbVersion;
+
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 FileType;
+ Uint32 BackupId;
+ Uint32 BackupKey_0;
+ Uint32 BackupKey_1;
+ Uint32 ByteOrder;
+ };
+
+ /**
+ * File types
+ */
+ enum FileType {
+ CTL_FILE = 1,
+ LOG_FILE = 2,
+ DATA_FILE = 3
+ };
+
+ /**
+ * Data file formats
+ */
+ struct DataFile {
+
+ struct FragmentHeader {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableId;
+ Uint32 FragmentNo;
+ Uint32 ChecksumType;
+ };
+
+ struct VariableData {
+ Uint32 Sz;
+ Uint32 Id;
+ Uint32 Data[1];
+ };
+
+ struct Record {
+ Uint32 Length;
+ Uint32 NullBitmask[1];
+ Uint32 DataFixedKeys[1];
+ Uint32 DataFixedAttributes[1];
+ VariableData DataVariableAttributes[1];
+ };
+
+ struct FragmentFooter {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableId;
+ Uint32 FragmentNo;
+ Uint32 NoOfRecords;
+ Uint32 Checksum;
+ };
+ };
+
+ /**
+ * CTL file formats
+ */
+ struct CtlFile {
+
+ /**
+ * Table list
+ */
+ struct TableList {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableIds[1]; // Length = SectionLength - 2
+ };
+
+ /**
+ * Table description(s)
+ */
+ struct TableDescription {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 DictTabInfo[1]; // Length = SectionLength - 2
+ };
+
+ /**
+ * GCP Entry
+ */
+ struct GCPEntry {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 StartGCP;
+ Uint32 StopGCP;
+ };
+ };
+
+ /**
+ * LOG file format
+ */
+ struct LogFile {
+
+ /**
+ * Log Entry
+ */
+ struct LogEntry {
+ Uint32 Length;
+ Uint32 TableId;
+ // If TriggerEvent & 0x10000 == true then GCI is right after data
+ Uint32 TriggerEvent;
+ Uint32 Data[1]; // Len = Length - 2
+ };
+ };
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
new file mode 100644
index 00000000000..08fa089a9c0
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -0,0 +1,211 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+//****************************************************************************
+//
+// NAME
+// Backup - Database backup / restore
+//
+//===========================================================================
+#include "Backup.hpp"
+
+#include <Properties.hpp>
+#include <Configuration.hpp>
+
+//extern const unsigned Ndbcntr::g_sysTableCount;
+
+Backup::Backup(const Configuration & conf) :
+ SimulatedBlock(BACKUP, conf),
+ c_nodes(c_nodePool),
+ c_backups(c_backupPool)
+{
+ BLOCK_CONSTRUCTOR(Backup);
+
+ c_nodePool.setSize(MAX_NDB_NODES);
+ c_masterNodeId = getOwnNodeId();
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 noBackups = 0, noTables = 0, noAttribs = 0;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &m_diskless));
+ ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
+ // ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
+
+ noAttribs++; //RT 527 bug fix
+
+ c_backupPool.setSize(noBackups);
+ c_backupFilePool.setSize(3 * noBackups);
+ c_tablePool.setSize(noBackups * noTables);
+ c_attributePool.setSize(noBackups * noAttribs);
+ c_triggerPool.setSize(noBackups * 3 * noTables);
+
+ // 2 = no of replicas
+ c_fragmentPool.setSize(noBackups * 2 * NO_OF_FRAG_PER_NODE * noTables);
+
+ Uint32 szMem = 0;
+ ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem);
+ Uint32 noPages = (szMem + sizeof(Page32) - 1) / sizeof(Page32);
+ // We need to allocate an additional 2 pages: 1 page because of a bug in
+ // ArrayPool and another one for DICTTABINFO.
+ c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2);
+
+ Uint32 szDataBuf = (2 * 1024 * 1024);
+ Uint32 szLogBuf = (2 * 1024 * 1024);
+ Uint32 szWrite = 32768;
+ ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf);
+ ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf);
+ ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite);
+
+ c_defaults.m_logBufferSize = szLogBuf;
+ c_defaults.m_dataBufferSize = szDataBuf;
+ c_defaults.m_minWriteSize = szWrite;
+ c_defaults.m_maxWriteSize = szWrite;
+
+ { // Init all tables
+ ArrayList<Table> tables(c_tablePool);
+ TablePtr ptr;
+ while(tables.seize(ptr)){
+ new (ptr.p) Table(c_attributePool, c_fragmentPool);
+ }
+ tables.release();
+ }
+
+ {
+ ArrayList<BackupFile> ops(c_backupFilePool);
+ BackupFilePtr ptr;
+ while(ops.seize(ptr)){
+ new (ptr.p) BackupFile(* this, c_pagePool);
+ }
+ ops.release();
+ }
+
+ {
+ ArrayList<BackupRecord> recs(c_backupPool);
+ BackupRecordPtr ptr;
+ while(recs.seize(ptr)){
+ new (ptr.p) BackupRecord(* this, c_pagePool, c_tablePool,
+ c_backupFilePool, c_triggerPool);
+ }
+ recs.release();
+ }
+
+ // Initialize BAT for interface to file system
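+ // Page 0 is seized only to learn the base address of the page array and is
+ // then released again; the single BAT entry registered below spans the whole
+ // page pool so the file system interface can reference backup pages directly.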
+ {
+ Page32Ptr p;
+ ndbrequire(c_pagePool.seizeId(p, 0));
+ c_startOfPages = (Uint32 *)p.p;
+ c_pagePool.release(p);
+
+ NewVARIABLE* bat = allocateBat(1);
+ bat[0].WA = c_startOfPages;
+ bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32);
+ }
+
+ // Add received signals
+ addRecSignal(GSN_STTOR, &Backup::execSTTOR);
+ addRecSignal(GSN_DUMP_STATE_ORD, &Backup::execDUMP_STATE_ORD);
+ addRecSignal(GSN_READ_NODESCONF, &Backup::execREAD_NODESCONF);
+ addRecSignal(GSN_NODE_FAILREP, &Backup::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Backup::execINCL_NODEREQ);
+ addRecSignal(GSN_CONTINUEB, &Backup::execCONTINUEB);
+
+ addRecSignal(GSN_SCAN_HBREP, &Backup::execSCAN_HBREP);
+ addRecSignal(GSN_TRANSID_AI, &Backup::execTRANSID_AI);
+ addRecSignal(GSN_SCAN_FRAGREF, &Backup::execSCAN_FRAGREF);
+ addRecSignal(GSN_SCAN_FRAGCONF, &Backup::execSCAN_FRAGCONF);
+
+ addRecSignal(GSN_BACKUP_TRIG_REQ, &Backup::execBACKUP_TRIG_REQ);
+ addRecSignal(GSN_TRIG_ATTRINFO, &Backup::execTRIG_ATTRINFO);
+ addRecSignal(GSN_FIRE_TRIG_ORD, &Backup::execFIRE_TRIG_ORD);
+
+ addRecSignal(GSN_LIST_TABLES_CONF, &Backup::execLIST_TABLES_CONF);
+ addRecSignal(GSN_GET_TABINFOREF, &Backup::execGET_TABINFOREF);
+ addRecSignal(GSN_GET_TABINFO_CONF, &Backup::execGET_TABINFO_CONF);
+
+ addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF);
+
+ addRecSignal(GSN_ALTER_TRIG_REF, &Backup::execALTER_TRIG_REF);
+ addRecSignal(GSN_ALTER_TRIG_CONF, &Backup::execALTER_TRIG_CONF);
+
+ addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF);
+
+ addRecSignal(GSN_DI_FCOUNTCONF, &Backup::execDI_FCOUNTCONF);
+ addRecSignal(GSN_DIGETPRIMCONF, &Backup::execDIGETPRIMCONF);
+
+ addRecSignal(GSN_FSOPENREF, &Backup::execFSOPENREF);
+ addRecSignal(GSN_FSOPENCONF, &Backup::execFSOPENCONF);
+
+ addRecSignal(GSN_FSCLOSEREF, &Backup::execFSCLOSEREF);
+ addRecSignal(GSN_FSCLOSECONF, &Backup::execFSCLOSECONF);
+
+ addRecSignal(GSN_FSAPPENDREF, &Backup::execFSAPPENDREF);
+ addRecSignal(GSN_FSAPPENDCONF, &Backup::execFSAPPENDCONF);
+
+ addRecSignal(GSN_FSREMOVEREF, &Backup::execFSREMOVEREF);
+ addRecSignal(GSN_FSREMOVECONF, &Backup::execFSREMOVECONF);
+
+ /*****/
+ addRecSignal(GSN_BACKUP_REQ, &Backup::execBACKUP_REQ);
+ addRecSignal(GSN_ABORT_BACKUP_ORD, &Backup::execABORT_BACKUP_ORD);
+
+ addRecSignal(GSN_DEFINE_BACKUP_REQ, &Backup::execDEFINE_BACKUP_REQ);
+ addRecSignal(GSN_DEFINE_BACKUP_REF, &Backup::execDEFINE_BACKUP_REF);
+ addRecSignal(GSN_DEFINE_BACKUP_CONF, &Backup::execDEFINE_BACKUP_CONF);
+
+ addRecSignal(GSN_START_BACKUP_REQ, &Backup::execSTART_BACKUP_REQ);
+ addRecSignal(GSN_START_BACKUP_REF, &Backup::execSTART_BACKUP_REF);
+ addRecSignal(GSN_START_BACKUP_CONF, &Backup::execSTART_BACKUP_CONF);
+
+ addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ);
+ //addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF);
+ addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF);
+
+ addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ);
+ addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF);
+ addRecSignal(GSN_STOP_BACKUP_CONF, &Backup::execSTOP_BACKUP_CONF);
+
+ //addRecSignal(GSN_BACKUP_STATUS_REQ, &Backup::execBACKUP_STATUS_REQ);
+ //addRecSignal(GSN_BACKUP_STATUS_CONF, &Backup::execBACKUP_STATUS_CONF);
+
+ addRecSignal(GSN_UTIL_SEQUENCE_REF, &Backup::execUTIL_SEQUENCE_REF);
+ addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Backup::execUTIL_SEQUENCE_CONF);
+
+ addRecSignal(GSN_WAIT_GCP_REF, &Backup::execWAIT_GCP_REF);
+ addRecSignal(GSN_WAIT_GCP_CONF, &Backup::execWAIT_GCP_CONF);
+
+ /**
+ * Testing
+ */
+ addRecSignal(GSN_BACKUP_REF, &Backup::execBACKUP_REF);
+ addRecSignal(GSN_BACKUP_CONF, &Backup::execBACKUP_CONF);
+ addRecSignal(GSN_BACKUP_ABORT_REP, &Backup::execBACKUP_ABORT_REP);
+ addRecSignal(GSN_BACKUP_COMPLETE_REP, &Backup::execBACKUP_COMPLETE_REP);
+}
+
+Backup::~Backup()
+{
+}
+
+BLOCK_FUNCTIONS(Backup)
+
+template class ArrayPool<Backup::Page32>;
+template class ArrayPool<Backup::Attribute>;
+template class ArrayPool<Backup::Fragment>;
diff --git a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
new file mode 100644
index 00000000000..2f3c7daae43
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
@@ -0,0 +1,343 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef FS_BUFFER_HPP
+#define FS_BUFFER_HPP
+
+#include <ndb_global.h>
+
+#define DEBUG(x)
+
+/**
+ * A circular data buffer to be used together with the FS
+ *
+ * One writer - Typically your block
+ * getWritePtr()
+ * updateWritePtr()
+ *
+ * One reader - Typically a "thread" in your block sending data to NDBFS
+ * getReadPtr()
+ * updateReadPtr()
+ */
+class FsBuffer {
+public:
+ /**
+ * Default constructor
+ */
+ FsBuffer();
+
+ /**
+ * setup FsBuffer
+ *
+ * @param Buffer - Ptr to contiguous memory
+ * @param Size - Buffer size in 32-bit words
+ * @param BlockSize - Size of block in 32-bit words
+ * @param MinRead - Min read size in 32-bit words
+ * Gets rounded (down) to the nearest multiple of the block size.
+ * @param MaxRead - Max read size in 32-bit words
+ * Gets rounded (down) to the nearest multiple of the block size.
+ * @param MaxWrite - Maximum write (into buffer) in 32-bit words
+ *
+ * @return NULL if everything is OK
+ * else A string describing problem
+ */
+ const char * setup(Uint32 * Buffer,
+ Uint32 Size,
+ Uint32 BlockSize = 128, // 512 bytes
+ Uint32 MinRead = 1024, // 4k
+ Uint32 MaxRead = 1024, // 4k
+ Uint32 MaxWrite = 1024); // 4k
+ /*
+ * @return NULL if everything is OK
+ * else A string describing problem
+ */
+ const char * valid() const;
+
+ Uint32 getBufferSize() const;
+ Uint32 getUsableSize() const;
+ Uint32 * getStart() const;
+
+ /**
+ * getReadPtr - Get pointer and size of data to send to FS
+ *
+ * @param ptr - Where to fetch data
+ * @param sz - How much data in 32-bit words
+ * @param eof - Set only when returning false: true means this was the last fetch
+ *
+ * @return true - If there is data of size >= minread
+ * false - If the available data is < minread; when eof has been
+ * signalled, the remaining data (possibly < minread) is still
+ * returned in ptr/sz and *eof is set to true
+ */
+ bool getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * eof);
+
+ /**
+ * @note: sz must be equal to sz returned by getReadPtr
+ */
+ void updateReadPtr(Uint32 sz);
+
+ /**
+ *
+ * @note Must be followed by an updateWritePtr(no of words used)
+ */
+ bool getWritePtr(Uint32 ** ptr, Uint32 sz);
+
+ void updateWritePtr(Uint32 sz);
+
+ /**
+ * There will be no more writing to this buffer
+ */
+ void eof();
+
+ /**
+ * Getters for variables
+ */
+ Uint32 getMaxWrite() const { return m_maxWrite;}
+ Uint32 getMinRead() const { return m_minRead;}
+
+ Uint32 getFreeSize() const { return m_free; }
+
+
+private:
+
+ Uint32 m_free;
+ Uint32 m_readIndex;
+ Uint32 m_writeIndex;
+ Uint32 m_eof;
+ Uint32 * m_start;
+ Uint32 m_minRead;
+ Uint32 m_maxRead;
+ Uint32 m_maxWrite;
+ Uint32 m_size;
+
+ Uint32 * m_buffer;
+ Uint32 m_bufSize;
+ Uint32 m_blockSize;
+
+ void clear();
+};
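+
+#if 0
+// Usage sketch (illustrative only, not part of the original interface): the
+// block owning the buffer produces with getWritePtr()/updateWritePtr() while
+// its file system "thread" drains data with getReadPtr()/updateReadPtr().
+// The 64k word buffer and the sendToNdbfs() helper are assumptions made
+// purely for this sketch.
+static void
+fsBufferSketch()
+{
+  static Uint32 mem[65536];
+  FsBuffer b;
+  assert(b.setup(mem, 65536) == 0); // defaults: 512 byte blocks, 4k min/max read
+
+  Uint32 * wptr;
+  if(b.getWritePtr(&wptr, 10)){     // reserve room for 10 words
+    for(Uint32 i = 0; i < 10; i++)
+      wptr[i] = i;
+    b.updateWritePtr(10);           // commit exactly what was written
+  }
+  b.eof();                          // no more writes will follow
+
+  Uint32 * rptr; Uint32 sz = 0; bool last = false;
+  while(!last){
+    if(b.getReadPtr(&rptr, &sz, &last) || last){
+      // sendToNdbfs(rptr, sz);     // hand a chunk to the file system
+      b.updateReadPtr(sz);          // sz must equal what getReadPtr returned
+    }
+  }
+}
+#endif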
+
+inline
+FsBuffer::FsBuffer()
+{
+ clear();
+}
+
+inline
+void
+FsBuffer::clear(){
+ m_minRead = m_maxRead = m_maxWrite = m_size = m_bufSize = m_free = 0;
+ m_buffer = m_start = 0;
+}
+
+static
+Uint32 *
+align(Uint32 * ptr, Uint32 alignment, bool downwards){
+
+ const UintPtr a = (UintPtr)ptr;
+ const UintPtr b = a % alignment;
+
+ if(downwards){
+ return (Uint32 *)(a - b);
+ } else {
+ return (Uint32 *)(a + (b == 0 ? 0 : (alignment - b)));
+ }
+}
+
+inline
+const char *
+FsBuffer::setup(Uint32 * Buffer,
+ Uint32 Size,
+ Uint32 Block,
+ Uint32 MinRead,
+ Uint32 MaxRead,
+ Uint32 MaxWrite)
+{
+ clear();
+ m_buffer = Buffer;
+ m_bufSize = Size;
+ m_blockSize = Block;
+ if(Block == 0){
+ return valid();
+ }
+
+ m_minRead = (MinRead / Block) * Block;
+ m_maxRead = (MaxRead / Block) * Block;
+ m_maxWrite = MaxWrite;
+
+ m_start = align(Buffer, Block*4, false);
+ Uint32 * stop = align(Buffer + Size - MaxWrite, Block*4, true);
+ if(stop > m_start){
+ m_size = stop - m_start;
+ } else {
+ m_size = 0;
+ }
+
+ if(m_minRead == 0)
+ m_size = 0;
+ else
+ m_size = (m_size / m_minRead) * m_minRead;
+
+#if 0
+ ndbout_c("Block = %d MinRead = %d -> %d", Block*4, MinRead*4, m_minRead*4);
+ ndbout_c("Block = %d MaxRead = %d -> %d", Block*4, MaxRead*4, m_maxRead*4);
+
+ ndbout_c("Buffer = %d -> %d", Buffer, m_start);
+ ndbout_c("Buffer = %d Size = %d MaxWrite = %d -> %d",
+ Buffer, Size*4, MaxWrite*4, m_size*4);
+#endif
+
+ m_readIndex = m_writeIndex = m_eof = 0;
+ m_free = m_size;
+ return valid();
+}
+
+inline
+const char *
+FsBuffer::valid() const {
+ if(m_buffer == 0) return "Null pointer buffer";
+ if(m_bufSize == 0) return "Zero size buffer";
+ if(m_blockSize == 0) return "Zero block size";
+ if(m_minRead < m_blockSize) return "Min read less than block size";
+ if(m_maxRead < m_blockSize) return "Max read less than block size";
+ if(m_maxRead < m_minRead) return "Max read less than min read";
+ if(m_size == 0) return "Zero usable space";
+ return 0;
+}
+
+inline
+Uint32
+FsBuffer::getBufferSize() const {
+ return m_bufSize;
+}
+
+inline
+Uint32
+FsBuffer::getUsableSize() const {
+ return m_size;
+}
+
+inline
+Uint32 *
+FsBuffer::getStart() const {
+ return m_start;
+}
+
+inline
+bool
+FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
+
+ Uint32 * Tp = m_start;
+ const Uint32 Tr = m_readIndex;
+ const Uint32 Tm = m_minRead;
+ const Uint32 Ts = m_size;
+ const Uint32 Tmw = m_maxRead;
+
+ Uint32 sz1 = m_size - m_free; // Used
+
+ if(sz1 >= Tm){
+ if(Tr + sz1 > Ts)
+ sz1 = (Ts - Tr);
+
+ if(sz1 > Tmw)
+ * sz = Tmw;
+ else
+ * sz = sz1 - (sz1 % Tm);
+
+ * ptr = &Tp[Tr];
+
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d",
+ Tr, Tw, Ts, Tm, sz1, * sz));
+
+ return true;
+ }
+
+ if(!m_eof){
+ * _eof = false;
+
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> false",
+ Tr, Tw, Ts, Tm, sz1));
+
+ return false;
+ }
+
+ * sz = sz1;
+ * _eof = true;
+ * ptr = &Tp[Tr];
+
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d eof",
+ Tr, Tw, Ts, Tm, sz1, * sz));
+
+ return false;
+}
+
+inline
+void
+FsBuffer::updateReadPtr(Uint32 sz){
+ const Uint32 Tr = m_readIndex;
+ const Uint32 Ts = m_size;
+
+ m_free += sz;
+ m_readIndex = (Tr + sz) % Ts;
+}
+
+inline
+bool
+FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){
+ assert(sz <= m_maxWrite);
+ Uint32 * Tp = m_start;
+ const Uint32 Tw = m_writeIndex;
+ const Uint32 sz1 = m_free;
+
+ if(sz1 > sz){ // Note at least 1 word of slack
+ * ptr = &Tp[Tw];
+
+ DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> true",
+ sz, Tr, Tw, Ts, sz1));
+ return true;
+ }
+
+ DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> false",
+ sz, Tr, Tw, Ts, sz1));
+
+ return false;
+}
+
+inline
+void
+FsBuffer::updateWritePtr(Uint32 sz){
+ assert(sz <= m_maxWrite);
+ Uint32 * Tp = m_start;
+ const Uint32 Tw = m_writeIndex;
+ const Uint32 Ts = m_size;
+
+ const Uint32 Tnew = (Tw + sz);
+ m_free -= sz;
+ if(Tnew < Ts){
+ m_writeIndex = Tnew;
+ return;
+ }
+
+ memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2);
+ m_writeIndex = Tnew - Ts;
+}
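+// Design note: setup() leaves at least MaxWrite words of slack between the
+// end of the usable circular area and the end of the underlying buffer, and
+// getWritePtr() always keeps one word free. A write may therefore run past
+// m_size, and the memcpy above moves the spill-over back to the start of
+// the buffer before the write index wraps.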
+
+inline
+void
+FsBuffer::eof(){
+ m_eof = 1;
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/backup/Makefile.am b/storage/ndb/src/kernel/blocks/backup/Makefile.am
new file mode 100644
index 00000000000..c8f44f31292
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/Makefile.am
@@ -0,0 +1,24 @@
+
+noinst_LIBRARIES = libbackup.a
+
+libbackup_a_SOURCES = Backup.cpp BackupInit.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libbackup.dsp
+
+libbackup.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libbackup_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/backup/read.cpp b/storage/ndb/src/kernel/blocks/backup/read.cpp
new file mode 100644
index 00000000000..89cc08ee9de
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/backup/read.cpp
@@ -0,0 +1,478 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <ndb_global.h>
+
+#include <NdbTCP.h>
+#include <NdbOut.hpp>
+#include "BackupFormat.hpp"
+#include <AttributeHeader.hpp>
+#include <SimpleProperties.hpp>
+
+bool readHeader(FILE*, BackupFormat::FileHeader *);
+bool readFragHeader(FILE*, BackupFormat::DataFile::FragmentHeader *);
+bool readFragFooter(FILE*, BackupFormat::DataFile::FragmentFooter *);
+Int32 readRecord(FILE*, Uint32 **);
+
+NdbOut & operator<<(NdbOut&, const BackupFormat::FileHeader &);
+NdbOut & operator<<(NdbOut&, const BackupFormat::DataFile::FragmentHeader &);
+NdbOut & operator<<(NdbOut&, const BackupFormat::DataFile::FragmentFooter &);
+
+bool readTableList(FILE*, BackupFormat::CtlFile::TableList **);
+bool readTableDesc(FILE*, BackupFormat::CtlFile::TableDescription **);
+bool readGCPEntry(FILE*, BackupFormat::CtlFile::GCPEntry **);
+
+NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::TableList &);
+NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::TableDescription &);
+NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::GCPEntry &);
+
+Int32 readLogEntry(FILE*, Uint32**);
+
+static Uint32 recNo;
+static Uint32 logEntryNo;
+
+int
+main(int argc, const char * argv[]){
+
+ ndb_init();
+ if(argc <= 1){
+ printf("Usage: %s <filename>", argv[0]);
+ exit(1);
+ }
+ FILE * f = fopen(argv[1], "rb");
+ if(!f){
+ ndbout << "No such file!" << endl;
+ exit(1);
+ }
+
+ BackupFormat::FileHeader fileHeader;
+ if(!readHeader(f, &fileHeader)){
+ ndbout << "Invalid file!" << endl;
+ exit(1);
+ }
+ ndbout << fileHeader << endl;
+
+ switch(fileHeader.FileType){
+ case BackupFormat::DATA_FILE:
+ while(!feof(f)){
+ BackupFormat::DataFile::FragmentHeader fragHeader;
+ if(!readFragHeader(f, &fragHeader))
+ break;
+ ndbout << fragHeader << endl;
+
+ Uint32 len, * data;
+ while((len = readRecord(f, &data)) > 0){
+#if 0
+ ndbout << "-> " << hex;
+ for(Uint32 i = 0; i<len; i++){
+ ndbout << data[i] << " ";
+ }
+ ndbout << endl;
+#endif
+ }
+
+ BackupFormat::DataFile::FragmentFooter fragFooter;
+ if(!readFragFooter(f, &fragFooter))
+ break;
+ ndbout << fragFooter << endl;
+ }
+ break;
+ case BackupFormat::CTL_FILE:{
+ BackupFormat::CtlFile::TableList * tabList;
+ if(!readTableList(f, &tabList)){
+ ndbout << "Invalid file! No table list" << endl;
+ break;
+ }
+ ndbout << (* tabList) << endl;
+
+ const Uint32 noOfTables = tabList->SectionLength - 2;
+ for(Uint32 i = 0; i<noOfTables; i++){
+ BackupFormat::CtlFile::TableDescription * tabDesc;
+ if(!readTableDesc(f, &tabDesc)){
+ ndbout << "Invalid file missing table description" << endl;
+ break;
+ }
+ ndbout << (* tabDesc) << endl;
+ }
+
+ BackupFormat::CtlFile::GCPEntry * gcpE;
+ if(!readGCPEntry(f, &gcpE)){
+ ndbout << "Invalid file! GCP ENtry" << endl;
+ break;
+ }
+ ndbout << (* gcpE) << endl;
+
+ break;
+ }
+ case BackupFormat::LOG_FILE:{
+ logEntryNo = 0;
+
+ typedef BackupFormat::LogFile::LogEntry LogEntry;
+
+ Uint32 len, * data;
+ while((len = readLogEntry(f, &data)) > 0){
+ LogEntry * logEntry = (LogEntry *) data;
+ /**
+ * Log Entry
+ */
+ Uint32 event = ntohl(logEntry->TriggerEvent);
+ bool gcp = (event & 0x10000) != 0;
+ event &= 0xFFFF;
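+ // when the GCP flag was set, the last data word is the GCI; drop it
+ // from the entry length before printing the attribute data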
+ if(gcp)
+ len --;
+
+ ndbout << "LogEntry Table: " << (Uint32)ntohl(logEntry->TableId)
+ << " Event: " << event
+ << " Length: " << (len - 2);
+
+ const Uint32 dataLen = len - 2;
+#if 0
+ Uint32 pos = 0;
+ while(pos < dataLen){
+ AttributeHeader * ah = (AttributeHeader*)&logEntry->Data[pos];
+ ndbout_c(" Attribut: %d Size: %d",
+ ah->getAttributeId(),
+ ah->getDataSize());
+ pos += ah->getDataSize() + 1;
+ }
+#endif
+ if(gcp)
+ ndbout << " GCP: " << (Uint32)ntohl(logEntry->Data[dataLen]);
+ ndbout << endl;
+ }
+ break;
+ }
+ default:
+ ndbout << "Unsupported file type for printer: "
+ << fileHeader.FileType << endl;
+ break;
+ }
+ fclose(f);
+ return 0;
+}
+
+#define RETURN_FALSE() { ndbout_c("false: %d", __LINE__); abort(); return false; }
+
+static bool endian = false;
+
+bool
+readHeader(FILE* f, BackupFormat::FileHeader * dst){
+ if(fread(dst, 4, 3, f) != 3)
+ RETURN_FALSE();
+
+ if(memcmp(dst->Magic, BACKUP_MAGIC, sizeof(BACKUP_MAGIC)) != 0)
+ RETURN_FALSE();
+
+ dst->NdbVersion = ntohl(dst->NdbVersion);
+ if(dst->NdbVersion != 210)
+ RETURN_FALSE();
+
+ if(fread(&dst->SectionType, 4, 2, f) != 2)
+ RETURN_FALSE();
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+
+ if(dst->SectionType != BackupFormat::FILE_HEADER)
+ RETURN_FALSE();
+
+ if(dst->SectionLength != ((sizeof(BackupFormat::FileHeader) - 12) >> 2))
+ RETURN_FALSE();
+
+ if(fread(&dst->FileType, 4, dst->SectionLength - 2, f) !=
+ (dst->SectionLength - 2))
+ RETURN_FALSE();
+
+ dst->FileType = ntohl(dst->FileType);
+ dst->BackupId = ntohl(dst->BackupId);
+ dst->BackupKey_0 = ntohl(dst->BackupKey_0);
+ dst->BackupKey_1 = ntohl(dst->BackupKey_1);
+
+ if(dst->FileType < BackupFormat::CTL_FILE ||
+ dst->FileType > BackupFormat::DATA_FILE)
+ RETURN_FALSE();
+
+ if(dst->ByteOrder != 0x12345678)
+ endian = true;
+
+ return true;
+}
+
+bool
+readFragHeader(FILE* f, BackupFormat::DataFile::FragmentHeader * dst){
+ if(fread(dst, 1, sizeof(* dst), f) != sizeof(* dst))
+ return false;
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+ dst->TableId = ntohl(dst->TableId);
+ dst->FragmentNo = ntohl(dst->FragmentNo);
+ dst->ChecksumType = ntohl(dst->ChecksumType);
+
+ if(dst->SectionLength != (sizeof(* dst) >> 2))
+ RETURN_FALSE();
+
+ if(dst->SectionType != BackupFormat::FRAGMENT_HEADER)
+ RETURN_FALSE();
+
+ recNo = 0;
+
+ return true;
+}
+
+bool
+readFragFooter(FILE* f, BackupFormat::DataFile::FragmentFooter * dst){
+ if(fread(dst, 1, sizeof(* dst), f) != sizeof(* dst))
+ RETURN_FALSE();
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+ dst->TableId = ntohl(dst->TableId);
+ dst->FragmentNo = ntohl(dst->FragmentNo);
+ dst->NoOfRecords = ntohl(dst->NoOfRecords);
+ dst->Checksum = ntohl(dst->Checksum);
+
+ if(dst->SectionLength != (sizeof(* dst) >> 2))
+ RETURN_FALSE();
+
+ if(dst->SectionType != BackupFormat::FRAGMENT_FOOTER)
+ RETURN_FALSE();
+ return true;
+}
+
+static Uint32 buf[8192];
+
+Int32
+readRecord(FILE* f, Uint32 **dst){
+ Uint32 len;
+ if(fread(&len, 1, 4, f) != 4)
+ RETURN_FALSE();
+
+ len = ntohl(len);
+
+ if(fread(buf, 4, len, f) != len)
+ return -1;
+
+ if(len > 0)
+ recNo++;
+
+ * dst = &buf[0];
+
+ return len;
+}
+
+Int32
+readLogEntry(FILE* f, Uint32 **dst){
+ Uint32 len;
+ if(fread(&len, 1, 4, f) != 4)
+ RETURN_FALSE();
+
+ len = ntohl(len);
+
+ if(fread(&buf[1], 4, len, f) != len)
+ return -1;
+
+ buf[0] = len;
+
+ if(len > 0)
+ logEntryNo++;
+
+ * dst = &buf[0];
+
+ return len;
+}
+
+
+NdbOut &
+operator<<(NdbOut& ndbout, const BackupFormat::FileHeader & hf){
+
+ char buf[9];
+ memcpy(buf, hf.Magic, sizeof(hf.Magic));
+ buf[8] = 0;
+
+ ndbout << "-- FileHeader:" << endl;
+ ndbout << "Magic: " << buf << endl;
+ ndbout << "NdbVersion: " << hf.NdbVersion << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+ ndbout << "FileType: " << hf.FileType << endl;
+ ndbout << "BackupId: " << hf.BackupId << endl;
+ ndbout << "BackupKey: [ " << hex << hf.BackupKey_0
+ << " "<< hf.BackupKey_1 << " ]" << endl;
+ ndbout << "ByteOrder: " << hex << hf.ByteOrder << endl;
+ return ndbout;
+}
+
+NdbOut & operator<<(NdbOut& ndbout,
+ const BackupFormat::DataFile::FragmentHeader & hf){
+
+ ndbout << "-- Fragment header:" << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+ ndbout << "TableId: " << hf.TableId << endl;
+ ndbout << "FragmentNo: " << hf.FragmentNo << endl;
+ ndbout << "ChecksumType: " << hf.ChecksumType << endl;
+
+ return ndbout;
+}
+NdbOut & operator<<(NdbOut& ndbout,
+ const BackupFormat::DataFile::FragmentFooter & hf){
+
+ ndbout << "-- Fragment footer:" << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+ ndbout << "TableId: " << hf.TableId << endl;
+ ndbout << "FragmentNo: " << hf.FragmentNo << endl;
+ ndbout << "NoOfRecords: " << hf.NoOfRecords << endl;
+ ndbout << "Checksum: " << hf.Checksum << endl;
+
+ return ndbout;
+}
+
+bool
+readTableList(FILE* f, BackupFormat::CtlFile::TableList **ret){
+ BackupFormat::CtlFile::TableList * dst =
+ (BackupFormat::CtlFile::TableList *)&buf[0];
+
+ if(fread(dst, 4, 2, f) != 2)
+ RETURN_FALSE();
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+
+ if(dst->SectionType != BackupFormat::TABLE_LIST)
+ RETURN_FALSE();
+
+ const Uint32 len = dst->SectionLength - 2;
+ if(fread(&dst->TableIds[0], 4, len, f) != len)
+ RETURN_FALSE();
+
+ for(Uint32 i = 0; i<len; i++){
+ dst->TableIds[i] = ntohl(dst->TableIds[i]);
+ }
+
+ * ret = dst;
+
+ return true;
+}
+
+bool
+readTableDesc(FILE* f, BackupFormat::CtlFile::TableDescription **ret){
+ BackupFormat::CtlFile::TableDescription * dst =
+ (BackupFormat::CtlFile::TableDescription *)&buf[0];
+
+ if(fread(dst, 4, 2, f) != 2)
+ RETURN_FALSE();
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+
+ if(dst->SectionType != BackupFormat::TABLE_DESCRIPTION)
+ RETURN_FALSE();
+
+ const Uint32 len = dst->SectionLength - 2;
+ if(fread(&dst->DictTabInfo[0], 4, len, f) != len)
+ RETURN_FALSE();
+
+ * ret = dst;
+
+ return true;
+}
+
+bool
+readGCPEntry(FILE* f, BackupFormat::CtlFile::GCPEntry **ret){
+ BackupFormat::CtlFile::GCPEntry * dst =
+ (BackupFormat::CtlFile::GCPEntry *)&buf[0];
+
+ if(fread(dst, 4, 4, f) != 4)
+ RETURN_FALSE();
+
+ dst->SectionType = ntohl(dst->SectionType);
+ dst->SectionLength = ntohl(dst->SectionLength);
+
+ if(dst->SectionType != BackupFormat::GCP_ENTRY)
+ RETURN_FALSE();
+
+ dst->StartGCP = ntohl(dst->StartGCP);
+ dst->StopGCP = ntohl(dst->StopGCP);
+
+ * ret = dst;
+
+ return true;
+}
+
+
+NdbOut &
+operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::TableList & hf) {
+ ndbout << "-- Table List:" << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+ for(Uint32 i = 0; i < hf.SectionLength - 2; i++){
+ ndbout << hf.TableIds[i] << " ";
+ if((i + 1) % 16 == 0)
+ ndbout << endl;
+ }
+ return ndbout;
+}
+
+NdbOut &
+operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::TableDescription & hf){
+ ndbout << "-- Table Description:" << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+
+ SimplePropertiesLinearReader it(&hf.DictTabInfo[0], hf.SectionLength - 2);
+ char buf[1024];
+ for(it.first(); it.valid(); it.next()){
+ switch(it.getValueType()){
+ case SimpleProperties::Uint32Value:
+ ndbout << "Key: " << it.getKey()
+ << " value(" << it.getValueLen() << ") : "
+ << it.getUint32() << endl;
+ break;
+ case SimpleProperties::StringValue:
+ if(it.getValueLen() < sizeof(buf)){
+ it.getString(buf);
+ ndbout << "Key: " << it.getKey()
+ << " value(" << it.getValueLen() << ") : "
+ << "\"" << buf << "\"" << endl;
+ } else {
+ ndbout << "Key: " << it.getKey()
+ << " value(" << it.getValueLen() << ") : "
+ << "\"" << "<TOO LONG>" << "\"" << endl;
+
+ }
+ break;
+ default:
+ ndbout << "Unknown type for key: " << it.getKey()
+ << " type: " << it.getValueType() << endl;
+ }
+ }
+
+ return ndbout;
+}
+
+NdbOut &
+operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::GCPEntry & hf) {
+ ndbout << "-- GCP Entry:" << endl;
+ ndbout << "SectionType: " << hf.SectionType << endl;
+ ndbout << "SectionLength: " << hf.SectionLength << endl;
+ ndbout << "Start GCP: " << hf.StartGCP << endl;
+ ndbout << "Stop GCP: " << hf.StopGCP << endl;
+
+ return ndbout;
+}
+
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
new file mode 100644
index 00000000000..9001491dd64
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -0,0 +1,1393 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Cmvmi.hpp"
+
+#include <Configuration.hpp>
+#include <kernel_types.h>
+#include <TransporterRegistry.hpp>
+#include <NdbOut.hpp>
+#include <NdbMem.h>
+
+#include <SignalLoggerManager.hpp>
+#include <FastScheduler.hpp>
+
+#define DEBUG(x) { ndbout << "CMVMI::" << x << endl; }
+
+#include <signaldata/TestOrd.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/TamperOrd.hpp>
+#include <signaldata/StartOrd.hpp>
+#include <signaldata/CloseComReqConf.hpp>
+#include <signaldata/SetLogLevelOrd.hpp>
+#include <signaldata/EventSubscribeReq.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/DisconnectRep.hpp>
+
+#include <EventLogger.hpp>
+#include <TimeQueue.hpp>
+
+#include <NdbSleep.h>
+#include <SafeCounter.hpp>
+
+// Used here only to print event reports on stdout/console.
+EventLogger g_eventLogger;
+extern int simulate_error_during_shutdown;
+
+Cmvmi::Cmvmi(const Configuration & conf) :
+ SimulatedBlock(CMVMI, conf)
+ ,theConfig((Configuration&)conf)
+ ,subscribers(subscriberPool)
+{
+ BLOCK_CONSTRUCTOR(Cmvmi);
+
+ Uint32 long_sig_buffer_size;
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_LONG_SIGNAL_BUFFER,
+ &long_sig_buffer_size);
+
+ long_sig_buffer_size= long_sig_buffer_size / 256;
+ g_sectionSegmentPool.setSize(long_sig_buffer_size);
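+ // The configured long-signal buffer size is given in bytes; dividing by 256
+ // (presumably the size of one section segment) yields the number of
+ // segments for g_sectionSegmentPool.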
+
+ // Add received signals
+ addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP);
+ addRecSignal(GSN_DISCONNECT_REP, &Cmvmi::execDISCONNECT_REP);
+
+ addRecSignal(GSN_NDB_TAMPER, &Cmvmi::execNDB_TAMPER, true);
+ addRecSignal(GSN_SET_LOGLEVELORD, &Cmvmi::execSET_LOGLEVELORD);
+ addRecSignal(GSN_EVENT_REP, &Cmvmi::execEVENT_REP);
+ addRecSignal(GSN_STTOR, &Cmvmi::execSTTOR);
+ addRecSignal(GSN_CLOSE_COMREQ, &Cmvmi::execCLOSE_COMREQ);
+ addRecSignal(GSN_ENABLE_COMORD, &Cmvmi::execENABLE_COMORD);
+ addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
+ addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
+
+ addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ);
+ addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
+ addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ);
+ addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF);
+ addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF);
+ addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
+ addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
+ addRecSignal(GSN_EVENT_SUBSCRIBE_REQ,
+ &Cmvmi::execEVENT_SUBSCRIBE_REQ);
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Cmvmi::execDUMP_STATE_ORD);
+
+ addRecSignal(GSN_TESTSIG, &Cmvmi::execTESTSIG);
+
+ subscriberPool.setSize(5);
+
+ const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator();
+ for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
+ Uint32 logLevel;
+ if(!ndb_mgm_get_int_parameter(db, CFG_MIN_LOGLEVEL+j, &logLevel)){
+ clogLevel.setLogLevel((LogLevel::EventCategory)j,
+ logLevel);
+ }
+ }
+
+ ndb_mgm_configuration_iterator * iter = theConfig.getClusterConfigIterator();
+ for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){
+ jam();
+ Uint32 nodeId;
+ Uint32 nodeType;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(iter,CFG_NODE_ID, &nodeId));
+ ndbrequire(!ndb_mgm_get_int_parameter(iter,CFG_TYPE_OF_SECTION,&nodeType));
+
+ switch(nodeType){
+ case NodeInfo::DB:
+ c_dbNodes.set(nodeId);
+ break;
+ case NodeInfo::API:
+ case NodeInfo::MGM:
+ case NodeInfo::REP:
+ break;
+ default:
+ ndbrequire(false);
+ }
+ setNodeInfo(nodeId).m_type = nodeType;
+ }
+
+ setNodeInfo(getOwnNodeId()).m_connected = true;
+}
+
+Cmvmi::~Cmvmi()
+{
+}
+
+
+void Cmvmi::execNDB_TAMPER(Signal* signal)
+{
+ jamEntry();
+ SET_ERROR_INSERT_VALUE(signal->theData[0]);
+ if(ERROR_INSERTED(9999)){
+ CRASH_INSERTION(9999);
+ }
+
+ if(ERROR_INSERTED(9998)){
+ while(true) NdbSleep_SecSleep(1);
+ }
+
+ if(ERROR_INSERTED(9997)){
+ ndbrequire(false);
+ }
+
+#ifndef NDB_WIN32
+ if(ERROR_INSERTED(9996)){
+ simulate_error_during_shutdown= SIGSEGV;
+ ndbrequire(false);
+ }
+
+ if(ERROR_INSERTED(9995)){
+ simulate_error_during_shutdown= SIGSEGV;
+ kill(getpid(), SIGABRT);
+ }
+#endif
+}//execNDB_TAMPER()
+
+void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
+{
+ SetLogLevelOrd * const llOrd = (SetLogLevelOrd *)&signal->theData[0];
+ LogLevel::EventCategory category;
+ Uint32 level;
+ jamEntry();
+
+ for(unsigned int i = 0; i<llOrd->noOfEntries; i++){
+ category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16);
+ level = llOrd->theData[i] & 0xFFFF;
+
+ clogLevel.setLogLevel(category, level);
+ }
+}//execSET_LOGLEVELORD()
+
+void Cmvmi::execEVENT_REP(Signal* signal)
+{
+ //-----------------------------------------------------------------------
+ // This message is sent to report any types of events in NDB.
+ // Based on the log level they will be either ignored or
+ // reported. Currently they are printed, but they will be
+ // transferred to the management server for further distribution
+ // to the graphical management interface.
+ //-----------------------------------------------------------------------
+ EventReport * const eventReport = (EventReport *)&signal->theData[0];
+ Ndb_logevent_type eventType = eventReport->getEventType();
+
+ jamEntry();
+
+ /**
+ * If entry is not found
+ */
+ Uint32 threshold;
+ LogLevel::EventCategory eventCategory;
+ Logger::LoggerLevel severity;
+ EventLoggerBase::EventTextFunction textF;
+ if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity,textF))
+ return;
+
+ SubscriberPtr ptr;
+ for(subscribers.first(ptr); ptr.i != RNIL; subscribers.next(ptr)){
+ if(ptr.p->logLevel.getLogLevel(eventCategory) < threshold){
+ continue;
+ }
+
+ sendSignal(ptr.p->blockRef, GSN_EVENT_REP, signal, signal->length(), JBB);
+ }
+
+ if(clogLevel.getLogLevel(eventCategory) < threshold){
+ return;
+ }
+
+ // Print the event info
+ g_eventLogger.log(eventReport->getEventType(), signal->theData);
+
+ return;
+}//execEVENT_REP()
+
+void
+Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
+ EventSubscribeReq * subReq = (EventSubscribeReq *)&signal->theData[0];
+ SubscriberPtr ptr;
+ jamEntry();
+ DBUG_ENTER("Cmvmi::execEVENT_SUBSCRIBE_REQ");
+
+ /**
+ * Search for subscription
+ */
+ for(subscribers.first(ptr); ptr.i != RNIL; subscribers.next(ptr)){
+ if(ptr.p->blockRef == subReq->blockRef)
+ break;
+ }
+
+ if(ptr.i == RNIL){
+ /**
+ * Create a new one
+ */
+ if(subscribers.seize(ptr) == false){
+ sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB);
+ return;
+ }
+ ptr.p->logLevel.clear();
+ ptr.p->blockRef = subReq->blockRef;
+ }
+
+ if(subReq->noOfEntries == 0){
+ /**
+ * Cancel subscription
+ */
+ subscribers.release(ptr.i);
+ } else {
+ /**
+ * Update subscription
+ */
+ LogLevel::EventCategory category;
+ Uint32 level = 0;
+ for(Uint32 i = 0; i<subReq->noOfEntries; i++){
+ category = (LogLevel::EventCategory)(subReq->theData[i] >> 16);
+ level = subReq->theData[i] & 0xFFFF;
+ ptr.p->logLevel.setLogLevel(category, level);
+ DBUG_PRINT("info",("entry %d: level=%d, category= %d", i, level, category));
+ }
+ }
+
+ signal->theData[0] = ptr.i;
+ sendSignal(ptr.p->blockRef, GSN_EVENT_SUBSCRIBE_CONF, signal, 1, JBB);
+ DBUG_VOID_RETURN;
+}
+
+void
+Cmvmi::cancelSubscription(NodeId nodeId){
+
+ SubscriberPtr ptr;
+ subscribers.first(ptr);
+
+ while(ptr.i != RNIL){
+ Uint32 i = ptr.i;
+ BlockReference blockRef = ptr.p->blockRef;
+
+ subscribers.next(ptr);
+
+ if(refToNode(blockRef) == nodeId){
+ subscribers.release(i);
+ }
+ }
+}
+
+void Cmvmi::sendSTTORRY(Signal* signal)
+{
+ jam();
+ signal->theData[3] = 1;
+ signal->theData[4] = 3;
+ signal->theData[5] = 8;
+ signal->theData[6] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB);
+}//Cmvmi::sendSTTORRY
+
+
+void Cmvmi::execSTTOR(Signal* signal)
+{
+ Uint32 theStartPhase = signal->theData[1];
+
+ jamEntry();
+ if (theStartPhase == 1){
+ jam();
+ sendSTTORRY(signal);
+ return;
+ } else if (theStartPhase == 3) {
+ jam();
+ globalData.activateSendPacked = 1;
+ sendSTTORRY(signal);
+ } else if (theStartPhase == 8){
+ /*---------------------------------------------------*/
+ /* Open com to API + REP nodes */
+ /*---------------------------------------------------*/
+ signal->theData[0] = 0; // no answer
+ signal->theData[1] = 0; // no id
+ signal->theData[2] = NodeInfo::API;
+ execOPEN_COMREQ(signal);
+ signal->theData[0] = 0; // no answer
+ signal->theData[1] = 0; // no id
+ signal->theData[2] = NodeInfo::REP;
+ execOPEN_COMREQ(signal);
+ globalData.theStartLevel = NodeState::SL_STARTED;
+ sendSTTORRY(signal);
+ } else {
+ jam();
+
+ if(theConfig.lockPagesInMainMemory()){
+ int res = NdbMem_MemLockAll();
+ if(res != 0){
+ g_eventLogger.warning("Failed to memlock pages");
+ warningEvent("Failed to memlock pages");
+ }
+ }
+
+ sendSTTORRY(signal);
+ }
+}
+
+void Cmvmi::execCLOSE_COMREQ(Signal* signal)
+{
+ // Close communication with the node and halt input/output from
+ // other blocks than QMGR
+
+ CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
+
+ const BlockReference userRef = closeCom->xxxBlockRef;
+ Uint32 failNo = closeCom->failNo;
+// Uint32 noOfNodes = closeCom->noOfNodes;
+
+ jamEntry();
+ for (unsigned i = 0; i < MAX_NODES; i++){
+ if(NodeBitmask::get(closeCom->theNodes, i)){
+
+ jam();
+
+ //-----------------------------------------------------
+ // Report that the connection to the node is closed
+ //-----------------------------------------------------
+ signal->theData[0] = NDB_LE_CommunicationClosed;
+ signal->theData[1] = i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ globalTransporterRegistry.setIOState(i, HaltIO);
+ globalTransporterRegistry.do_disconnect(i);
+ }
+ }
+ if (failNo != 0) {
+ jam();
+ signal->theData[0] = userRef;
+ signal->theData[1] = failNo;
+ sendSignal(QMGR_REF, GSN_CLOSE_COMCONF, signal, 19, JBA);
+ }
+}
+
+void Cmvmi::execOPEN_COMREQ(Signal* signal)
+{
+ // Connect to the specified NDB node; so far only QMGR is allowed
+ // to communicate with the node
+
+ const BlockReference userRef = signal->theData[0];
+ Uint32 tStartingNode = signal->theData[1];
+ Uint32 tData2 = signal->theData[2];
+ jamEntry();
+
+ const Uint32 len = signal->getLength();
+ if(len == 2){
+ globalTransporterRegistry.do_connect(tStartingNode);
+ globalTransporterRegistry.setIOState(tStartingNode, HaltIO);
+
+ //-----------------------------------------------------
+ // Report that the connection to the node is opened
+ //-----------------------------------------------------
+ signal->theData[0] = NDB_LE_CommunicationOpened;
+ signal->theData[1] = tStartingNode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ //-----------------------------------------------------
+ } else {
+ for(unsigned int i = 1; i < MAX_NODES; i++ ) {
+ jam();
+ if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2){
+ jam();
+ globalTransporterRegistry.do_connect(i);
+ globalTransporterRegistry.setIOState(i, HaltIO);
+
+ signal->theData[0] = NDB_LE_CommunicationOpened;
+ signal->theData[1] = i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ }
+ }
+ }
+
+ if (userRef != 0) {
+ jam();
+ signal->theData[0] = tStartingNode;
+ signal->theData[1] = tData2;
+ sendSignal(userRef, GSN_OPEN_COMCONF, signal, len - 1,JBA);
+ }
+}
+
+void Cmvmi::execENABLE_COMORD(Signal* signal)
+{
+ // Enable communication with all our NDB blocks to this node
+
+ Uint32 tStartingNode = signal->theData[0];
+ globalTransporterRegistry.setIOState(tStartingNode, NoHalt);
+ setNodeInfo(tStartingNode).m_connected = true;
+ //-----------------------------------------------------
+ // Report the version of the node
+ //-----------------------------------------------------
+ signal->theData[0] = NDB_LE_ConnectedApiVersion;
+ signal->theData[1] = tStartingNode;
+ signal->theData[2] = getNodeInfo(tStartingNode).m_version;
+
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ //-----------------------------------------------------
+
+ jamEntry();
+}
+
+void Cmvmi::execDISCONNECT_REP(Signal *signal)
+{
+ const DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ const Uint32 hostId = rep->nodeId;
+ const Uint32 errNo = rep->err;
+
+ jamEntry();
+
+ setNodeInfo(hostId).m_connected = false;
+ setNodeInfo(hostId).m_connectCount++;
+ const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
+ ndbrequire(type != NodeInfo::INVALID);
+
+ if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){
+ jam();
+ DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ rep->nodeId = hostId;
+ rep->err = errNo;
+ sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
+ DisconnectRep::SignalLength, JBA);
+ } else if((globalData.theStartLevel == NodeState::SL_CMVMI ||
+ globalData.theStartLevel == NodeState::SL_STARTING)
+ && type == NodeInfo::MGM) {
+ /**
+ * Someone disconnected during cmvmi period
+ */
+ jam();
+ globalTransporterRegistry.do_connect(hostId);
+ }
+
+ cancelSubscription(hostId);
+
+ signal->theData[0] = NDB_LE_Disconnected;
+ signal->theData[1] = hostId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+}
+
+void Cmvmi::execCONNECT_REP(Signal *signal){
+ const Uint32 hostId = signal->theData[0];
+ jamEntry();
+
+ const NodeInfo::NodeType type = (NodeInfo::NodeType)getNodeInfo(hostId).m_type;
+ ndbrequire(type != NodeInfo::INVALID);
+ globalData.m_nodeInfo[hostId].m_version = 0;
+ globalData.m_nodeInfo[hostId].m_signalVersion = 0;
+
+ if(type == NodeInfo::DB || globalData.theStartLevel >= NodeState::SL_STARTED){
+ jam();
+
+ /**
+ * Inform QMGR that client has connected
+ */
+
+ signal->theData[0] = hostId;
+ sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
+ } else if(globalData.theStartLevel == NodeState::SL_CMVMI ||
+ globalData.theStartLevel == NodeState::SL_STARTING) {
+ jam();
+ /**
+ * Someone connected before start was finished
+ */
+ if(type == NodeInfo::MGM){
+ jam();
+ } else {
+ /**
+ * Don't allow API nodes to connect
+ */
+ abort();
+ globalTransporterRegistry.do_disconnect(hostId);
+ }
+ }
+
+ /* Automatically subscribe events for MGM nodes.
+ */
+ if(type == NodeInfo::MGM){
+ jam();
+ globalTransporterRegistry.setIOState(hostId, NoHalt);
+ }
+
+ //------------------------------------------
+ // Also report this event to the Event handler
+ //------------------------------------------
+ signal->theData[0] = NDB_LE_Connected;
+ signal->theData[1] = hostId;
+ signal->header.theLength = 2;
+
+ execEVENT_REP(signal);
+}
+
+#ifdef VM_TRACE
+void
+modifySignalLogger(bool allBlocks, BlockNumber bno,
+ TestOrd::Command cmd,
+ TestOrd::SignalLoggerSpecification spec){
+ SignalLoggerManager::LogMode logMode;
+
+ /**
+ * Mapping between SignalLoggerManager::LogMode and
+ * TestOrd::SignalLoggerSpecification
+ */
+ switch(spec){
+ case TestOrd::InputSignals:
+ logMode = SignalLoggerManager::LogIn;
+ break;
+ case TestOrd::OutputSignals:
+ logMode = SignalLoggerManager::LogOut;
+ break;
+ case TestOrd::InputOutputSignals:
+ logMode = SignalLoggerManager::LogInOut;
+ break;
+ default:
+ return;
+ break;
+ }
+
+ switch(cmd){
+ case TestOrd::On:
+ globalSignalLoggers.logOn(allBlocks, bno, logMode);
+ break;
+ case TestOrd::Off:
+ globalSignalLoggers.logOff(allBlocks, bno, logMode);
+ break;
+ case TestOrd::Toggle:
+ globalSignalLoggers.logToggle(allBlocks, bno, logMode);
+ break;
+ case TestOrd::KeepUnchanged:
+ // Do nothing
+ break;
+ }
+ globalSignalLoggers.flushSignalLog();
+}
+#endif
+
+void
+Cmvmi::execTEST_ORD(Signal * signal){
+ jamEntry();
+
+#ifdef VM_TRACE
+ TestOrd * const testOrd = (TestOrd *)&signal->theData[0];
+
+ TestOrd::Command cmd;
+
+ {
+ /**
+ * Process Trace command
+ */
+ TestOrd::TraceSpecification traceSpec;
+
+ testOrd->getTraceCommand(cmd, traceSpec);
+ unsigned long traceVal = traceSpec;
+ unsigned long currentTraceVal = globalSignalLoggers.getTrace();
+ switch(cmd){
+ case TestOrd::On:
+ currentTraceVal |= traceVal;
+ break;
+ case TestOrd::Off:
+ currentTraceVal &= (~traceVal);
+ break;
+ case TestOrd::Toggle:
+ currentTraceVal ^= traceVal;
+ break;
+ case TestOrd::KeepUnchanged:
+ // Do nothing
+ break;
+ }
+ globalSignalLoggers.setTrace(currentTraceVal);
+ }
+
+ {
+ /**
+ * Process Log command
+ */
+ TestOrd::SignalLoggerSpecification logSpec;
+ BlockNumber bno;
+ unsigned int loggers = testOrd->getNoOfSignalLoggerCommands();
+
+ if(loggers == (unsigned)~0){ // Apply command to all blocks
+ testOrd->getSignalLoggerCommand(0, bno, cmd, logSpec);
+ modifySignalLogger(true, bno, cmd, logSpec);
+ } else {
+ for(unsigned int i = 0; i<loggers; i++){
+ testOrd->getSignalLoggerCommand(i, bno, cmd, logSpec);
+ modifySignalLogger(false, bno, cmd, logSpec);
+ }
+ }
+ }
+
+ {
+ /**
+ * Process test command
+ */
+ testOrd->getTestCommand(cmd);
+ switch(cmd){
+ case TestOrd::On:{
+ SET_GLOBAL_TEST_ON;
+ }
+ break;
+ case TestOrd::Off:{
+ SET_GLOBAL_TEST_OFF;
+ }
+ break;
+ case TestOrd::Toggle:{
+ TOGGLE_GLOBAL_TEST_FLAG;
+ }
+ break;
+ case TestOrd::KeepUnchanged:
+ // Do nothing
+ break;
+ }
+ }
+
+#endif
+}
+
+void Cmvmi::execSTATISTICS_REQ(Signal* signal)
+{
+ // TODO Note ! This is only a test implementation...
+
+ static int stat1 = 0;
+ jamEntry();
+
+ //ndbout << "data 1: " << signal->theData[1];
+
+ int x = signal->theData[0];
+ stat1++;
+ signal->theData[0] = stat1;
+ sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB);
+
+}//execSTATISTICS_REQ()
+
+
+
+void Cmvmi::execSTOP_ORD(Signal* signal)
+{
+ jamEntry();
+ globalData.theRestartFlag = perform_stop;
+}//execSTOP_ORD()
+
+void
+Cmvmi::execSTART_ORD(Signal* signal) {
+
+ StartOrd * const startOrd = (StartOrd *)&signal->theData[0];
+ jamEntry();
+
+ Uint32 tmp = startOrd->restartInfo;
+ if(StopReq::getPerformRestart(tmp)){
+ jam();
+ /**
+ *
+ */
+ NdbRestartType type = NRT_Default;
+ if(StopReq::getNoStart(tmp) && StopReq::getInitialStart(tmp))
+ type = NRT_NoStart_InitialStart;
+ if(StopReq::getNoStart(tmp) && !StopReq::getInitialStart(tmp))
+ type = NRT_NoStart_Restart;
+ if(!StopReq::getNoStart(tmp) && StopReq::getInitialStart(tmp))
+ type = NRT_DoStart_InitialStart;
+ if(!StopReq::getNoStart(tmp)&&!StopReq::getInitialStart(tmp))
+ type = NRT_DoStart_Restart;
+ NdbShutdown(NST_Restart, type);
+ }
+
+ if(globalData.theRestartFlag == system_started){
+ jam();
+ /**
+ * START_ORD received when already started(ignored)
+ */
+ //ndbout << "START_ORD received when already started(ignored)" << endl;
+ return;
+ }
+
+ if(globalData.theRestartFlag == perform_stop){
+ jam();
+ /**
+ * START_ORD received when stopping(ignored)
+ */
+ //ndbout << "START_ORD received when stopping(ignored)" << endl;
+ return;
+ }
+
+ if(globalData.theStartLevel == NodeState::SL_NOTHING){
+ jam();
+ globalData.theStartLevel = NodeState::SL_CMVMI;
+ /**
+ * Open connections to management servers
+ */
+ for(unsigned int i = 1; i < MAX_NODES; i++ ){
+ if (getNodeInfo(i).m_type == NodeInfo::MGM){
+ if(!globalTransporterRegistry.is_connected(i)){
+ globalTransporterRegistry.do_connect(i);
+ globalTransporterRegistry.setIOState(i, NoHalt);
+ }
+ }
+ }
+ return ;
+ }
+
+ if(globalData.theStartLevel == NodeState::SL_CMVMI){
+ jam();
+ globalData.theStartLevel = NodeState::SL_STARTING;
+ globalData.theRestartFlag = system_started;
+ /**
+ * StartLevel 1
+ *
+ * Do Restart
+ */
+
+ globalScheduler.clear();
+ globalTimeQueue.clear();
+
+ // Disconnect all nodes as part of the system restart.
+ // We need to ensure that we are starting up
+ // without any connected nodes.
+ for(unsigned int i = 1; i < MAX_NODES; i++ ){
+ if (i != getOwnNodeId() && getNodeInfo(i).m_type != NodeInfo::MGM){
+ globalTransporterRegistry.do_disconnect(i);
+ globalTransporterRegistry.setIOState(i, HaltIO);
+ }
+ }
+
+ /**
+ * Start running startphases
+ */
+ sendSignal(NDBCNTR_REF, GSN_START_ORD, signal, 1, JBA);
+ return;
+ }
+}//execSTART_ORD()
+
+void Cmvmi::execTAMPER_ORD(Signal* signal)
+{
+ jamEntry();
+ // TODO We should maybe introduce a CONF and REF signal
+ // to be able to indicate if we really introduced an error.
+#ifdef ERROR_INSERT
+ TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0];
+
+ signal->theData[1] = tamperOrd->errorNo;
+ signal->theData[0] = 5;
+ sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB);
+#endif
+
+}//execTAMPER_ORD()
+
+
+
+void Cmvmi::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ jamEntry();
+ switch (var) {
+
+ // NDBCNTR_REF
+
+ // DBTC
+ case TransactionDeadlockDetectionTimeout:
+ case TransactionInactiveTime:
+ case NoOfConcurrentProcessesHandleTakeover:
+ sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // DBDIH
+ case TimeBetweenLocalCheckpoints:
+ case TimeBetweenGlobalCheckpoints:
+ sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // DBLQH
+ case NoOfConcurrentCheckpointsDuringRestart:
+ case NoOfConcurrentCheckpointsAfterRestart:
+ sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // DBACC
+ case NoOfDiskPagesToDiskDuringRestartACC:
+ case NoOfDiskPagesToDiskAfterRestartACC:
+ sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // DBTUP
+ case NoOfDiskPagesToDiskDuringRestartTUP:
+ case NoOfDiskPagesToDiskAfterRestartTUP:
+ sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // DBDICT
+
+ // NDBCNTR
+ case TimeToWaitAlive:
+
+ // QMGR
+ case HeartbeatIntervalDbDb: // TODO possibly to Ndbcnt as well
+ case HeartbeatIntervalDbApi:
+ case ArbitTimeout:
+ sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
+ break;
+
+ // NDBFS
+
+ // CMVMI
+ case MaxNoOfSavedMessages:
+ case LockPagesInMainMemory:
+ case TimeBetweenWatchDogCheck:
+ case StopOnError:
+ handleSET_VAR_REQ(signal);
+ break;
+
+
+ // Not possible to update (this could of course be handled by each block
+ // instead but I haven't investigated where they belong)
+ case Id:
+ case ExecuteOnComputer:
+ case ShmKey:
+ case MaxNoOfConcurrentOperations:
+ case MaxNoOfConcurrentTransactions:
+ case MemorySpaceIndexes:
+ case MemorySpaceTuples:
+ case MemoryDiskPages:
+ case NoOfFreeDiskClusters:
+ case NoOfDiskClusters:
+ case NoOfFragmentLogFiles:
+ case NoOfDiskClustersPerDiskFile:
+ case NoOfDiskFiles:
+ case MaxNoOfSavedEvents:
+ default:
+
+ int mgmtSrvr = setVarReq->mgmtSrvrBlockRef();
+ sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
+ } // switch
+
+#endif
+}//execSET_VAR_REQ()
+
+
+void Cmvmi::execSET_VAR_CONF(Signal* signal)
+{
+ int mgmtSrvr = signal->theData[0];
+ sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB);
+
+}//execSET_VAR_CONF()
+
+
+void Cmvmi::execSET_VAR_REF(Signal* signal)
+{
+ int mgmtSrvr = signal->theData[0];
+ sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
+
+}//execSET_VAR_REF()
+
+
+void Cmvmi::handleSET_VAR_REQ(Signal* signal) {
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+ switch (var) {
+ case MaxNoOfSavedMessages:
+ theConfig.maxNoOfErrorLogs(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case LockPagesInMainMemory:
+ int result;
+ if (val == 0) {
+ result = NdbMem_MemUnlockAll();
+ }
+ else {
+ result = NdbMem_MemLockAll();
+ }
+ if (result == 0) {
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ }
+ else {
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ }
+ break;
+
+ case TimeBetweenWatchDogCheck:
+ theConfig.timeBetweenWatchDogCheck(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case StopOnError:
+ theConfig.stopOnError(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ return;
+ } // switch
+#endif
+}
+
+#ifdef VM_TRACE
+class RefSignalTest {
+public:
+ enum ErrorCode {
+ OK = 0,
+ NF_FakeErrorREF = 7
+ };
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 errorCode;
+};
+#endif
+
+
+static int iii;
+
+static
+int
+recurse(char * buf, int loops, int arg){
+ char * tmp = (char*)alloca(arg);
+ printf("tmp = %p\n", tmp);
+ for(iii = 0; iii<arg; iii += 1024){
+ tmp[iii] = (iii % 23 + (arg & iii));
+ }
+
+ if(loops == 0)
+ return tmp[345];
+ else
+ return tmp[arg/loops] + recurse(tmp, loops - 1, arg);
+}
+
+void
+Cmvmi::execDUMP_STATE_ORD(Signal* signal)
+{
+
+ sendSignal(QMGR_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(NDBCNTR_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBTC_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBDIH_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBDICT_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBLQH_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBTUP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBACC_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(NDBFS_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(BACKUP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBUTIL_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(SUMA_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(GREP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(TRIX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+ sendSignal(DBTUX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB);
+
+ /**
+ *
+ * Here I can dump CMVMI state if needed
+ */
+ if(signal->theData[0] == 13){
+#if 0
+ int loop = 100;
+ int len = (10*1024*1024);
+ if(signal->getLength() > 1)
+ loop = signal->theData[1];
+ if(signal->getLength() > 2)
+ len = signal->theData[2];
+
+ ndbout_c("recurse(%d loop, %dkb per recurse)", loop, len/1024);
+ int a = recurse(0, loop, len);
+ ndbout_c("after...%d", a);
+#endif
+ }
+
+ DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
+ if (dumpState->args[0] == DumpStateOrd::CmvmiDumpConnections){
+ for(unsigned int i = 1; i < MAX_NODES; i++ ){
+ const char* nodeTypeStr = "";
+ switch(getNodeInfo(i).m_type){
+ case NodeInfo::DB:
+ nodeTypeStr = "DB";
+ break;
+ case NodeInfo::API:
+ nodeTypeStr = "API";
+ break;
+ case NodeInfo::MGM:
+ nodeTypeStr = "MGM";
+ break;
+ case NodeInfo::REP:
+ nodeTypeStr = "REP";
+ break;
+ case NodeInfo::INVALID:
+ nodeTypeStr = 0;
+ break;
+ default:
+ nodeTypeStr = "<UNKNOWN>";
+ }
+
+ if(nodeTypeStr == 0)
+ continue;
+
+ infoEvent("Connection to %d (%s) %s",
+ i,
+ nodeTypeStr,
+ globalTransporterRegistry.getPerformStateString(i));
+ }
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::CmvmiDumpLongSignalMemory){
+ infoEvent("Cmvmi: g_sectionSegmentPool size: %d free: %d",
+ g_sectionSegmentPool.getSize(),
+ g_sectionSegmentPool.getNoOfFree());
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::CmvmiSetRestartOnErrorInsert){
+ if(signal->getLength() == 1)
+ theConfig.setRestartOnErrorInsert((int)NRT_NoStart_Restart);
+ else
+ theConfig.setRestartOnErrorInsert(signal->theData[1]);
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::CmvmiTestLongSigWithDelay) {
+ unsigned i;
+ Uint32 loopCount = dumpState->args[1];
+ const unsigned len0 = 11;
+ const unsigned len1 = 123;
+ Uint32 sec0[len0];
+ Uint32 sec1[len1];
+ for (i = 0; i < len0; i++)
+ sec0[i] = i;
+ for (i = 0; i < len1; i++)
+ sec1[i] = 16 * i;
+ Uint32* sig = signal->getDataPtrSend();
+ sig[0] = reference();
+ sig[1] = 20; // test type
+ sig[2] = 0;
+ sig[3] = 0;
+ sig[4] = loopCount;
+ sig[5] = len0;
+ sig[6] = len1;
+ sig[7] = 0;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = sec0;
+ ptr[0].sz = len0;
+ ptr[1].p = sec1;
+ ptr[1].sz = len1;
+ sendSignal(reference(), GSN_TESTSIG, signal, 8, JBB, ptr, 2);
+ }
+
+#ifdef VM_TRACE
+#if 0
+ {
+ SafeCounterManager mgr(* this); mgr.setSize(1);
+ SafeCounterHandle handle;
+
+ {
+ SafeCounter tmp(mgr, handle);
+ tmp.init<RefSignalTest>(CMVMI, GSN_TESTSIG, /* senderData */ 13);
+ tmp.setWaitingFor(3);
+ ndbrequire(!tmp.done());
+ ndbout_c("Allocted");
+ }
+ ndbrequire(!handle.done());
+ {
+ SafeCounter tmp(mgr, handle);
+ tmp.clearWaitingFor(3);
+ ndbrequire(tmp.done());
+ ndbout_c("Deallocted");
+ }
+ ndbrequire(handle.done());
+ }
+#endif
+#endif
+}//Cmvmi::execDUMP_STATE_ORD()
+
+
+BLOCK_FUNCTIONS(Cmvmi)
+
+static Uint32 g_print;
+static LinearSectionPtr g_test[3];
+
+void
+Cmvmi::execTESTSIG(Signal* signal){
+ Uint32 i;
+ /**
+ * Test of SafeCounter
+ */
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ Uint32 ref = signal->theData[0];
+ Uint32 testType = signal->theData[1];
+ Uint32 fragmentLength = signal->theData[2];
+ g_print = signal->theData[3];
+// Uint32 returnCount = signal->theData[4];
+ Uint32 * secSizes = &signal->theData[5];
+
+ if(g_print){
+ SignalLoggerManager::printSignalHeader(stdout,
+ signal->header,
+ 0,
+ getOwnNodeId(),
+ true);
+ ndbout_c("-- Fixed section --");
+ for(i = 0; i<signal->length(); i++){
+ fprintf(stdout, "H'0x%.8x ", signal->theData[i]);
+ if(((i + 1) % 6) == 0)
+ fprintf(stdout, "\n");
+ }
+ fprintf(stdout, "\n");
+
+ for(i = 0; i<signal->header.m_noOfSections; i++){
+ SegmentedSectionPtr ptr;
+ ndbout_c("-- Section %d --", i);
+ signal->getSection(ptr, i);
+ ndbrequire(ptr.p != 0);
+ print(ptr, stdout);
+ ndbrequire(ptr.sz == secSizes[i]);
+ }
+ }
+
+ /**
+ * Validate lengths
+ */
+ for(i = 0; i<signal->header.m_noOfSections; i++){
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, i);
+ ndbrequire(ptr.p != 0);
+ ndbrequire(ptr.sz == secSizes[i]);
+ }
+
+ /**
+ * Testing send with delay.
+ */
+ if (testType == 20) {
+ if (signal->theData[4] == 0) {
+ releaseSections(signal);
+ return;
+ }
+ signal->theData[4]--;
+ sendSignalWithDelay(reference(), GSN_TESTSIG, signal, 100, 8);
+ return;
+ }
+
+ NodeReceiverGroup rg(CMVMI, c_dbNodes);
+
+ if(signal->getSendersBlockRef() == ref){
+ /**
+ * Signal from API (not via NodeReceiverGroup)
+ */
+ if((testType % 2) == 1){
+ signal->theData[4] = 1;
+ } else {
+ signal->theData[1] --;
+ signal->theData[4] = rg.m_nodes.count();
+ }
+ }
+
+ switch(testType){
+ case 1:
+ sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB);
+ break;
+ case 2:
+ sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB);
+ break;
+ case 3:
+ case 4:{
+ LinearSectionPtr ptr[3];
+ const Uint32 secs = signal->getNoOfSections();
+ for(i = 0; i<secs; i++){
+ SegmentedSectionPtr sptr;
+ signal->getSection(sptr, i);
+ ptr[i].sz = sptr.sz;
+ ptr[i].p = new Uint32[sptr.sz];
+ copy(ptr[i].p, sptr);
+ }
+
+ if(testType == 3){
+ sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB, ptr, secs);
+ } else {
+ sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB, ptr, secs);
+ }
+ for(Uint32 i = 0; i<secs; i++){
+ delete[] ptr[i].p;
+ }
+ break;
+ }
+ case 5:
+ case 6:{
+
+ NodeReceiverGroup tmp;
+ if(testType == 5){
+ tmp = ref;
+ } else {
+ tmp = rg;
+ }
+
+ FragmentSendInfo fragSend;
+ sendFirstFragment(fragSend,
+ tmp,
+ GSN_TESTSIG,
+ signal,
+ signal->length(),
+ JBB,
+ fragmentLength);
+ int count = 1;
+ while(fragSend.m_status != FragmentSendInfo::SendComplete){
+ count++;
+ if(g_print)
+ ndbout_c("Sending fragment %d", count);
+ sendNextSegmentedFragment(signal, fragSend);
+ }
+ break;
+ }
+ case 7:
+ case 8:{
+ LinearSectionPtr ptr[3];
+ const Uint32 secs = signal->getNoOfSections();
+ for(i = 0; i<secs; i++){
+ SegmentedSectionPtr sptr;
+ signal->getSection(sptr, i);
+ ptr[i].sz = sptr.sz;
+ ptr[i].p = new Uint32[sptr.sz];
+ copy(ptr[i].p, sptr);
+ }
+
+ NodeReceiverGroup tmp;
+ if(testType == 7){
+ tmp = ref;
+ } else {
+ tmp = rg;
+ }
+
+ FragmentSendInfo fragSend;
+ sendFirstFragment(fragSend,
+ tmp,
+ GSN_TESTSIG,
+ signal,
+ signal->length(),
+ JBB,
+ ptr,
+ secs,
+ fragmentLength);
+
+ int count = 1;
+ while(fragSend.m_status != FragmentSendInfo::SendComplete){
+ count++;
+ if(g_print)
+ ndbout_c("Sending fragment %d", count);
+ sendNextLinearFragment(signal, fragSend);
+ }
+
+ for(i = 0; i<secs; i++){
+ delete[] ptr[i].p;
+ }
+ break;
+ }
+ case 9:
+ case 10:{
+
+ Callback m_callBack;
+ m_callBack.m_callbackFunction =
+ safe_cast(&Cmvmi::sendFragmentedComplete);
+
+ if(testType == 9){
+ m_callBack.m_callbackData = 9;
+ sendFragmentedSignal(ref,
+ GSN_TESTSIG, signal, signal->length(), JBB,
+ m_callBack,
+ fragmentLength);
+ } else {
+ m_callBack.m_callbackData = 10;
+ sendFragmentedSignal(rg,
+ GSN_TESTSIG, signal, signal->length(), JBB,
+ m_callBack,
+ fragmentLength);
+ }
+ break;
+ }
+ case 11:
+ case 12:{
+
+ const Uint32 secs = signal->getNoOfSections();
+ memset(g_test, 0, sizeof(g_test));
+ for(i = 0; i<secs; i++){
+ SegmentedSectionPtr sptr;
+ signal->getSection(sptr, i);
+ g_test[i].sz = sptr.sz;
+ g_test[i].p = new Uint32[sptr.sz];
+ copy(g_test[i].p, sptr);
+ }
+
+
+ Callback m_callBack;
+ m_callBack.m_callbackFunction =
+ safe_cast(&Cmvmi::sendFragmentedComplete);
+
+ if(testType == 11){
+ m_callBack.m_callbackData = 11;
+ sendFragmentedSignal(ref,
+ GSN_TESTSIG, signal, signal->length(), JBB,
+ g_test, secs,
+ m_callBack,
+ fragmentLength);
+ } else {
+ m_callBack.m_callbackData = 12;
+ sendFragmentedSignal(rg,
+ GSN_TESTSIG, signal, signal->length(), JBB,
+ g_test, secs,
+ m_callBack,
+ fragmentLength);
+ }
+ break;
+ }
+ case 13:{
+ ndbrequire(signal->getNoOfSections() == 0);
+ Uint32 loop = signal->theData[9];
+ if(loop > 0){
+ signal->theData[9] --;
+ sendSignal(CMVMI_REF, GSN_TESTSIG, signal, signal->length(), JBB);
+ return;
+ }
+ sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB);
+ return;
+ }
+ case 14:{
+ Uint32 count = signal->theData[8];
+ signal->theData[10] = count * rg.m_nodes.count();
+ for(i = 0; i<count; i++){
+ sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB);
+ }
+ return;
+ }
+
+ default:
+ ndbrequire(false);
+ }
+ return;
+}
+
+void
+Cmvmi::sendFragmentedComplete(Signal* signal, Uint32 data, Uint32 returnCode){
+ if(g_print)
+ ndbout_c("sendFragmentedComplete: %d", data);
+ if(data == 11 || data == 12){
+ for(Uint32 i = 0; i<3; i++){
+ if(g_test[i].p != 0)
+ delete[] g_test[i].p;
+ }
+ }
+}
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
new file mode 100644
index 00000000000..1c91f564749
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
@@ -0,0 +1,124 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef Cmvmi_H_
+#define Cmvmi_H_
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <LogLevel.hpp>
+
+#include <ArrayList.hpp>
+
+/**
+ * Cmvmi class
+ */
+class Cmvmi : public SimulatedBlock {
+public:
+ Cmvmi(const Configuration & conf);
+ virtual ~Cmvmi();
+
+private:
+ /**
+ * These methods used to be reportXXX
+ *
+ * But they interfered with the execution model in a nasty way,
+ * so they have been turned into exec-methods used via prio A signals
+ */
+ void execDISCONNECT_REP(Signal*);
+ void execCONNECT_REP(Signal*);
+
+private:
+ BLOCK_DEFINES(Cmvmi);
+
+ // The signal processing functions
+ void execNDB_TAMPER(Signal* signal);
+ void execSET_LOGLEVELORD(Signal* signal);
+ void execEVENT_REP(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execCLOSE_COMREQ(Signal* signal);
+ void execENABLE_COMORD(Signal* signal);
+ void execOPEN_COMREQ(Signal* signal);
+ void execSIZEALT_ACK(Signal* signal);
+ void execTEST_ORD(Signal* signal);
+
+ void execSTATISTICS_REQ(Signal* signal);
+ void execSTOP_ORD(Signal* signal);
+ void execSTART_ORD(Signal* signal);
+ void execTAMPER_ORD(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execSET_VAR_CONF(Signal* signal);
+ void execSET_VAR_REF(Signal* signal);
+
+ void execDUMP_STATE_ORD(Signal* signal);
+
+ void execEVENT_SUBSCRIBE_REQ(Signal *);
+ void cancelSubscription(NodeId nodeId);
+
+ void handleSET_VAR_REQ(Signal* signal);
+
+ void execTESTSIG(Signal* signal);
+
+ char theErrorMessage[256];
+ void sendSTTORRY(Signal* signal);
+
+ LogLevel clogLevel;
+ NdbNodeBitmask c_dbNodes;
+
+ class Configuration & theConfig;
+
+ /**
+ * This struct defines the data needed for an EVENT_REP subscriber
+ */
+ struct EventRepSubscriber {
+ /**
+ * What log level is the subscriber using
+ */
+ LogLevel logLevel;
+
+ /**
+ * Which block reference it uses
+ * (where the EVENT_REPs should be forwarded)
+ */
+ BlockReference blockRef;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ };
+ typedef Ptr<EventRepSubscriber> SubscriberPtr;
+
+ /**
+ * Pool of EventRepSubscriber records
+ */
+ ArrayPool<EventRepSubscriber> subscriberPool;
+
+ /**
+ * List of current subscribers
+ */
+ ArrayList<EventRepSubscriber> subscribers;
+
+private:
+ // Declared but not defined
+ Cmvmi(const Cmvmi &obj);
+ void operator = (const Cmvmi &);
+
+ void sendFragmentedComplete(Signal* signal, Uint32 data, Uint32 returnCode);
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am b/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am
new file mode 100644
index 00000000000..dc2e12746fd
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Makefile.am
@@ -0,0 +1,24 @@
+
+noinst_LIBRARIES = libcmvmi.a
+
+libcmvmi_a_SOURCES = Cmvmi.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libcmvmi.dsp
+
+libcmvmi.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libcmvmi_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
new file mode 100644
index 00000000000..6a65da5bb6a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -0,0 +1,1470 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBACC_H
+#define DBACC_H
+
+
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+
+// primary key is stored in TUP
+#include <Dbtup.hpp>
+
+#ifdef DBACC_C
+// Debug Macros
+#define dbgWord32(ptr, ind, val)
+
+/*
+#define dbgWord32(ptr, ind, val) \
+if(debug_jan){ \
+tmp_val = val; \
+switch(ind){ \
+case 1: strcpy(tmp_string, "ZPOS_PAGE_TYPE "); \
+break; \
+case 2: strcpy(tmp_string, "ZPOS_NO_ELEM_IN_PAGE"); \
+break; \
+case 3: strcpy(tmp_string, "ZPOS_CHECKSUM "); \
+break; \
+case 4: strcpy(tmp_string, "ZPOS_OVERFLOWREC "); \
+break; \
+case 5: strcpy(tmp_string, "ZPOS_FREE_AREA_IN_PAGE"); \
+break; \
+case 6: strcpy(tmp_string, "ZPOS_LAST_INDEX "); \
+break; \
+case 7: strcpy(tmp_string, "ZPOS_INSERT_INDEX "); \
+break; \
+case 8: strcpy(tmp_string, "ZPOS_ARRAY_POS "); \
+break; \
+case 9: strcpy(tmp_string, "ZPOS_NEXT_FREE_INDEX"); \
+break; \
+case 10: strcpy(tmp_string, "ZPOS_NEXT_PAGE "); \
+break; \
+case 11: strcpy(tmp_string, "ZPOS_PREV_PAGE "); \
+break; \
+default: sprintf(tmp_string, "%-20d", ind);\
+} \
+ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: " << tmp_val << " \tLINE: " << __LINE__ << endl; \
+}\
+*/
+
+#define dbgUndoword(ptr, ind, val)
+
+// Constants
+/** ------------------------------------------------------------------------
+ * THESE ARE CONSTANTS THAT ARE USED FOR DEFINING THE SIZE OF BUFFERS, THE
+ * SIZE OF PAGE HEADERS, THE NUMBER OF BUFFERS IN A PAGE AND A NUMBER OF
+ * OTHER CONSTANTS WHICH ARE CHANGED WHEN THE BUFFER SIZE IS CHANGED.
+ * ----------------------------------------------------------------------- */
+#define ZHEAD_SIZE 32
+#define ZCON_HEAD_SIZE 2
+#define ZBUF_SIZE 28
+#define ZEMPTYLIST 72
+#define ZUP_LIMIT 14
+#define ZDOWN_LIMIT 12
+#define ZSHIFT_PLUS 5
+#define ZSHIFT_MINUS 2
+#define ZFREE_LIMIT 65
+#define ZNO_CONTAINERS 64
+#define ZELEM_HEAD_SIZE 1
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DEFINE THE USE OF THE PAGE HEADER IN THE INDEX PAGES. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_PAGE_ID 0
+#define ZPOS_PAGE_TYPE 1
+#define ZPOS_PAGE_TYPE_BIT 14
+#define ZPOS_EMPTY_LIST 1
+#define ZPOS_ALLOC_CONTAINERS 2
+#define ZPOS_CHECKSUM 3
+#define ZPOS_OVERFLOWREC 4
+#define ZPOS_NO_ELEM_IN_PAGE 2
+#define ZPOS_FREE_AREA_IN_PAGE 5
+#define ZPOS_LAST_INDEX 6
+#define ZPOS_INSERT_INDEX 7
+#define ZPOS_ARRAY_POS 8
+#define ZPOS_NEXT_FREE_INDEX 9
+#define ZPOS_NEXT_PAGE 10
+#define ZPOS_PREV_PAGE 11
+#define ZNORMAL_PAGE_TYPE 0
+#define ZOVERFLOW_PAGE_TYPE 1
+#define ZDEFAULT_LIST 3
+#define ZWORDS_IN_PAGE 2048
+/* --------------------------------------------------------------------------------- */
+/* CONSTANTS FOR THE ZERO PAGES */
+/* --------------------------------------------------------------------------------- */
+#define ZPAGEZERO_PREV_UNDOP 8
+#define ZPAGEZERO_NO_OVER_PAGE 9
+#define ZPAGEZERO_TABID 10
+#define ZPAGEZERO_FRAGID0 11
+#define ZPAGEZERO_FRAGID1 12
+#define ZPAGEZERO_HASH_CHECK 13
+#define ZPAGEZERO_DIRSIZE 14
+#define ZPAGEZERO_EXPCOUNTER 15
+#define ZPAGEZERO_NEXT_UNDO_FILE 16
+#define ZPAGEZERO_SLACK 17
+#define ZPAGEZERO_NO_PAGES 18
+#define ZPAGEZERO_HASHCHECKBIT 19
+#define ZPAGEZERO_K 20
+#define ZPAGEZERO_LHFRAGBITS 21
+#define ZPAGEZERO_LHDIRBITS 22
+#define ZPAGEZERO_LOCALKEYLEN 23
+#define ZPAGEZERO_MAXP 24
+#define ZPAGEZERO_MAXLOADFACTOR 25
+#define ZPAGEZERO_MINLOADFACTOR 26
+#define ZPAGEZERO_MYFID 27
+#define ZPAGEZERO_LAST_OVER_INDEX 28
+#define ZPAGEZERO_P 29
+#define ZPAGEZERO_NO_OF_ELEMENTS 30
+#define ZPAGEZERO_ELEMENT_LENGTH 31
+#define ZPAGEZERO_KEY_LENGTH 32
+#define ZPAGEZERO_NODETYPE 33
+#define ZPAGEZERO_SLACK_CHECK 34
+/* --------------------------------------------------------------------------------- */
+/* CONSTANTS IN ALPHABETICAL ORDER */
+/* --------------------------------------------------------------------------------- */
+#define ZADDFRAG 0
+#define ZCOPY_NEXT 1
+#define ZCOPY_NEXT_COMMIT 2
+#define ZCOPY_COMMIT 3
+#define ZCOPY_REPEAT 4
+#define ZCOPY_ABORT 5
+#define ZCOPY_CLOSE 6
+#define ZDIRARRAY 68
+#define ZDIRRANGESIZE 65
+//#define ZEMPTY_FRAGMENT 0
+#define ZFRAGMENTSIZE 64
+#define ZFIRSTTIME 1
+#define ZFS_CONNECTSIZE 300
+#define ZFS_OPSIZE 100
+#define ZKEYINKEYREQ 4
+#define ZLCP_CONNECTSIZE 30
+#define ZLEFT 1
+#define ZLOCALLOGFILE 2
+#define ZLOCKED 0
+#define ZMAXSCANSIGNALLEN 20
+#define ZMAINKEYLEN 8
+#define ZMAX_UNDO_VERSION 4
+#define ZNO_OF_DISK_VERSION 3
+#define ZNO_OF_OP_PER_SIGNAL 20
+//#define ZNOT_EMPTY_FRAGMENT 1
+#define ZNR_OF_UNDO_PAGE_GROUP 16
+#define ZOP_HEAD_INFO_LN 3
+#define ZOPRECSIZE 740
+#define ZOVERFLOWRECSIZE 5
+#define ZPAGE8_BASE_ADD 1
+#define ZPAGESIZE 128
+#define ZPARALLEL_QUEUE 1
+#define ZPDIRECTORY 1
+#define ZSCAN_MAX_LOCK 4
+#define ZSERIAL_QUEUE 2
+#define ZSPH1 1
+#define ZSPH2 2
+#define ZSPH3 3
+#define ZSPH6 6
+#define ZREADLOCK 0
+#define ZRIGHT 2
+#define ZROOTFRAGMENTSIZE 32
+#define ZSCAN_LOCK_ALL 3
+#define ZSCAN_OP 5
+#define ZSCAN_REC_SIZE 256
+#define ZSR_VERSION_REC_SIZE 16
+#define ZSTAND_BY 2
+#define ZTABLESIZE 16
+#define ZTABMAXINDEX 3
+#define ZUNDEFINED_OP 6
+#define ZUNDOHEADSIZE 7
+#define ZUNLOCKED 1
+#define ZUNDOPAGE_BASE_ADD 2
+#define ZUNDOPAGEINDEXBITS 13
+#define ZUNDOPAGEINDEX_MASK 0x1fff
+#define ZWRITEPAGESIZE 8
+#define ZWRITE_UNDOPAGESIZE 2
+#define ZMIN_UNDO_PAGES_AT_COMMIT 4
+#define ZMIN_UNDO_PAGES_AT_OPERATION 10
+#define ZMIN_UNDO_PAGES_AT_EXPAND 16
+
+/* --------------------------------------------------------------------------------- */
+/* CONTINUEB CODES */
+/* --------------------------------------------------------------------------------- */
+#define ZLOAD_BAL_LCP_TIMER 0
+#define ZINITIALISE_RECORDS 1
+#define ZSR_READ_PAGES_ALLOC 2
+#define ZSTART_UNDO 3
+#define ZSEND_SCAN_HBREP 4
+#define ZREL_ROOT_FRAG 5
+#define ZREL_FRAG 6
+#define ZREL_DIR 7
+#define ZREPORT_MEMORY_USAGE 8
+#define ZLCP_OP_WRITE_RT_BREAK 9
+
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES */
+/* ------------------------------------------------------------------------- */
+#define ZLIMIT_OF_ERROR 600 // Limit check for error codes
+#define ZCHECKROOT_ERROR 601 // Delete fragment error code
+#define ZCONNECT_SIZE_ERROR 602 // ACC_SEIZEREF
+#define ZDIR_RANGE_ERROR 603 // Add fragment error code
+#define ZFULL_FRAGRECORD_ERROR 604 // Add fragment error code
+#define ZFULL_ROOTFRAGRECORD_ERROR 605 // Add fragment error code
+#define ZROOTFRAG_STATE_ERROR 606 // Add fragment
+#define ZOVERTAB_REC_ERROR 607 // Add fragment
+
+#define ZSCAN_REFACC_CONNECT_ERROR 608 // ACC_SCANREF
+#define ZFOUR_ACTIVE_SCAN_ERROR 609 // ACC_SCANREF
+#define ZNULL_SCAN_REC_ERROR 610 // ACC_SCANREF
+
+#define ZDIRSIZE_ERROR 623
+#define ZOVER_REC_ERROR 624 // Insufficient Space
+#define ZPAGESIZE_ERROR 625
+#define ZTUPLE_DELETED_ERROR 626
+#define ZREAD_ERROR 626
+#define ZWRITE_ERROR 630
+#define ZTO_OP_STATE_ERROR 631
+#define ZTOO_EARLY_ACCESS_ERROR 632
+#define ZTEMPORARY_ACC_UNDO_FAILURE 677
+#endif
+
+class ElementHeader {
+ /**
+ *
+ * l = Locked -- If true contains operation else scan bits + hash value
+ * s = Scan bits
+ * h = Hash value
+ * o = Operation ptr I
+ *
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * lssssssssssss hhhhhhhhhhhhhhhh
+ * ooooooooooooooooooooooooooooooo
+ */
+public:
+ STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF );
+
+ static bool getLocked(Uint32 data);
+ static bool getUnlocked(Uint32 data);
+ static Uint32 getScanBits(Uint32 data);
+ static Uint32 getHashValuePart(Uint32 data);
+ static Uint32 getOpPtrI(Uint32 data);
+
+ static Uint32 setLocked(Uint32 opPtrI);
+ static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits);
+ static Uint32 setScanBit(Uint32 header, Uint32 scanBit);
+ static Uint32 clearScanBit(Uint32 header, Uint32 scanBit);
+};
+
+inline
+bool
+ElementHeader::getLocked(Uint32 data){
+ return (data & 1) == 0;
+}
+
+inline
+bool
+ElementHeader::getUnlocked(Uint32 data){
+ return (data & 1) == 1;
+}
+
+inline
+Uint32
+ElementHeader::getScanBits(Uint32 data){
+ assert(getUnlocked(data));
+ return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1);
+}
+
+inline
+Uint32
+ElementHeader::getHashValuePart(Uint32 data){
+ assert(getUnlocked(data));
+ return data >> 16;
+}
+
+inline
+Uint32
+ElementHeader::getOpPtrI(Uint32 data){
+ assert(getLocked(data));
+ return data >> 1;
+}
+
+inline
+Uint32
+ElementHeader::setLocked(Uint32 opPtrI){
+ return (opPtrI << 1) + 0;
+}
+inline
+Uint32
+ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){
+ return (hashValue << 16) + (scanBits << 1) + 1;
+}
+
+inline
+Uint32
+ElementHeader::setScanBit(Uint32 header, Uint32 scanBit){
+ assert(getUnlocked(header));
+ return header | (scanBit << 1);
+}
+
+inline
+Uint32
+ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){
+ assert(getUnlocked(header));
+ return header & (~(scanBit << 1));
+}
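+
+/**
+ * Usage sketch of the encoding above (an illustration only, assuming opPtrI
+ * fits in 31 bits): a locked header round-trips the operation pointer and an
+ * unlocked header round-trips the hash value part.
+ */
+static
+inline
+void
+elementHeaderUsageSketch(Uint32 opPtrI){
+ const Uint32 unlocked = ElementHeader::setUnlocked(0xABCD, 1);
+ assert(ElementHeader::getUnlocked(unlocked));
+ assert(ElementHeader::getHashValuePart(unlocked) == 0xABCD);
+
+ const Uint32 locked = ElementHeader::setLocked(opPtrI);
+ assert(ElementHeader::getLocked(locked));
+ assert(ElementHeader::getOpPtrI(locked) == opPtrI);
+}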
+
+
+class Dbacc: public SimulatedBlock {
+public:
+// State values
+enum State {
+ FREEFRAG = 0,
+ ACTIVEFRAG = 1,
+ SEND_QUE_OP = 2,
+ WAIT_ACC_LCPREQ = 3,
+ LCP_SEND_PAGES = 4,
+ LCP_SEND_OVER_PAGES = 5,
+ LCP_SEND_ZERO_PAGE = 6,
+ SR_READ_PAGES = 7,
+ SR_READ_OVER_PAGES = 8,
+ WAIT_ZERO_PAGE_STORED = 9,
+ WAIT_NOTHING = 10,
+ WAIT_OPEN_UNDO_LCP = 11,
+ WAIT_OPEN_UNDO_LCP_NEXT = 12,
+ WAIT_OPEN_DATA_FILE_FOR_READ = 13,
+ WAIT_OPEN_DATA_FILE_FOR_WRITE = 14,
+ OPEN_UNDO_FILE_SR = 15,
+ READ_UNDO_PAGE = 16,
+ READ_UNDO_PAGE_AND_CLOSE = 17,
+ WAIT_READ_DATA = 18,
+ WAIT_READ_PAGE_ZERO = 19,
+ WAIT_WRITE_DATA = 20,
+ WAIT_WRITE_UNDO = 21,
+ WAIT_WRITE_UNDO_EXIT = 22,
+ WAIT_CLOSE_UNDO = 23,
+ LCP_CLOSE_DATA = 24,
+ SR_CLOSE_DATA = 25,
+ WAIT_ONE_CONF = 26,
+ WAIT_TWO_CONF = 27,
+ LCP_FREE = 28,
+ LCP_ACTIVE = 29,
+ FREE_OP = 30,
+ WAIT_EXE_OP = 32,
+ WAIT_IN_QUEUE = 34,
+ EXE_OP = 35,
+ SCAN_ACTIVE = 36,
+ SCAN_WAIT_IN_QUEUE = 37,
+ IDLE = 39,
+ ACTIVE = 40,
+ WAIT_COMMIT_ABORT = 41,
+ ABORT = 42,
+ ABORTADDFRAG = 43,
+ REFUSEADDFRAG = 44,
+ DELETEFRAG = 45,
+ DELETETABLE = 46,
+ UNDEFINEDROOT = 47,
+ ADDFIRSTFRAG = 48,
+ ADDSECONDFRAG = 49,
+ DELETEFIRSTFRAG = 50,
+ DELETESECONDFRAG = 51,
+ ACTIVEROOT = 52,
+ LCP_CREATION = 53
+};
+
+// Records
+
+/* --------------------------------------------------------------------------------- */
+/* UNDO HEADER RECORD */
+/* --------------------------------------------------------------------------------- */
+
+ struct UndoHeader {
+ enum UndoHeaderType{
+ ZPAGE_INFO = 0,
+ ZOVER_PAGE_INFO = 1,
+ ZOP_INFO = 2,
+ ZNO_UNDORECORD_TYPES = 3
+ };
+ UintR tableId;
+ UintR rootFragId;
+ UintR localFragId;
+ UintR variousInfo;
+ UintR logicalPageId;
+ UintR prevUndoAddressForThisFrag;
+ UintR prevUndoAddress;
+ };
+
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORY RANGE */
+/* --------------------------------------------------------------------------------- */
+ struct DirRange {
+ Uint32 dirArray[256];
+ }; /* p2c: size = 1024 bytes */
+
+ typedef Ptr<DirRange> DirRangePtr;
+
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORYARRAY */
+/* --------------------------------------------------------------------------------- */
+struct Directoryarray {
+ Uint32 pagep[256];
+}; /* p2c: size = 1024 bytes */
+
+ typedef Ptr<Directoryarray> DirectoryarrayPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FRAGMENTREC. ALL INFORMATION ABOUT FRAGMENT AND HASH TABLE IS SAVED IN FRAGMENT */
+/* REC A POINTER TO FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC FRAGMENT */
+/* --------------------------------------------------------------------------------- */
+struct Fragmentrec {
+//-----------------------------------------------------------------------------
+// References to long key pages with free area. Some type of buddy structure
+// where references in higher index have more free space.
+//-----------------------------------------------------------------------------
+ Uint32 longKeyPageArray[4];
+
+//-----------------------------------------------------------------------------
+// These variables keep track of allocated pages, the number of them and the
+// start file page of them. Used during local checkpoints.
+//-----------------------------------------------------------------------------
+ Uint32 datapages[8];
+ Uint32 activeDataPage;
+ Uint32 activeDataFilePage;
+
+//-----------------------------------------------------------------------------
+// Temporary variables used during shrink and expand process.
+//-----------------------------------------------------------------------------
+ Uint32 expReceivePageptr;
+ Uint32 expReceiveIndex;
+ Uint32 expReceiveForward;
+ Uint32 expSenderDirIndex;
+ Uint32 expSenderDirptr;
+ Uint32 expSenderIndex;
+ Uint32 expSenderPageptr;
+
+//-----------------------------------------------------------------------------
+// List of lock owners and list of lock waiters to support LCP handling
+//-----------------------------------------------------------------------------
+ Uint32 lockOwnersList;
+ Uint32 firstWaitInQueOp;
+ Uint32 lastWaitInQueOp;
+ Uint32 sentWaitInQueOp;
+
+//-----------------------------------------------------------------------------
+// References to Directory Ranges (which in turn references directories, which
+// in its turn references the pages) for the bucket pages and the overflow
+// bucket pages.
+//-----------------------------------------------------------------------------
+ Uint32 directory;
+ Uint32 dirsize;
+ Uint32 overflowdir;
+ Uint32 lastOverIndex;
+
+//-----------------------------------------------------------------------------
+// These variables are used to support LCP and Restore from disk.
+// lcpDirIndex: used during LCP as the frag page id currently stored.
+// lcpMaxDirIndex: The dirsize at start of LCP.
+// lcpMaxOverDirIndex: The overflow directory size (lastOverIndex) at start of LCP
+// During an LCP one writes the minimum of the number of pages in the directory
+// and the number of pages at the start of the LCP.
+// noStoredPages: Number of bucket pages written in LCP, used at restore
+// noOfStoredOverPages: Number of overflow pages written in LCP, used at restore
+// This variable is also used during LCP to calculate this number.
+//-----------------------------------------------------------------------------
+ Uint32 lcpDirIndex;
+ Uint32 lcpMaxDirIndex;
+ Uint32 lcpMaxOverDirIndex;
+ Uint32 noStoredPages;
+ Uint32 noOfStoredOverPages;
+
+//-----------------------------------------------------------------------------
+// We have a list of overflow pages with free areas. We have a special record,
+// the overflow record representing these pages. The reason is that the
+// same record is also used to represent pages in the directory array that have
+// been released since they were empty (there were however higher indexes with
+// data in them). These are put in the firstFreeDirIndexRec-list.
+// An overflow record representing a page can only be in one of these lists.
+//-----------------------------------------------------------------------------
+ Uint32 firstOverflowRec;
+ Uint32 lastOverflowRec;
+ Uint32 firstFreeDirindexRec;
+
+//-----------------------------------------------------------------------------
+// localCheckpId is used during execution of UNDO log to ensure that we only
+// apply UNDO log records from the restored LCP of the fragment.
+// lcpLqhPtr keeps track of LQH record for this fragment to checkpoint
+//-----------------------------------------------------------------------------
+ Uint32 localCheckpId;
+ Uint32 lcpLqhPtr;
+
+//-----------------------------------------------------------------------------
+// Counter keeping track of how many times we have expanded. We need to ensure
+// that we do not shrink so many times that this variable becomes negative.
+//-----------------------------------------------------------------------------
+ Uint32 expandCounter;
+//-----------------------------------------------------------------------------
+// Reference to record for open file at LCP and restore
+//-----------------------------------------------------------------------------
+ Uint32 fsConnPtr;
+
+//-----------------------------------------------------------------------------
+// These variables are important for the linear hashing algorithm.
+// localkeylen is the size of the local key (1 and 2 are currently supported)
+// maxloadfactor is the factor specifying when to expand
+// minloadfactor is the factor specifying when to shrink (hysteresis model)
+// maxp and p are the variables most central to linear hashing. p + maxp + 1 is
+// the current number of buckets. maxp is the largest value of the form 2**n - 1
+// which is smaller than the number of buckets. These values are used to find the
+// correct bucket with the aid of the hash value.
+//
+// slack keeps track of whether we have inserted more elements than the current
+// size is suitable for, or fewer. Slack together with the boundaries
+// set by maxloadfactor and minloadfactor decides when to expand/shrink.
+// slackCheck: when slack goes over this value it is time to expand.
+// slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or
+// bucketSize * hysteresis
+//-----------------------------------------------------------------------------
+ Uint32 localkeylen;
+ Uint32 maxp;
+ Uint32 maxloadfactor;
+ Uint32 minloadfactor;
+ Uint32 p;
+ Uint32 slack;
+ Uint32 slackCheck;
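+
+//-----------------------------------------------------------------------------
+// Sketch of how these variables drive classic linear hashing (an illustration
+// only; the actual address computation is performed in the block code):
+//   addr = hashValue & maxp;                // maxp has the form 2**n - 1
+//   if (addr < p)
+//     addr = hashValue & ((maxp << 1) | 1); // bucket already split: use one more bit
+// An expand is then considered once slack goes over slackCheck.
+//-----------------------------------------------------------------------------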
+
+//-----------------------------------------------------------------------------
+// myfid is the fragment id of the fragment
+// myroot is the reference to the root fragment record
+// nextfreefrag is the next free fragment if linked into a free list
+//-----------------------------------------------------------------------------
+ Uint32 myfid;
+ Uint32 myroot;
+ Uint32 myTableId;
+ Uint32 nextfreefrag;
+
+//-----------------------------------------------------------------------------
+// This variable is used during restore to keep track of page id of read pages.
+// During read of bucket pages this is used to calculate the page id and also
+// to verify that the page id of the read page is correct. During read of over-
+// flow pages it is only used to keep track of the number of pages read.
+//-----------------------------------------------------------------------------
+ Uint32 nextAllocPage;
+
+//-----------------------------------------------------------------------------
+// Keeps track of undo position for fragment during LCP and restore.
+//-----------------------------------------------------------------------------
+ Uint32 prevUndoposition;
+
+//-----------------------------------------------------------------------------
+// Page reference during LCP and restore of page zero where fragment data is
+// saved
+//-----------------------------------------------------------------------------
+ Uint32 zeroPagePtr;
+
+//-----------------------------------------------------------------------------
+// Number of pages read from file during restore
+//-----------------------------------------------------------------------------
+ Uint32 noOfExpectedPages;
+
+//-----------------------------------------------------------------------------
+// Fragment State, mostly applicable during LCP and restore
+//-----------------------------------------------------------------------------
+ State fragState;
+
+//-----------------------------------------------------------------------------
+// Keep track of number of outstanding writes of UNDO log records to ensure that
+// we have saved all UNDO info before concluding local checkpoint.
+//-----------------------------------------------------------------------------
+ Uint32 nrWaitWriteUndoExit;
+
+//-----------------------------------------------------------------------------
+// lastUndoIsStored is used to handle parallel writes of UNDO log and pages to
+// know when LCP is completed
+//-----------------------------------------------------------------------------
+ Uint8 lastUndoIsStored;
+
+//-----------------------------------------------------------------------------
+// Set to ZTRUE when local checkpoint freeze occurs and set to ZFALSE when
+// local checkpoint concludes.
+//-----------------------------------------------------------------------------
+ Uint8 createLcp;
+
+//-----------------------------------------------------------------------------
+// Flag indicating whether we are in the load phase of restore still.
+//-----------------------------------------------------------------------------
+ Uint8 loadingFlag;
+
+//-----------------------------------------------------------------------------
+// elementLength: Length of element in bucket and overflow pages
+// keyLength: Length of key
+//-----------------------------------------------------------------------------
+ Uint8 elementLength;
+ Uint16 keyLength;
+
+//-----------------------------------------------------------------------------
+// This flag is used to avoid sending a big number of expand or shrink signals
+// when simultaneously committing many inserts or deletes.
+//-----------------------------------------------------------------------------
+ Uint8 expandFlag;
+
+//-----------------------------------------------------------------------------
+// hashcheckbit is the bit to check whether to send an element to the split bucket or not
+// k (== 6) is the number of buckets per page
+// lhfragbits is the number of bits used to calculate the fragment id
+// lhdirbits is the number of bits used to calculate the page id
+//-----------------------------------------------------------------------------
+ Uint8 hashcheckbit;
+ Uint8 k;
+ Uint8 lhfragbits;
+ Uint8 lhdirbits;
+
+//-----------------------------------------------------------------------------
+// nodetype can only be STORED in this release. Is currently only set, never read
+// stopQueOp is an indicator that locked operations will not start until the LCP has
+// released the lock on the fragment
+//-----------------------------------------------------------------------------
+ Uint8 nodetype;
+ Uint8 stopQueOp;
+
+//-----------------------------------------------------------------------------
+// flag to avoid accessing table record if no char attributes
+//-----------------------------------------------------------------------------
+ Uint8 hasCharAttr;
+};
+
+ typedef Ptr<Fragmentrec> FragmentrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FS_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+struct FsConnectrec {
+ Uint32 fsNext;
+ Uint32 fsPrev;
+ Uint32 fragrecPtr;
+ Uint32 fsPtr;
+ State fsState;
+ Uint8 activeFragId;
+ Uint8 fsPart;
+}; /* p2c: size = 24 bytes */
+
+ typedef Ptr<FsConnectrec> FsConnectrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* FS_OPREC */
+/* --------------------------------------------------------------------------------- */
+struct FsOprec {
+ Uint32 fsOpnext;
+ Uint32 fsOpfragrecPtr;
+ Uint32 fsConptr;
+ State fsOpstate;
+ Uint16 fsOpMemPage;
+}; /* p2c: size = 20 bytes */
+
+ typedef Ptr<FsOprec> FsOprecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* LCP_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+struct LcpConnectrec {
+ Uint32 nextLcpConn;
+ Uint32 lcpUserptr;
+ Uint32 rootrecptr;
+ State syncUndopageState;
+ State lcpstate;
+ Uint32 lcpUserblockref;
+ Uint16 localCheckPid;
+ Uint8 noOfLcpConf;
+};
+ typedef Ptr<LcpConnectrec> LcpConnectrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* OPERATIONREC */
+/* --------------------------------------------------------------------------------- */
+struct Operationrec {
+ Uint32 keydata[8];
+ Uint32 localdata[2];
+ Uint32 elementIsforward;
+ Uint32 elementPage;
+ Uint32 elementPointer;
+ Uint32 fid;
+ Uint32 fragptr;
+ Uint32 hashvaluePart;
+ Uint32 hashValue;
+ Uint32 insertDeleteLen;
+ Uint32 keyinfoPage;
+ Uint32 nextLockOwnerOp;
+ Uint32 nextOp;
+ Uint32 nextParallelQue;
+ Uint32 nextQueOp;
+ Uint32 nextSerialQue;
+ Uint32 prevOp;
+ Uint32 prevLockOwnerOp;
+ Uint32 prevParallelQue;
+ Uint32 prevQueOp;
+ Uint32 prevSerialQue;
+ Uint32 scanRecPtr;
+ Uint32 transId1;
+ Uint32 transId2;
+ Uint32 longPagePtr;
+ Uint32 longKeyPageIndex;
+ State opState;
+ Uint32 userptr;
+ State transactionstate;
+ Uint16 elementContainer;
+ Uint16 tupkeylen;
+ Uint32 xfrmtupkeylen;
+ Uint32 userblockref;
+ Uint32 scanBits;
+ Uint8 elementIsDisappeared;
+ Uint8 insertIsDone;
+ Uint8 lockMode;
+ Uint8 lockOwner;
+ Uint8 nodeType;
+ Uint8 operation;
+ Uint8 opSimple;
+ Uint8 dirtyRead;
+ Uint8 commitDeleteCheckFlag;
+ Uint8 isAccLockReq;
+ Uint8 isUndoLogReq;
+}; /* p2c: size = 168 bytes */
+
+ typedef Ptr<Operationrec> OperationrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* OVERFLOW_RECORD */
+/* --------------------------------------------------------------------------------- */
+struct OverflowRecord {
+ Uint32 dirindex;
+ Uint32 nextOverRec;
+ Uint32 nextOverList;
+ Uint32 prevOverRec;
+ Uint32 prevOverList;
+ Uint32 overpage;
+ Uint32 nextfreeoverrec;
+};
+
+ typedef Ptr<OverflowRecord> OverflowRecordPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* PAGE8 */
+/* --------------------------------------------------------------------------------- */
+struct Page8 {
+ Uint32 word32[2048];
+}; /* p2c: size = 8192 bytes */
+
+ typedef Ptr<Page8> Page8Ptr;
+
+/* --------------------------------------------------------------------------------- */
+/* ROOTFRAGMENTREC */
+/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO */
+/* NEW FRAGMENTS. TO MAKE THIS PROCESS EASIER, DURING THE ADD FRAGMENT PROCESS */
+/* THE NEXT FRAGMENT IDENTITIES WILL BE CALCULATED, AND TWO FRAGMENTS WILL BE */
+/* ADDED IN (NDBACC). THEREBY EXPAND OF A FRAGMENT CAN BE PERFORMED QUICKLY AND */
+/* EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATION PROCESSING. */
+/* --------------------------------------------------------------------------------- */
+struct Rootfragmentrec {
+ Uint32 scan[MAX_PARALLEL_SCANS_PER_FRAG];
+ Uint32 fragmentptr[2];
+ Uint32 fragmentid[2];
+ Uint32 lcpPtr;
+ Uint32 mytabptr;
+ Uint32 nextroot;
+ Uint32 roothashcheck;
+ Uint32 noOfElements;
+ Uint32 m_commit_count;
+ State rootState;
+}; /* p2c: size = 72 bytes */
+
+ typedef Ptr<Rootfragmentrec> RootfragmentrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+struct ScanRec {
+ enum ScanState {
+ WAIT_NEXT,
+ SCAN_DISCONNECT
+ };
+ enum ScanBucketState {
+ FIRST_LAP,
+ SECOND_LAP,
+ SCAN_COMPLETED
+ };
+ Uint32 activeLocalFrag;
+ Uint32 rootPtr;
+ Uint32 nextBucketIndex;
+ Uint32 scanNextfreerec;
+ Uint32 scanFirstActiveOp;
+ Uint32 scanFirstLockedOp;
+ Uint32 scanLastLockedOp;
+ Uint32 scanFirstQueuedOp;
+ Uint32 scanLastQueuedOp;
+ Uint32 scanUserptr;
+ Uint32 scanTrid1;
+ Uint32 scanTrid2;
+ Uint32 startNoOfBuckets;
+ Uint32 minBucketIndexToRescan;
+ Uint32 maxBucketIndexToRescan;
+ Uint32 scanOpsAllocated;
+ ScanBucketState scanBucketState;
+ ScanState scanState;
+ Uint16 scanLockHeld;
+ Uint32 scanUserblockref;
+ Uint32 scanMask;
+ Uint8 scanLockMode;
+ Uint8 scanKeyinfoFlag;
+ Uint8 scanTimer;
+ Uint8 scanContinuebCounter;
+ Uint8 scanReadCommittedFlag;
+};
+
+ typedef Ptr<ScanRec> ScanRecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+struct SrVersionRec {
+ Uint32 nextFreeSr;
+ Uint32 checkPointId;
+ Uint32 prevAddress;
+ Uint32 srUnused; /* p2c: Not used */
+}; /* p2c: size = 16 bytes */
+
+ typedef Ptr<SrVersionRec> SrVersionRecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* TABREC */
+/* --------------------------------------------------------------------------------- */
+struct Tabrec {
+ Uint32 fragholder[MAX_FRAG_PER_NODE];
+ Uint32 fragptrholder[MAX_FRAG_PER_NODE];
+ Uint32 tabUserPtr;
+ BlockReference tabUserRef;
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ struct KeyAttr {
+ Uint32 attributeDescriptor;
+ CHARSET_INFO* charsetInfo;
+ } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
+};
+ typedef Ptr<Tabrec> TabrecPtr;
+
+/* --------------------------------------------------------------------------------- */
+/* UNDOPAGE */
+/* --------------------------------------------------------------------------------- */
+struct Undopage {
+ Uint32 undoword[8192];
+}; /* p2c: size = 32768 bytes */
+
+ typedef Ptr<Undopage> UndopagePtr;
+
+public:
+ Dbacc(const class Configuration &);
+ virtual ~Dbacc();
+
+ // pointer to TUP instance in this thread
+ Dbtup* c_tup;
+
+private:
+ BLOCK_DEFINES(Dbacc);
+
+ // Transit signals
+ void execDEBUG_SIG(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execACC_CHECK_SCAN(Signal* signal);
+ void execEXPANDCHECK2(Signal* signal);
+ void execSHRINKCHECK2(Signal* signal);
+ void execACC_OVER_REC(Signal* signal);
+ void execACC_SAVE_PAGES(Signal* signal);
+ void execNEXTOPERATION(Signal* signal);
+ void execREAD_PSUEDO_REQ(Signal* signal);
+
+ // Received signals
+ void execSTTOR(Signal* signal);
+ void execSR_FRAGIDREQ(Signal* signal);
+ void execLCP_FRAGIDREQ(Signal* signal);
+ void execLCP_HOLDOPREQ(Signal* signal);
+ void execEND_LCPREQ(Signal* signal);
+ void execACC_LCPREQ(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execACC_CONTOPREQ(Signal* signal);
+ void execACCKEYREQ(Signal* signal);
+ void execACCSEIZEREQ(Signal* signal);
+ void execACCFRAGREQ(Signal* signal);
+ void execTC_SCHVERREQ(Signal* signal);
+ void execACC_SRREQ(Signal* signal);
+ void execNEXT_SCANREQ(Signal* signal);
+ void execACC_ABORTREQ(Signal* signal);
+ void execACC_SCANREQ(Signal* signal);
+ void execACCMINUPDATE(Signal* signal);
+ void execACC_COMMITREQ(Signal* signal);
+ void execACC_TO_REQ(Signal* signal);
+ void execACC_LOCKREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+ void execFSREMOVEREF(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execDUMP_STATE_ORD(Signal* signal);
+
+ // Statement blocks
+ void ACCKEY_error(Uint32 fromWhere);
+
+ void commitDeleteCheck();
+
+ void initRootFragPageZero(RootfragmentrecPtr, Page8Ptr);
+ void initRootFragSr(RootfragmentrecPtr, Page8Ptr);
+ void initFragAdd(Signal*, Uint32 rootFragIndex, Uint32 rootIndex, FragmentrecPtr);
+ void initFragPageZero(FragmentrecPtr, Page8Ptr);
+ void initFragSr(FragmentrecPtr, Page8Ptr);
+ void initFragGeneral(FragmentrecPtr);
+ void verifyFragCorrect(FragmentrecPtr regFragPtr);
+ void sendFSREMOVEREQ(Signal* signal, Uint32 tableId);
+ void releaseFragResources(Signal* signal, Uint32 fragIndex);
+ void releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr);
+ void releaseRootFragResources(Signal* signal, Uint32 tableId);
+ void releaseDirResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex);
+ void releaseDirectoryResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex,
+ Uint32 directoryIndex);
+ void releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr);
+ void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr);
+ void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr);
+ Uint32 remainingUndoPages();
+ void updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue);
+ void updateUndoPositionPage(Signal* signal, Uint32 aNewValue);
+ void srCheckPage(Signal* signal);
+ void srCheckContainer(Signal* signal);
+ void initScanFragmentPart(Signal* signal);
+ Uint32 checkScanExpand(Signal* signal);
+ Uint32 checkScanShrink(Signal* signal);
+ void initialiseDirRec(Signal* signal);
+ void initialiseDirRangeRec(Signal* signal);
+ void initialiseFragRec(Signal* signal);
+ void initialiseFsConnectionRec(Signal* signal);
+ void initialiseFsOpRec(Signal* signal);
+ void initialiseLcpConnectionRec(Signal* signal);
+ void initialiseOperationRec(Signal* signal);
+ void initialiseOverflowRec(Signal* signal);
+ void initialisePageRec(Signal* signal);
+ void initialiseLcpPages(Signal* signal);
+ void initialiseRootfragRec(Signal* signal);
+ void initialiseScanRec(Signal* signal);
+ void initialiseSrVerRec(Signal* signal);
+ void initialiseTableRec(Signal* signal);
+ bool addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fragId);
+ void initOpRec(Signal* signal);
+ void sendAcckeyconf(Signal* signal);
+ Uint32 placeReadInLockQueue(Signal* signal);
+ void placeSerialQueueRead(Signal* signal);
+ void checkOnlyReadEntry(Signal* signal);
+ void getNoParallelTransaction(Signal* signal);
+ void moveLastParallelQueue(Signal* signal);
+ void moveLastParallelQueueWrite(Signal* signal);
+ Uint32 placeWriteInLockQueue(Signal* signal);
+ void placeSerialQueueWrite(Signal* signal);
+ void expandcontainer(Signal* signal);
+ void shrinkcontainer(Signal* signal);
+ void nextcontainerinfoExp(Signal* signal);
+ void lcpCopyPage(Signal* signal);
+ void lcpUpdatePage(Signal* signal);
+ void checkUndoPages(Signal* signal);
+ void undoWritingProcess(Signal* signal);
+ void writeUndoDataInfo(Signal* signal);
+ void writeUndoHeader(Signal* signal,
+ Uint32 logicalPageId,
+ UndoHeader::UndoHeaderType pageType);
+ void writeUndoOpInfo(Signal* signal);
+ void checksumControl(Signal* signal, Uint32 checkPage);
+ void startActiveUndo(Signal* signal);
+ void releaseAndCommitActiveOps(Signal* signal);
+ void releaseAndCommitQueuedOps(Signal* signal);
+ void releaseAndAbortLockedOps(Signal* signal);
+ void containerinfo(Signal* signal);
+ bool getScanElement(Signal* signal);
+ void initScanOpRec(Signal* signal);
+ void nextcontainerinfo(Signal* signal);
+ void putActiveScanOp(Signal* signal);
+ void putOpScanLockQue();
+ void putReadyScanQueue(Signal* signal, Uint32 scanRecIndex);
+ void releaseScanBucket(Signal* signal);
+ void releaseScanContainer(Signal* signal);
+ void releaseScanRec(Signal* signal);
+ bool searchScanContainer(Signal* signal);
+ void sendNextScanConf(Signal* signal);
+ void setlock(Signal* signal);
+ void takeOutActiveScanOp(Signal* signal);
+ void takeOutScanLockQueue(Uint32 scanRecIndex);
+ void takeOutReadyScanQueue(Signal* signal);
+ void insertElement(Signal* signal);
+ void insertContainer(Signal* signal);
+ void addnewcontainer(Signal* signal);
+ void getfreelist(Signal* signal);
+ void increaselistcont(Signal* signal);
+ void seizeLeftlist(Signal* signal);
+ void seizeRightlist(Signal* signal);
+ Uint32 readTablePk(Uint32 localkey1);
+ void getElement(Signal* signal);
+ void getdirindex(Signal* signal);
+ void commitdelete(Signal* signal, bool systemRestart);
+ void deleteElement(Signal* signal);
+ void getLastAndRemove(Signal* signal);
+ void releaseLeftlist(Signal* signal);
+ void releaseRightlist(Signal* signal);
+ void checkoverfreelist(Signal* signal);
+ void abortOperation(Signal* signal);
+ void accAbortReqLab(Signal* signal, bool sendConf);
+ void commitOperation(Signal* signal);
+ void copyOpInfo(Signal* signal);
+ Uint32 executeNextOperation(Signal* signal);
+ void releaselock(Signal* signal);
+ void takeOutFragWaitQue(Signal* signal);
+ void allocOverflowPage(Signal* signal);
+ bool getrootfragmentrec(Signal* signal, RootfragmentrecPtr&, Uint32 fragId);
+ void insertLockOwnersList(Signal* signal, const OperationrecPtr&);
+ void takeOutLockOwnersList(Signal* signal, const OperationrecPtr&);
+ void initFsOpRec(Signal* signal);
+ void initLcpConnRec(Signal* signal);
+ void initOverpage(Signal* signal);
+ void initPage(Signal* signal);
+ void initRootfragrec(Signal* signal);
+ void putOpInFragWaitQue(Signal* signal);
+ void putOverflowRecInFrag(Signal* signal);
+ void putRecInFreeOverdir(Signal* signal);
+ void releaseDirectory(Signal* signal);
+ void releaseDirrange(Signal* signal);
+ void releaseFsConnRec(Signal* signal);
+ void releaseFsOpRec(Signal* signal);
+ void releaseLcpConnectRec(Signal* signal);
+ void releaseOpRec(Signal* signal);
+ void releaseOverflowRec(Signal* signal);
+ void releaseOverpage(Signal* signal);
+ void releasePage(Signal* signal);
+ void releaseLcpPage(Signal* signal);
+ void releaseSrRec(Signal* signal);
+ void releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId);
+ void seizeDirectory(Signal* signal);
+ void seizeDirrange(Signal* signal);
+ void seizeFragrec(Signal* signal);
+ void seizeFsConnectRec(Signal* signal);
+ void seizeFsOpRec(Signal* signal);
+ void seizeLcpConnectRec(Signal* signal);
+ void seizeOpRec(Signal* signal);
+ void seizeOverRec(Signal* signal);
+ void seizePage(Signal* signal);
+ void seizeLcpPage(Page8Ptr&);
+ void seizeRootfragrec(Signal* signal);
+ void seizeScanRec(Signal* signal);
+ void seizeSrVerRec(Signal* signal);
+ void sendSystemerror(Signal* signal);
+ void takeRecOutOfFreeOverdir(Signal* signal);
+ void takeRecOutOfFreeOverpage(Signal* signal);
+ void sendScanHbRep(Signal* signal, Uint32);
+
+ void addFragRefuse(Signal* signal, Uint32 errorCode);
+ void ndbsttorryLab(Signal* signal);
+ void srCloseDataFileLab(Signal* signal);
+ void acckeyref1Lab(Signal* signal, Uint32 result_code);
+ void insertelementLab(Signal* signal);
+ void startUndoLab(Signal* signal);
+ void checkNextFragmentLab(Signal* signal);
+ void endofexpLab(Signal* signal);
+ void endofshrinkbucketLab(Signal* signal);
+ void srStartUndoLab(Signal* signal);
+ void senddatapagesLab(Signal* signal);
+ void undoNext2Lab(Signal* signal);
+ void sttorrysignalLab(Signal* signal);
+ void sendholdconfsignalLab(Signal* signal);
+ void accIsLockedLab(Signal* signal);
+ void insertExistElemLab(Signal* signal);
+ void refaccConnectLab(Signal* signal);
+ void srReadOverPagesLab(Signal* signal);
+ void releaseScanLab(Signal* signal);
+ void lcpOpenUndofileConfLab(Signal* signal);
+ void srFsOpenConfLab(Signal* signal);
+ void checkSyncUndoPagesLab(Signal* signal);
+ void sendaccSrconfLab(Signal* signal);
+ void checkSendLcpConfLab(Signal* signal);
+ void endsaveoverpageLab(Signal* signal);
+ void lcpCloseDataFileLab(Signal* signal);
+ void srOpenDataFileLoopLab(Signal* signal);
+ void srReadPagesLab(Signal* signal);
+ void srDoUndoLab(Signal* signal);
+ void ndbrestart1Lab(Signal* signal);
+ void initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data);
+ void srReadPagesAllocLab(Signal* signal);
+ void checkNextBucketLab(Signal* signal);
+ void endsavepageLab(Signal* signal);
+ void saveZeroPageLab(Signal* signal);
+ void srAllocPage0011Lab(Signal* signal);
+ void sendLcpFragidconfLab(Signal* signal);
+ void savepagesLab(Signal* signal);
+ void saveOverPagesLab(Signal* signal);
+ void srReadPageZeroLab(Signal* signal);
+ void storeDataPageInDirectoryLab(Signal* signal);
+ void lcpFsOpenConfLab(Signal* signal);
+
+ void zpagesize_error(const char* where);
+
+ void reportMemoryUsage(Signal* signal, int gth);
+ void lcp_write_op_to_undolog(Signal* signal);
+ void reenable_expand_after_redo_log_exection_complete(Signal*);
+
+ // charsets
+ void xfrmKeyData(Signal* signal);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Variables
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORY RANGE */
+/* --------------------------------------------------------------------------------- */
+ DirRange *dirRange;
+ DirRangePtr expDirRangePtr;
+ DirRangePtr gnsDirRangePtr;
+ DirRangePtr newDirRangePtr;
+ DirRangePtr rdDirRangePtr;
+ DirRangePtr nciOverflowrangeptr;
+ Uint32 cdirrangesize;
+ Uint32 cfirstfreeDirrange;
+/* --------------------------------------------------------------------------------- */
+/* DIRECTORYARRAY */
+/* --------------------------------------------------------------------------------- */
+ Directoryarray *directoryarray;
+ DirectoryarrayPtr expDirptr;
+ DirectoryarrayPtr rdDirptr;
+ DirectoryarrayPtr sdDirptr;
+ DirectoryarrayPtr nciOverflowDirptr;
+ Uint32 cdirarraysize;
+ Uint32 cdirmemory;
+ Uint32 cfirstfreedir;
+/* --------------------------------------------------------------------------------- */
+/* FRAGMENTREC. ALL INFORMATION ABOUT FRAGMENT AND HASH TABLE IS SAVED IN FRAGMENT */
+/* REC A POINTER TO FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC FRAGMENT */
+/* --------------------------------------------------------------------------------- */
+ Fragmentrec *fragmentrec;
+ FragmentrecPtr fragrecptr;
+ Uint32 cfirstfreefrag;
+ Uint32 cfragmentsize;
+/* --------------------------------------------------------------------------------- */
+/* FS_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+ FsConnectrec *fsConnectrec;
+ FsConnectrecPtr fsConnectptr;
+ Uint32 cfsConnectsize;
+ Uint32 cfsFirstfreeconnect;
+/* --------------------------------------------------------------------------------- */
+/* FS_OPREC */
+/* --------------------------------------------------------------------------------- */
+ FsOprec *fsOprec;
+ FsOprecPtr fsOpptr;
+ Uint32 cfsOpsize;
+ Uint32 cfsFirstfreeop;
+/* --------------------------------------------------------------------------------- */
+/* LCP_CONNECTREC */
+/* --------------------------------------------------------------------------------- */
+ LcpConnectrec *lcpConnectrec;
+ LcpConnectrecPtr lcpConnectptr;
+ Uint32 clcpConnectsize;
+ Uint32 cfirstfreelcpConnect;
+/* --------------------------------------------------------------------------------- */
+/* OPERATIONREC */
+/* --------------------------------------------------------------------------------- */
+ Operationrec *operationrec;
+ OperationrecPtr operationRecPtr;
+ OperationrecPtr idrOperationRecPtr;
+ OperationrecPtr copyInOperPtr;
+ OperationrecPtr copyOperPtr;
+ OperationrecPtr mlpqOperPtr;
+ OperationrecPtr queOperPtr;
+ OperationrecPtr readWriteOpPtr;
+ OperationrecPtr tgnptMainOpPtr;
+ Uint32 cfreeopRec;
+ Uint32 coprecsize;
+/* --------------------------------------------------------------------------------- */
+/* OVERFLOW_RECORD */
+/* --------------------------------------------------------------------------------- */
+ OverflowRecord *overflowRecord;
+ OverflowRecordPtr iopOverflowRecPtr;
+ OverflowRecordPtr tfoOverflowRecPtr;
+ OverflowRecordPtr porOverflowRecPtr;
+ OverflowRecordPtr priOverflowRecPtr;
+ OverflowRecordPtr rorOverflowRecPtr;
+ OverflowRecordPtr sorOverflowRecPtr;
+ OverflowRecordPtr troOverflowRecPtr;
+ Uint32 cfirstfreeoverrec;
+ Uint32 coverflowrecsize;
+
+/* --------------------------------------------------------------------------------- */
+/* PAGE8 */
+/* --------------------------------------------------------------------------------- */
+ Page8 *page8;
+ /* 8 KB PAGE */
+ Page8Ptr ancPageptr;
+ Page8Ptr colPageptr;
+ Page8Ptr ccoPageptr;
+ Page8Ptr datapageptr;
+ Page8Ptr delPageptr;
+ Page8Ptr excPageptr;
+ Page8Ptr expPageptr;
+ Page8Ptr gdiPageptr;
+ Page8Ptr gePageptr;
+ Page8Ptr gflPageptr;
+ Page8Ptr idrPageptr;
+ Page8Ptr ilcPageptr;
+ Page8Ptr inpPageptr;
+ Page8Ptr iopPageptr;
+ Page8Ptr lastPageptr;
+ Page8Ptr lastPrevpageptr;
+ Page8Ptr lcnPageptr;
+ Page8Ptr lcnCopyPageptr;
+ Page8Ptr lupPageptr;
+ Page8Ptr priPageptr;
+ Page8Ptr pwiPageptr;
+ Page8Ptr ciPageidptr;
+ Page8Ptr gsePageidptr;
+ Page8Ptr isoPageptr;
+ Page8Ptr nciPageidptr;
+ Page8Ptr rsbPageidptr;
+ Page8Ptr rscPageidptr;
+ Page8Ptr slPageidptr;
+ Page8Ptr sscPageidptr;
+ Page8Ptr rlPageptr;
+ Page8Ptr rlpPageptr;
+ Page8Ptr ropPageptr;
+ Page8Ptr rpPageptr;
+ Page8Ptr slPageptr;
+ Page8Ptr spPageptr;
+ Uint32 cfirstfreepage;
+ Uint32 cfreepage;
+ Uint32 cpagesize;
+ Uint32 cfirstfreeLcpPage;
+ Uint32 cnoOfAllocatedPages;
+ Uint32 cnoLcpPages;
+/* --------------------------------------------------------------------------------- */
+/* ROOTFRAGMENTREC */
+/* DURING THE EXPAND FRAGMENT PROCESS, EACH FRAGMENT WILL BE EXPANDED INTO TWO */
+/* NEW FRAGMENTS. TO MAKE THIS PROCESS EASIER, THE NEXT FRAGMENT IDENTITIES ARE */
+/* ALREADY CALCULATED DURING THE ADD FRAGMENT PROCESS, AND TWO FRAGMENTS ARE */
+/* ADDED IN (NDBACC). THEREBY THE EXPAND OF A FRAGMENT CAN BE PERFORMED QUICKLY */
+/* AND EASILY. THE NEW FRAGMENT ID IS SENT TO THE TUP MANAGER FOR ALL OPERATIONS. */
+/* --------------------------------------------------------------------------------- */
+ Rootfragmentrec *rootfragmentrec;
+ RootfragmentrecPtr rootfragrecptr;
+ Uint32 crootfragmentsize;
+ Uint32 cfirstfreerootfrag;
+/* --------------------------------------------------------------------------------- */
+/* SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+ ScanRec *scanRec;
+ ScanRecPtr scanPtr;
+ Uint32 cscanRecSize;
+ Uint32 cfirstFreeScanRec;
+/* --------------------------------------------------------------------------------- */
+/* SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+ SrVersionRec *srVersionRec;
+ SrVersionRecPtr srVersionPtr;
+ Uint32 csrVersionRecSize;
+ Uint32 cfirstFreeSrVersionRec;
+/* --------------------------------------------------------------------------------- */
+/* TABREC */
+/* --------------------------------------------------------------------------------- */
+ Tabrec *tabrec;
+ TabrecPtr tabptr;
+ Uint32 ctablesize;
+/* --------------------------------------------------------------------------------- */
+/* UNDOPAGE */
+/* --------------------------------------------------------------------------------- */
+ Undopage *undopage;
+ /* 32 KB PAGE */
+ UndopagePtr undopageptr;
+ Uint32 tpwiElementptr;
+ Uint32 tpriElementptr;
+ Uint32 tgseElementptr;
+ Uint32 tgseContainerptr;
+ Uint32 trlHead;
+ Uint32 trlRelCon;
+ Uint32 trlNextused;
+ Uint32 trlPrevused;
+ Uint32 tlcnChecksum;
+ Uint32 tlupElemIndex;
+ Uint32 tlupIndex;
+ Uint32 tlupForward;
+ Uint32 tancNext;
+ Uint32 tancBufType;
+ Uint32 tancContainerptr;
+ Uint32 tancPageindex;
+ Uint32 tancPageid;
+ Uint32 tidrResult;
+ Uint32 tidrElemhead;
+ Uint32 tidrForward;
+ Uint32 tidrPageindex;
+ Uint32 tidrContainerptr;
+ Uint32 tidrContainerhead;
+ Uint32 tlastForward;
+ Uint32 tlastPageindex;
+ Uint32 tlastContainerlen;
+ Uint32 tlastElementptr;
+ Uint32 tlastContainerptr;
+ Uint32 tlastContainerhead;
+ Uint32 trlPageindex;
+ Uint32 tdelContainerptr;
+ Uint32 tdelElementptr;
+ Uint32 tdelForward;
+ Uint32 tiopPageId;
+ Uint32 tipPageId;
+ Uint32 tgeLocked;
+ Uint32 tgeResult;
+ Uint32 tgeContainerptr;
+ Uint32 tgeElementptr;
+ Uint32 tgeForward;
+ Uint32 tundoElemIndex;
+ Uint32 texpReceivedBucket;
+ Uint32 texpDirInd;
+ Uint32 texpDirRangeIndex;
+ Uint32 texpDirPageIndex;
+ Uint32 tdata0;
+ Uint32 tcheckpointid;
+ Uint32 tciContainerptr;
+ Uint32 tnciContainerptr;
+ Uint32 tisoContainerptr;
+ Uint32 trscContainerptr;
+ Uint32 tsscContainerptr;
+ Uint32 tciContainerlen;
+ Uint32 trscContainerlen;
+ Uint32 tsscContainerlen;
+ Uint32 tciContainerhead;
+ Uint32 tnciContainerhead;
+ Uint32 tslElementptr;
+ Uint32 tisoElementptr;
+ Uint32 tsscElementptr;
+ Uint32 tfid;
+ Uint32 tscanFlag;
+ Uint32 theadundoindex;
+ Uint32 tgflBufType;
+ Uint32 tgseIsforward;
+ Uint32 tsscIsforward;
+ Uint32 trscIsforward;
+ Uint32 tciIsforward;
+ Uint32 tnciIsforward;
+ Uint32 tisoIsforward;
+ Uint32 tgseIsLocked;
+ Uint32 tsscIsLocked;
+ Uint32 tkeylen;
+ Uint32 tmp;
+ Uint32 tmpP;
+ Uint32 tmpP2;
+ Uint32 tmp1;
+ Uint32 tmp2;
+ Uint32 tgflPageindex;
+ Uint32 tmpindex;
+ Uint32 tslNextfree;
+ Uint32 tslPageindex;
+ Uint32 tgsePageindex;
+ Uint32 tnciNextSamePage;
+ Uint32 tslPrevfree;
+ Uint32 tciPageindex;
+ Uint32 trsbPageindex;
+ Uint32 tnciPageindex;
+ Uint32 tlastPrevconptr;
+ Uint32 tresult;
+ Uint32 tslUpdateHeader;
+ Uint32 tuserptr;
+ BlockReference tuserblockref;
+ Uint32 tundoindex;
+ Uint32 tlqhPointer;
+ Uint32 tholdSentOp;
+ Uint32 tholdMore;
+ Uint32 tlcpLqhCheckV;
+ Uint32 tgdiPageindex;
+ Uint32 tiopIndex;
+ Uint32 tnciTmp;
+ Uint32 tullIndex;
+ Uint32 turlIndex;
+ Uint32 tlfrTmp1;
+ Uint32 tlfrTmp2;
+ Uint32 tgnptNrTransaction;
+ Uint32 tscanTrid1;
+ Uint32 tscanTrid2;
+
+ Uint16 clastUndoPageIdWritten;
+ Uint32 cactiveCheckpId;
+ Uint32 cactiveRootfrag;
+ Uint32 cactiveSrFsPtr;
+ Uint32 cactiveUndoFilePage;
+ Uint32 cactiveOpenUndoFsPtr;
+ Uint32 cactiveSrUndoPage;
+ Uint32 cprevUndoaddress;
+ Uint32 creadyUndoaddress;
+ Uint32 ctest;
+ Uint32 cundoLogActive;
+ Uint32 clqhPtr;
+ BlockReference clqhBlockRef;
+ Uint32 cminusOne;
+ NodeId cmynodeid;
+ Uint32 cactiveUndoFileVersion;
+ BlockReference cownBlockref;
+ BlockReference cndbcntrRef;
+ Uint16 csignalkey;
+ Uint32 cundopagesize;
+ Uint32 cundoposition;
+ Uint32 cundoElemIndex;
+ Uint32 cundoinfolength;
+ Uint32 czero;
+ Uint32 csrVersList[16];
+ Uint32 clblPageCounter;
+ Uint32 clblPageOver;
+ Uint32 clblPagesPerTick;
+ Uint32 clblPagesPerTickAfterSr;
+ Uint32 csystemRestart;
+ Uint32 cexcForward;
+ Uint32 cexcPageindex;
+ Uint32 cexcContainerptr;
+ Uint32 cexcContainerhead;
+ Uint32 cexcContainerlen;
+ Uint32 cexcElementptr;
+ Uint32 cexcPrevconptr;
+ Uint32 cexcMovedLen;
+ Uint32 cexcPrevpageptr;
+ Uint32 cexcPrevpageindex;
+ Uint32 cexcPrevforward;
+ Uint32 clocalkey[32];
+ union {
+ Uint32 ckeys[2048];
+ Uint64 ckeys_align;
+ };
+
+ Uint32 c_errorInsert3000_TableId;
+ Uint32 cSrUndoRecords[UndoHeader::ZNO_UNDORECORD_TYPES];
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
new file mode 100644
index 00000000000..94782e13e00
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -0,0 +1,269 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+
+#define DBACC_C
+#include "Dbacc.hpp"
+
+#define DEBUG(x) { ndbout << "ACC::" << x << endl; }
+
+void Dbacc::initData()
+{
+ cdirarraysize = ZDIRARRAY;
+ coprecsize = ZOPRECSIZE;
+ cpagesize = ZPAGESIZE;
+ clcpConnectsize = ZLCP_CONNECTSIZE;
+ ctablesize = ZTABLESIZE;
+ cfragmentsize = ZFRAGMENTSIZE;
+ crootfragmentsize = ZROOTFRAGMENTSIZE;
+ cdirrangesize = ZDIRRANGESIZE;
+ coverflowrecsize = ZOVERFLOWRECSIZE;
+ cfsConnectsize = ZFS_CONNECTSIZE;
+ cfsOpsize = ZFS_OPSIZE;
+ cscanRecSize = ZSCAN_REC_SIZE;
+ csrVersionRecSize = ZSR_VERSION_REC_SIZE;
+
+
+ dirRange = 0;
+ directoryarray = 0;
+ fragmentrec = 0;
+ fsConnectrec = 0;
+ fsOprec = 0;
+ lcpConnectrec = 0;
+ operationrec = 0;
+ overflowRecord = 0;
+ page8 = 0;
+ rootfragmentrec = 0;
+ scanRec = 0;
+ srVersionRec = 0;
+ tabrec = 0;
+ undopage = 0;
+
+ // Records with constant sizes
+}//Dbacc::initData()
+
+void Dbacc::initRecords()
+{
+ // Records with dynamic sizes
+ dirRange = (DirRange*)allocRecord("DirRange",
+ sizeof(DirRange),
+ cdirrangesize);
+
+ directoryarray = (Directoryarray*)allocRecord("Directoryarray",
+ sizeof(Directoryarray),
+ cdirarraysize);
+
+ fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec",
+ sizeof(Fragmentrec),
+ cfragmentsize);
+
+ fsConnectrec = (FsConnectrec*)allocRecord("FsConnectrec",
+ sizeof(FsConnectrec),
+ cfsConnectsize);
+
+ fsOprec = (FsOprec*)allocRecord("FsOprec",
+ sizeof(FsOprec),
+ cfsOpsize);
+
+ lcpConnectrec = (LcpConnectrec*)allocRecord("LcpConnectrec",
+ sizeof(LcpConnectrec),
+ clcpConnectsize);
+
+ operationrec = (Operationrec*)allocRecord("Operationrec",
+ sizeof(Operationrec),
+ coprecsize);
+
+ overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord",
+ sizeof(OverflowRecord),
+ coverflowrecsize);
+
+ page8 = (Page8*)allocRecord("Page8",
+ sizeof(Page8),
+ cpagesize,
+ false);
+
+ rootfragmentrec = (Rootfragmentrec*)allocRecord("Rootfragmentrec",
+ sizeof(Rootfragmentrec),
+ crootfragmentsize);
+
+ scanRec = (ScanRec*)allocRecord("ScanRec",
+ sizeof(ScanRec),
+ cscanRecSize);
+
+ srVersionRec = (SrVersionRec*)allocRecord("SrVersionRec",
+ sizeof(SrVersionRec),
+ csrVersionRecSize);
+
+ tabrec = (Tabrec*)allocRecord("Tabrec",
+ sizeof(Tabrec),
+ ctablesize);
+
+ undopage = (Undopage*)allocRecord("Undopage",
+ sizeof(Undopage),
+ cundopagesize,
+ false);
+
+ // Initialize BAT for interface to file system
+
+ NewVARIABLE* bat = allocateBat(3);
+ bat[1].WA = &page8->word32[0];
+ bat[1].nrr = cpagesize;
+ bat[1].ClusterSize = sizeof(Page8);
+ bat[1].bits.q = 11;
+ bat[1].bits.v = 5;
+ bat[2].WA = &undopage->undoword[0];
+ bat[2].nrr = cundopagesize;
+ bat[2].ClusterSize = sizeof(Undopage);
+ bat[2].bits.q = 13;
+ bat[2].bits.v = 5;
+}//Dbacc::initRecords()
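+
+/* Note: the BAT entries set up above make the two page arrays addressable
+ * through the file system interface; entry 1 covers the 8 KB Page8 data
+ * pages and entry 2 the 32 KB Undopage undo log pages, with nrr and
+ * ClusterSize giving the number of pages and the size of each page. */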
+
+Dbacc::Dbacc(const class Configuration & conf):
+ SimulatedBlock(DBACC, conf),
+ c_tup(0)
+{
+ Uint32 log_page_size= 0;
+ BLOCK_CONSTRUCTOR(Dbacc);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always set page size in half MBytes
+ */
+ cundopagesize= (log_page_size / sizeof(Undopage));
+ Uint32 mega_byte_part= cundopagesize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ cundopagesize+= (16 - mega_byte_part);
+ }
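+
+  /* Worked example (illustration only, not from the original source): an
+   * Undopage is a 32 KB page, so 16 pages make up half a megabyte. A buffer
+   * of 3 MB gives 96 pages, already a multiple of 16 and left unchanged; a
+   * value yielding e.g. 100 pages has mega_byte_part = 4 and is rounded up
+   * to 112 pages, i.e. to the next half-MB boundary. */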
+
+ // Transit signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
+ addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
+ addRecSignal(GSN_CONTINUEB, &Dbacc::execCONTINUEB);
+ addRecSignal(GSN_ACC_CHECK_SCAN, &Dbacc::execACC_CHECK_SCAN);
+ addRecSignal(GSN_EXPANDCHECK2, &Dbacc::execEXPANDCHECK2);
+ addRecSignal(GSN_SHRINKCHECK2, &Dbacc::execSHRINKCHECK2);
+ addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
+ addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
+ addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
+ addRecSignal(GSN_READ_PSUEDO_REQ, &Dbacc::execREAD_PSUEDO_REQ);
+
+ // Received signals
+ addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);
+ addRecSignal(GSN_SR_FRAGIDREQ, &Dbacc::execSR_FRAGIDREQ);
+ addRecSignal(GSN_LCP_FRAGIDREQ, &Dbacc::execLCP_FRAGIDREQ);
+ addRecSignal(GSN_LCP_HOLDOPREQ, &Dbacc::execLCP_HOLDOPREQ);
+ addRecSignal(GSN_END_LCPREQ, &Dbacc::execEND_LCPREQ);
+ addRecSignal(GSN_ACC_LCPREQ, &Dbacc::execACC_LCPREQ);
+ addRecSignal(GSN_START_RECREQ, &Dbacc::execSTART_RECREQ);
+ addRecSignal(GSN_ACC_CONTOPREQ, &Dbacc::execACC_CONTOPREQ);
+ addRecSignal(GSN_ACCKEYREQ, &Dbacc::execACCKEYREQ);
+ addRecSignal(GSN_ACCSEIZEREQ, &Dbacc::execACCSEIZEREQ);
+ addRecSignal(GSN_ACCFRAGREQ, &Dbacc::execACCFRAGREQ);
+ addRecSignal(GSN_TC_SCHVERREQ, &Dbacc::execTC_SCHVERREQ);
+ addRecSignal(GSN_ACC_SRREQ, &Dbacc::execACC_SRREQ);
+ addRecSignal(GSN_NEXT_SCANREQ, &Dbacc::execNEXT_SCANREQ);
+ addRecSignal(GSN_ACC_ABORTREQ, &Dbacc::execACC_ABORTREQ);
+ addRecSignal(GSN_ACC_SCANREQ, &Dbacc::execACC_SCANREQ);
+ addRecSignal(GSN_ACCMINUPDATE, &Dbacc::execACCMINUPDATE);
+ addRecSignal(GSN_ACC_COMMITREQ, &Dbacc::execACC_COMMITREQ);
+ addRecSignal(GSN_ACC_TO_REQ, &Dbacc::execACC_TO_REQ);
+ addRecSignal(GSN_ACC_LOCKREQ, &Dbacc::execACC_LOCKREQ);
+ addRecSignal(GSN_FSOPENCONF, &Dbacc::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbacc::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbacc::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbacc::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbacc::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbacc::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbacc::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbacc::execFSREADREF);
+ addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
+ addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF);
+ addRecSignal(GSN_FSREMOVEREF, &Dbacc::execFSREMOVEREF);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
+
+ initData();
+}//Dbacc::Dbacc()
+
+Dbacc::~Dbacc()
+{
+ deallocRecord((void **)&dirRange, "DirRange",
+ sizeof(DirRange),
+ cdirrangesize);
+
+ deallocRecord((void **)&directoryarray, "Directoryarray",
+ sizeof(Directoryarray),
+ cdirarraysize);
+
+ deallocRecord((void **)&fragmentrec, "Fragmentrec",
+ sizeof(Fragmentrec),
+ cfragmentsize);
+
+ deallocRecord((void **)&fsConnectrec, "FsConnectrec",
+ sizeof(FsConnectrec),
+ cfsConnectsize);
+
+ deallocRecord((void **)&fsOprec, "FsOprec",
+ sizeof(FsOprec),
+ cfsOpsize);
+
+ deallocRecord((void **)&lcpConnectrec, "LcpConnectrec",
+ sizeof(LcpConnectrec),
+ clcpConnectsize);
+
+ deallocRecord((void **)&operationrec, "Operationrec",
+ sizeof(Operationrec),
+ coprecsize);
+
+ deallocRecord((void **)&overflowRecord, "OverflowRecord",
+ sizeof(OverflowRecord),
+ coverflowrecsize);
+
+ deallocRecord((void **)&page8, "Page8",
+ sizeof(Page8),
+ cpagesize);
+
+ deallocRecord((void **)&rootfragmentrec, "Rootfragmentrec",
+ sizeof(Rootfragmentrec),
+ crootfragmentsize);
+
+ deallocRecord((void **)&scanRec, "ScanRec",
+ sizeof(ScanRec),
+ cscanRecSize);
+
+ deallocRecord((void **)&srVersionRec, "SrVersionRec",
+ sizeof(SrVersionRec),
+ csrVersionRecSize);
+
+ deallocRecord((void **)&tabrec, "Tabrec",
+ sizeof(Tabrec),
+ ctablesize);
+
+ deallocRecord((void **)&undopage, "Undopage",
+ sizeof(Undopage),
+ cundopagesize);
+
+}//Dbacc::~Dbacc()
+
+BLOCK_FUNCTIONS(Dbacc)
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
new file mode 100644
index 00000000000..a16c0da369b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -0,0 +1,11653 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBACC_C
+#include "Dbacc.hpp"
+#include <my_sys.h>
+
+#include <AttributeHeader.hpp>
+#include <signaldata/AccFrag.hpp>
+#include <signaldata/AccScan.hpp>
+#include <signaldata/AccLock.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <SectionReader.hpp>
+
+// TO_DO_RONM marks comments on what needs to be improved in future versions,
+// when more time is available.
+
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBACC: "<< x << endl;
+#else
+#define DEBUG(x)
+#endif
+
+
+Uint32
+Dbacc::remainingUndoPages(){
+ Uint32 HeadPage = cundoposition >> ZUNDOPAGEINDEXBITS;
+ Uint32 TailPage = clastUndoPageIdWritten;
+
+  // Head must be greater than or equal to tail
+ ndbrequire(HeadPage>=TailPage);
+
+ Uint32 UsedPages = HeadPage - TailPage;
+ Int32 Remaining = cundopagesize - UsedPages;
+
+ // There can not be more than cundopagesize remaining
+ if (Remaining <= 0){
+ // No more undolog, crash node
+ progError(__LINE__,
+ ERR_NO_MORE_UNDOLOG,
+ "There are more than 1Mbyte undolog writes outstanding");
+ }
+ return Remaining;
+}
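+
+/* Note: the undo log pages are accounted for with a head (the page currently
+ * being written, derived from cundoposition) and a tail (the last page
+ * confirmed written to file, clastUndoPageIdWritten). As an illustration
+ * only: with head page 100, tail page 90 and cundopagesize = 96, ten pages
+ * are still outstanding and 86 remain; when nothing remains the node is
+ * stopped with ERR_NO_MORE_UNDOLOG. */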
+
+void
+Dbacc::updateLastUndoPageIdWritten(Signal* signal, Uint32 aNewValue){
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ clastUndoPageIdWritten = aNewValue;
+ if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_UNBLOCK, signal, 1);
+ jamEntry();
+ }//if
+ } else {
+ clastUndoPageIdWritten = aNewValue;
+ }//if
+}//Dbacc::updateLastUndoPageIdWritten()
+
+void
+Dbacc::updateUndoPositionPage(Signal* signal, Uint32 aNewValue){
+ if (remainingUndoPages() >= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ cundoposition = aNewValue;
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ EXECUTE_DIRECT(DBLQH, GSN_ACC_COM_BLOCK, signal, 1);
+ jamEntry();
+ }//if
+ } else {
+ cundoposition = aNewValue;
+ }//if
+}//Dbacc::updateUndoPositionPage()
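+
+/* Note: together, the two updaters above form a simple flow-control
+ * handshake with DBLQH. When the number of free undo pages drops below
+ * ZMIN_UNDO_PAGES_AT_COMMIT, GSN_ACC_COM_BLOCK is executed directly in
+ * DBLQH; once enough pages have been written back, GSN_ACC_COM_UNBLOCK is
+ * executed to lift the block. */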
+
+// Signal entries and statement blocks
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* COMMON SIGNAL RECEPTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* CONTINUEB CONTINUE SIGNAL */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execCONTINUEB(Signal* signal)
+{
+ Uint32 tcase;
+
+ jamEntry();
+ tcase = signal->theData[0];
+ tdata0 = signal->theData[1];
+ tresult = 0;
+ switch (tcase) {
+ case ZLOAD_BAL_LCP_TIMER:
+ if (clblPageOver == 0) {
+ jam();
+ clblPageCounter = clblPagesPerTick;
+ } else {
+ if (clblPageOver > clblPagesPerTick) {
+ jam();
+ clblPageOver = clblPageOver - clblPagesPerTick;
+ } else {
+ jam();
+ clblPageOver = 0;
+ clblPageCounter = clblPagesPerTick - clblPageOver;
+ }//if
+ }//if
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
+ return;
+ break;
+ case ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, signal->theData[3], signal->theData[4]);
+ return;
+ break;
+ case ZSR_READ_PAGES_ALLOC:
+ jam();
+ fragrecptr.i = tdata0;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srReadPagesAllocLab(signal);
+ return;
+ break;
+ case ZSTART_UNDO:
+ jam();
+ startUndoLab(signal);
+ return;
+ break;
+ case ZSEND_SCAN_HBREP:
+ jam();
+ sendScanHbRep(signal, tdata0);
+ break;
+ case ZREL_ROOT_FRAG:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ releaseRootFragResources(signal, tableId);
+ break;
+ }
+ case ZREL_FRAG:
+ {
+ jam();
+ Uint32 fragIndex = signal->theData[1];
+ releaseFragResources(signal, fragIndex);
+ break;
+ }
+ case ZREL_DIR:
+ {
+ jam();
+ Uint32 fragIndex = signal->theData[1];
+ Uint32 dirIndex = signal->theData[2];
+ Uint32 startIndex = signal->theData[3];
+ releaseDirResources(signal, fragIndex, dirIndex, startIndex);
+ break;
+ }
+ case ZREPORT_MEMORY_USAGE:{
+ jam();
+ static int c_currentMemUsed = 0;
+ int now = (cnoOfAllocatedPages * 100)/cpagesize;
+ const int thresholds[] = { 99, 90, 80, 0};
+
+ Uint32 i = 0;
+ const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]);
+ for(i = 0; i<sz; i++){
+ if(now >= thresholds[i]){
+ now = thresholds[i];
+ break;
+ }
+ }
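+    /* Note: 'now' is clamped to the highest threshold it has reached
+     * (99, 90, 80 or 0 per cent of cpagesize), so reportMemoryUsage() below
+     * is only called when the usage crosses one of these levels rather than
+     * on every 2000 ms CONTINUEB tick. */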
+
+ if(now != c_currentMemUsed){
+ reportMemoryUsage(signal, now > c_currentMemUsed ? 1 : -1);
+ }
+
+ c_currentMemUsed = now;
+
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ return;
+ }
+
+ case ZLCP_OP_WRITE_RT_BREAK:
+ {
+ operationRecPtr.i= signal->theData[1];
+ fragrecptr.i= signal->theData[2];
+ lcpConnectptr.i= signal->theData[3];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ lcp_write_op_to_undolog(signal);
+ return;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execCONTINUEB()
+
+/* ******************--------------------------------------------------------------- */
+/* FSCLOSECONF CLOSE FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSCLOSECONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tresult = 0;
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_CLOSE_UNDO:
+ jam();
+ releaseFsConnRec(signal);
+ break;
+ case LCP_CLOSE_DATA:
+ jam();
+ checkSyncUndoPagesLab(signal);
+ return;
+ break;
+ case SR_CLOSE_DATA:
+ jam();
+ sendaccSrconfLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSCLOSECONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSCLOSEREF CLOSE FILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbacc::execFSCLOSEREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENCONF OPENFILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSOPENCONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tuserptr = signal->theData[1];
+ tresult = 0; /* RESULT CHECK VALUE */
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_OPEN_UNDO_LCP:
+ jam();
+ lcpOpenUndofileConfLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_UNDO_LCP_NEXT:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ return;
+ break;
+ case OPEN_UNDO_FILE_SR:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ srStartUndoLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_DATA_FILE_FOR_WRITE:
+ jam();
+ lcpFsOpenConfLab(signal);
+ return;
+ break;
+ case WAIT_OPEN_DATA_FILE_FOR_READ:
+ jam();
+ fsConnectptr.p->fsPtr = tuserptr;
+ srFsOpenConfLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSOPENCONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENREF OPENFILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbacc::execFSOPENREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSREADCONF READ FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ fsConnectptr.i = signal->theData[0];
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ tresult = 0; /* RESULT CHECK VALUE */
+ switch (fsConnectptr.p->fsState) {
+ case WAIT_READ_PAGE_ZERO:
+ jam();
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srReadPageZeroLab(signal);
+ return;
+ break;
+ case WAIT_READ_DATA:
+ jam();
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ storeDataPageInDirectoryLab(signal);
+ return;
+ break;
+ case READ_UNDO_PAGE:
+ jam();
+ srDoUndoLab(signal);
+ return;
+ break;
+ case READ_UNDO_PAGE_AND_CLOSE:
+ jam();
+ fsConnectptr.p->fsState = WAIT_CLOSE_UNDO;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = DO NOT DELETE FILE */
+ srDoUndoLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSREADCONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSREADREF READ FILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, __LINE__, "Read of file refused");
+ return;
+}//Dbacc::execFSREADREF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSWRITECONF WRITE FILE CONF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSWRITECONF(Signal* signal)
+{
+ jamEntry();
+ fsOpptr.i = signal->theData[0];
+ ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
+ /* FS_OPERATION PTR */
+ tresult = 0; /* RESULT CHECK VALUE */
+ fsConnectptr.i = fsOpptr.p->fsConptr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fragrecptr.i = fsOpptr.p->fsOpfragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ switch (fsOpptr.p->fsOpstate) {
+ case WAIT_WRITE_UNDO:
+ jam();
+ updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
+ releaseFsOpRec(signal);
+ if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
+ jam();
+ checkSendLcpConfLab(signal);
+ return;
+ } else {
+ jam();
+ fragrecptr.p->lastUndoIsStored = ZTRUE;
+ }//if
+ return;
+ break;
+ case WAIT_WRITE_UNDO_EXIT:
+ jam();
+ updateLastUndoPageIdWritten(signal, fsOpptr.p->fsOpMemPage);
+ releaseFsOpRec(signal);
+ if (fragrecptr.p->nrWaitWriteUndoExit > 0) {
+ jam();
+ fragrecptr.p->nrWaitWriteUndoExit--;
+ }//if
+ if (fsConnectptr.p->fsState == WAIT_CLOSE_UNDO) {
+ jam();
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ }//if
+ if (fragrecptr.p->nrWaitWriteUndoExit == 0) {
+ if (fragrecptr.p->lastUndoIsStored == ZTRUE) {
+ jam();
+ fragrecptr.p->lastUndoIsStored = ZFALSE;
+ checkSendLcpConfLab(signal);
+ return;
+ }//if
+ }//if
+ return;
+ break;
+ case WAIT_WRITE_DATA:
+ jam();
+ releaseFsOpRec(signal);
+ fragrecptr.p->activeDataFilePage += ZWRITEPAGESIZE;
+ fragrecptr.p->activeDataPage = 0;
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ switch (fragrecptr.p->fragState) {
+ case LCP_SEND_PAGES:
+ jam();
+ savepagesLab(signal);
+ return;
+ break;
+ case LCP_SEND_OVER_PAGES:
+ jam();
+ saveOverPagesLab(signal);
+ return;
+ break;
+ case LCP_SEND_ZERO_PAGE:
+ jam();
+ saveZeroPageLab(signal);
+ return;
+ break;
+ case WAIT_ZERO_PAGE_STORED:
+ jam();
+ lcpCloseDataFileLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbacc::execFSWRITECONF()
+
+/* ******************--------------------------------------------------------------- */
+/* FSWRITEREF WRITE FILE REF */
+/* ******************------------------------------+ */
+/* SENDER: FS, LEVEL B */
+void Dbacc::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, __LINE__, "Write to file refused");
+ return;
+}//Dbacc::execFSWRITEREF()
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* END OF COMMON SIGNAL RECEPTION MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* SYSTEM RESTART MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+void Dbacc::execNDB_STTOR(Signal* signal)
+{
+ Uint32 tstartphase;
+ Uint32 tStartType;
+
+ jamEntry();
+ cndbcntrRef = signal->theData[0];
+ cmynodeid = signal->theData[1];
+ tstartphase = signal->theData[2];
+ tStartType = signal->theData[3];
+ switch (tstartphase) {
+ case ZSPH1:
+ jam();
+ ndbsttorryLab(signal);
+ return;
+ break;
+ case ZSPH2:
+ cnoLcpPages = 2 * (ZWRITEPAGESIZE + 1);
+ initialiseLcpPages(signal);
+ ndbsttorryLab(signal);
+ return;
+ break;
+ case ZSPH3:
+ if ((tStartType == NodeState::ST_NODE_RESTART) ||
+ (tStartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ //---------------------------------------------
+ // csystemRestart is used to check what is needed
+ // during log execution. When starting a node it
+      // is not a log execution but rather a normal
+ // execution. Thus we reset the variable here to
+ // avoid unnecessary system crashes.
+ //---------------------------------------------
+ csystemRestart = ZFALSE;
+ }//if
+
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 1);
+ break;
+ case ZSPH6:
+ jam();
+ clblPagesPerTick = clblPagesPerTickAfterSr;
+ csystemRestart = ZFALSE;
+
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ ndbsttorryLab(signal);
+ return;
+}//Dbacc::execNDB_STTOR()
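+
+/* Note on the start phases handled above: phase 1 only acknowledges, phase 2
+ * seizes the pages reserved for local checkpoints, phase 3 clears
+ * csystemRestart for node restarts and starts the ZLOAD_BAL_LCP_TIMER tick,
+ * and phase 6 switches to the post-restart page rate and starts the periodic
+ * ZREPORT_MEMORY_USAGE tick. */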
+
+/* ******************--------------------------------------------------------------- */
+/* STTOR START / RESTART */
+/* ******************------------------------------+ */
+/* SENDER: ANY, LEVEL B */
+void Dbacc::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ Uint32 tstartphase = signal->theData[1];
+ switch (tstartphase) {
+ case 1:
+ jam();
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ break;
+ }
+ tuserblockref = signal->theData[3];
+ csignalkey = signal->theData[6];
+ sttorrysignalLab(signal);
+ return;
+}//Dbacc::execSTTOR()
+
+/* --------------------------------------------------------------------------------- */
+/* ZSPH1 */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::ndbrestart1Lab(Signal* signal)
+{
+ cmynodeid = globalData.ownId;
+ cownBlockref = numberToRef(DBACC, cmynodeid);
+ czero = 0;
+ cminusOne = czero - 1;
+ ctest = 0;
+ cundoLogActive = ZFALSE;
+ csystemRestart = ZTRUE;
+ clblPageOver = 0;
+ clblPageCounter = 0;
+ cactiveUndoFilePage = 0;
+ cprevUndoaddress = cminusOne;
+ cundoposition = 0;
+ clastUndoPageIdWritten = 0;
+ cactiveUndoFileVersion = RNIL;
+ cactiveOpenUndoFsPtr = RNIL;
+ for (Uint32 tmp = 0; tmp < ZMAX_UNDO_VERSION; tmp++) {
+ csrVersList[tmp] = RNIL;
+ }//for
+ return;
+}//Dbacc::ndbrestart1Lab()
+
+void Dbacc::initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data)
+{
+ switch (tdata0) {
+ case 0:
+ jam();
+ initialiseTableRec(signal);
+ break;
+ case 1:
+ jam();
+ initialiseFsConnectionRec(signal);
+ break;
+ case 2:
+ jam();
+ initialiseFsOpRec(signal);
+ break;
+ case 3:
+ jam();
+ initialiseLcpConnectionRec(signal);
+ break;
+ case 4:
+ jam();
+ initialiseDirRec(signal);
+ break;
+ case 5:
+ jam();
+ initialiseDirRangeRec(signal);
+ break;
+ case 6:
+ jam();
+ initialiseFragRec(signal);
+ break;
+ case 7:
+ jam();
+ initialiseOverflowRec(signal);
+ break;
+ case 8:
+ jam();
+ initialiseOperationRec(signal);
+ break;
+ case 9:
+ jam();
+ initialisePageRec(signal);
+ break;
+ case 10:
+ jam();
+ initialiseRootfragRec(signal);
+ break;
+ case 11:
+ jam();
+ initialiseScanRec(signal);
+ break;
+ case 12:
+ jam();
+ initialiseSrVerRec(signal);
+
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = data;
+ sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ signal->theData[0] = ZINITIALISE_RECORDS;
+ signal->theData[1] = tdata0 + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = ref;
+ signal->theData[4] = data;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ return;
+}//Dbacc::initialiseRecordsLab()
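+
+/* Note: record initialisation is split into thirteen steps (cases 0-12
+ * above). Each step initialises one record pool and then sends
+ * ZINITIALISE_RECORDS back to this block with tdata0 + 1, so every pool is
+ * set up in its own signal execution; the final step replies to the sender
+ * of READ_CONFIG_REQ with READ_CONFIG_CONF. */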
+
+/* *********************************<< */
+/* NDB_STTORRY */
+/* *********************************<< */
+void Dbacc::ndbsttorryLab(Signal* signal)
+{
+ signal->theData[0] = cownBlockref;
+ sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dbacc::ndbsttorryLab()
+
+/* *********************************<< */
+/* SIZEALT_REP SIZE ALTERATION */
+/* *********************************<< */
+void Dbacc::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_RANGE, &cdirrangesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_ARRAY, &cdirarraysize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_FRAGMENT, &cfragmentsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OP_RECS, &coprecsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OVERFLOW_RECS,
+ &coverflowrecsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_PAGE8, &cpagesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_ROOT_FRAG,
+ &crootfragmentsize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_TABLE, &ctablesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_SCAN, &cscanRecSize));
+ initRecords();
+ ndbrestart1Lab(signal);
+
+ clblPagesPerTick = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTick);
+
+ clblPagesPerTickAfterSr = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTickAfterSr);
+
+ tdata0 = 0;
+ initialiseRecordsLab(signal, ref, senderData);
+ return;
+}//Dbacc::execREAD_CONFIG_REQ()
+
+/* *********************************<< */
+/* STTORRY */
+/* *********************************<< */
+void Dbacc::sttorrysignalLab(Signal* signal)
+{
+ signal->theData[0] = csignalkey;
+ signal->theData[1] = 3;
+ /* BLOCK CATEGORY */
+ signal->theData[2] = 2;
+ /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSPH1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ /* END OF START PHASES */
+ return;
+}//Dbacc::sttorrysignalLab()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_DIR_REC */
+/* INITIALISES THE DIRECTORY RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseDirRec(Signal* signal)
+{
+ DirectoryarrayPtr idrDirptr;
+ ndbrequire(cdirarraysize > 0);
+ for (idrDirptr.i = 0; idrDirptr.i < cdirarraysize; idrDirptr.i++) {
+ refresh_watch_dog();
+ ptrAss(idrDirptr, directoryarray);
+ for (Uint32 i = 0; i <= 255; i++) {
+ idrDirptr.p->pagep[i] = RNIL;
+ }//for
+ }//for
+ cdirmemory = 0;
+ cfirstfreedir = RNIL;
+}//Dbacc::initialiseDirRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_DIR_RANGE_REC */
+/* INITIALISES THE DIR_RANGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseDirRangeRec(Signal* signal)
+{
+ DirRangePtr idrDirRangePtr;
+
+ ndbrequire(cdirrangesize > 0);
+ for (idrDirRangePtr.i = 0; idrDirRangePtr.i < cdirrangesize; idrDirRangePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(idrDirRangePtr, dirRange);
+ idrDirRangePtr.p->dirArray[0] = idrDirRangePtr.i + 1;
+ for (Uint32 i = 1; i < 256; i++) {
+ idrDirRangePtr.p->dirArray[i] = RNIL;
+ }//for
+ }//for
+ idrDirRangePtr.i = cdirrangesize - 1;
+ ptrAss(idrDirRangePtr, dirRange);
+ idrDirRangePtr.p->dirArray[0] = RNIL;
+ cfirstfreeDirrange = 0;
+}//Dbacc::initialiseDirRangeRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FRAG_REC */
+/* INITIALISES THE FRAGMENT RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFragRec(Signal* signal)
+{
+ FragmentrecPtr regFragPtr;
+ ndbrequire(cfragmentsize > 0);
+ for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) {
+ jam();
+ refresh_watch_dog();
+ ptrAss(regFragPtr, fragmentrec);
+ initFragGeneral(regFragPtr);
+ regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
+ }//for
+ regFragPtr.i = cfragmentsize - 1;
+ ptrAss(regFragPtr, fragmentrec);
+ regFragPtr.p->nextfreefrag = RNIL;
+ cfirstfreefrag = 0;
+}//Dbacc::initialiseFragRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FS_CONNECTION_REC */
+/* INITIALISES THE FS_CONNECTION RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFsConnectionRec(Signal* signal)
+{
+ ndbrequire(cfsConnectsize > 0);
+ for (fsConnectptr.i = 0; fsConnectptr.i < cfsConnectsize; fsConnectptr.i++) {
+ ptrAss(fsConnectptr, fsConnectrec);
+ fsConnectptr.p->fsNext = fsConnectptr.i + 1;
+ fsConnectptr.p->fsPrev = RNIL;
+ fsConnectptr.p->fragrecPtr = RNIL;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+ }//for
+ fsConnectptr.i = cfsConnectsize - 1;
+ ptrAss(fsConnectptr, fsConnectrec);
+  fsConnectptr.p->fsNext = RNIL; /* INITIALISES THE LAST CONNECT RECORD */
+ cfsFirstfreeconnect = 0; /* INITIATES THE FIRST FREE CONNECT RECORD */
+}//Dbacc::initialiseFsConnectionRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_FS_OP_REC */
+/* INITIALISES THE FS_OP RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseFsOpRec(Signal* signal)
+{
+ ndbrequire(cfsOpsize > 0);
+ for (fsOpptr.i = 0; fsOpptr.i < cfsOpsize; fsOpptr.i++) {
+ ptrAss(fsOpptr, fsOprec);
+ fsOpptr.p->fsOpnext = fsOpptr.i + 1;
+ fsOpptr.p->fsOpfragrecPtr = RNIL;
+ fsOpptr.p->fsConptr = RNIL;
+ fsOpptr.p->fsOpstate = WAIT_NOTHING;
+ }//for
+ fsOpptr.i = cfsOpsize - 1;
+ ptrAss(fsOpptr, fsOprec);
+ fsOpptr.p->fsOpnext = RNIL;
+ cfsFirstfreeop = 0;
+}//Dbacc::initialiseFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_LCP_CONNECTION_REC */
+/* INITIALISES THE LCP_CONNECTION RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseLcpConnectionRec(Signal* signal)
+{
+ ndbrequire(clcpConnectsize > 0);
+ for (lcpConnectptr.i = 0; lcpConnectptr.i < clcpConnectsize; lcpConnectptr.i++) {
+ ptrAss(lcpConnectptr, lcpConnectrec);
+ lcpConnectptr.p->nextLcpConn = lcpConnectptr.i + 1;
+ lcpConnectptr.p->lcpUserptr = RNIL;
+ lcpConnectptr.p->rootrecptr = RNIL;
+ lcpConnectptr.p->lcpstate = LCP_FREE;
+ }//for
+ lcpConnectptr.i = clcpConnectsize - 1;
+ ptrAss(lcpConnectptr, lcpConnectrec);
+ lcpConnectptr.p->nextLcpConn = RNIL;
+ cfirstfreelcpConnect = 0;
+}//Dbacc::initialiseLcpConnectionRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_OPERATION_REC */
+/* INITIALISES THE OPERATION RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseOperationRec(Signal* signal)
+{
+ ndbrequire(coprecsize > 0);
+ for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(operationRecPtr, operationrec);
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->opState = FREE_OP;
+ operationRecPtr.p->nextOp = operationRecPtr.i + 1;
+ }//for
+ operationRecPtr.i = coprecsize - 1;
+ ptrAss(operationRecPtr, operationrec);
+ operationRecPtr.p->nextOp = RNIL;
+ cfreeopRec = 0;
+}//Dbacc::initialiseOperationRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_OVERFLOW_REC */
+/* INITIALISES THE OVERFLOW RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseOverflowRec(Signal* signal)
+{
+ OverflowRecordPtr iorOverflowRecPtr;
+
+ ndbrequire(coverflowrecsize > 0);
+ for (iorOverflowRecPtr.i = 0; iorOverflowRecPtr.i < coverflowrecsize; iorOverflowRecPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(iorOverflowRecPtr, overflowRecord);
+ iorOverflowRecPtr.p->nextfreeoverrec = iorOverflowRecPtr.i + 1;
+ }//for
+ iorOverflowRecPtr.i = coverflowrecsize - 1;
+ ptrAss(iorOverflowRecPtr, overflowRecord);
+ iorOverflowRecPtr.p->nextfreeoverrec = RNIL;
+ cfirstfreeoverrec = 0;
+}//Dbacc::initialiseOverflowRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_PAGE_REC */
+/* INITIALISES THE PAGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialisePageRec(Signal* signal)
+{
+ ndbrequire(cpagesize > 0);
+ cfreepage = 0;
+ cfirstfreepage = RNIL;
+ cnoOfAllocatedPages = 0;
+}//Dbacc::initialisePageRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_LCP_PAGES */
+/* INITIALISES THE LCP PAGE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseLcpPages(Signal* signal)
+{
+ Uint32 tilpIndex;
+
+ ndbrequire(cnoLcpPages >= (2 * (ZWRITEPAGESIZE + 1)));
+ /* --------------------------------------------------------------------------------- */
+ /* AN ABSOLUTE MINIMUM IS THAT WE HAVE 16 LCP PAGES TO HANDLE TWO CONCURRENT */
+ /* LCP'S ON LOCAL FRAGMENTS. */
+ /* --------------------------------------------------------------------------------- */
+ ndbrequire(cpagesize >= (cnoLcpPages + 8));
+ /* --------------------------------------------------------------------------------- */
+ /* THE NUMBER OF PAGES MUST BE AT LEAST 8 PLUS THE NUMBER OF PAGES REQUIRED BY */
+ /* THE LOCAL CHECKPOINT PROCESS. THIS NUMBER IS 8 TIMES THE PARALLELISM OF */
+ /* LOCAL CHECKPOINTS. */
+ /* --------------------------------------------------------------------------------- */
+ /* --------------------------------------------------------------------------------- */
+ /* WE SET UP A LINKED LIST OF PAGES FOR EXCLUSIVE USE BY LOCAL CHECKPOINTS. */
+ /* --------------------------------------------------------------------------------- */
+ cfirstfreeLcpPage = RNIL;
+ for (tilpIndex = 0; tilpIndex < cnoLcpPages; tilpIndex++) {
+ jam();
+ seizePage(signal);
+ rlpPageptr = spPageptr;
+ releaseLcpPage(signal);
+ }//for
+}//Dbacc::initialiseLcpPages()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_ROOTFRAG_REC */
+/* INITIALISES THE ROOTFRAG RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseRootfragRec(Signal* signal)
+{
+ ndbrequire(crootfragmentsize > 0);
+ for (rootfragrecptr.i = 0; rootfragrecptr.i < crootfragmentsize; rootfragrecptr.i++) {
+ refresh_watch_dog();
+ ptrAss(rootfragrecptr, rootfragmentrec);
+ rootfragrecptr.p->nextroot = rootfragrecptr.i + 1;
+ rootfragrecptr.p->fragmentptr[0] = RNIL;
+ rootfragrecptr.p->fragmentptr[1] = RNIL;
+ }//for
+ rootfragrecptr.i = crootfragmentsize - 1;
+ ptrAss(rootfragrecptr, rootfragmentrec);
+ rootfragrecptr.p->nextroot = RNIL;
+ cfirstfreerootfrag = 0;
+}//Dbacc::initialiseRootfragRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_SCAN_REC */
+/* INITIALISES THE QUE_SCAN RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseScanRec(Signal* signal)
+{
+ ndbrequire(cscanRecSize > 0);
+ for (scanPtr.i = 0; scanPtr.i < cscanRecSize; scanPtr.i++) {
+ ptrAss(scanPtr, scanRec);
+ scanPtr.p->scanNextfreerec = scanPtr.i + 1;
+ scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
+ scanPtr.p->scanTimer = 0;
+ scanPtr.p->scanContinuebCounter = 0;
+ }//for
+ scanPtr.i = cscanRecSize - 1;
+ ptrAss(scanPtr, scanRec);
+ scanPtr.p->scanNextfreerec = RNIL;
+ cfirstFreeScanRec = 0;
+}//Dbacc::initialiseScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_SR_VER_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseSrVerRec(Signal* signal)
+{
+ ndbrequire(csrVersionRecSize > 0);
+ for (srVersionPtr.i = 0; srVersionPtr.i < csrVersionRecSize; srVersionPtr.i++) {
+ ptrAss(srVersionPtr, srVersionRec);
+ srVersionPtr.p->nextFreeSr = srVersionPtr.i + 1;
+ }//for
+ srVersionPtr.i = csrVersionRecSize - 1;
+ ptrAss(srVersionPtr, srVersionRec);
+ srVersionPtr.p->nextFreeSr = RNIL;
+ cfirstFreeSrVersionRec = 0;
+}//Dbacc::initialiseSrVerRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INITIALISE_TABLE_REC */
+/* INITIALISES THE TABLE RECORDS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initialiseTableRec(Signal* signal)
+{
+ ndbrequire(ctablesize > 0);
+ for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tabrec);
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ tabptr.p->fragholder[i] = RNIL;
+ tabptr.p->fragptrholder[i] = RNIL;
+ }//for
+ tabptr.p->noOfKeyAttr = 0;
+ tabptr.p->hasCharAttr = 0;
+ for (Uint32 k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
+ tabptr.p->keyAttr[k].attributeDescriptor = 0;
+ tabptr.p->keyAttr[k].charsetInfo = 0;
+ }
+ }//for
+}//Dbacc::initialiseTableRec()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* ADD/DELETE FRAGMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+
+void Dbacc::initRootfragrec(Signal* signal)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ rootfragrecptr.p->mytabptr = req->tableId;
+ rootfragrecptr.p->roothashcheck = req->kValue + req->lhFragBits;
+ rootfragrecptr.p->noOfElements = 0;
+ rootfragrecptr.p->m_commit_count = 0;
+ for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
+ rootfragrecptr.p->scan[i] = RNIL;
+ }//for
+}//Dbacc::initRootfragrec()
+
+void Dbacc::execACCFRAGREQ(Signal* signal)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ jamEntry();
+ if (ERROR_INSERTED(3001)) {
+ jam();
+ addFragRefuse(signal, 1);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ tabptr.i = req->tableId;
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (tabptr.i >= ctablesize) {
+ jam();
+ addFragRefuse(signal, 640);
+ return;
+ }
+#endif
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire((req->reqInfo & 0xF) == ZADDFRAG);
+ ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId));
+ if (cfirstfreerootfrag == RNIL) {
+ jam();
+ addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
+ return;
+ }//if
+ seizeRootfragrec(signal);
+ if (!addfragtotab(signal, rootfragrecptr.i, req->fragId)) {
+ jam();
+ releaseRootFragRecord(signal, rootfragrecptr);
+ addFragRefuse(signal, ZFULL_ROOTFRAGRECORD_ERROR);
+ return;
+ }//if
+ initRootfragrec(signal);
+ for (Uint32 i = 0; i < 2; i++) {
+ jam();
+ if (cfirstfreefrag == RNIL) {
+ jam();
+ addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR);
+ return;
+ }//if
+ seizeFragrec(signal);
+ initFragGeneral(fragrecptr);
+ initFragAdd(signal, i, rootfragrecptr.i, fragrecptr);
+ rootfragrecptr.p->fragmentptr[i] = fragrecptr.i;
+ rootfragrecptr.p->fragmentid[i] = fragrecptr.p->myfid;
+ if (cfirstfreeDirrange == RNIL) {
+ jam();
+ addFragRefuse(signal, ZDIR_RANGE_ERROR);
+ return;
+ } else {
+ jam();
+ seizeDirrange(signal);
+ }//if
+ fragrecptr.p->directory = newDirRangePtr.i;
+ seizeDirectory(signal);
+ if (tresult < ZLIMIT_OF_ERROR) {
+ jam();
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+ } else {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ seizePage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ sdDirptr.p->pagep[0] = spPageptr.i;
+ tipPageId = 0;
+ inpPageptr = spPageptr;
+ initPage(signal);
+ if (cfirstfreeDirrange == RNIL) {
+ jam();
+ addFragRefuse(signal, ZDIR_RANGE_ERROR);
+ return;
+ } else {
+ jam();
+ seizeDirrange(signal);
+ }//if
+ fragrecptr.p->overflowdir = newDirRangePtr.i;
+ seizeDirectory(signal);
+ if (tresult < ZLIMIT_OF_ERROR) {
+ jam();
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+ } else {
+ jam();
+ addFragRefuse(signal, tresult);
+ return;
+ }//if
+ }//for
+ Uint32 userPtr = req->userPtr;
+ BlockReference retRef = req->userRef;
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ AccFragConf * const conf = (AccFragConf*)&signal->theData[0];
+
+ conf->userPtr = userPtr;
+ conf->rootFragPtr = rootfragrecptr.i;
+ conf->fragId[0] = rootfragrecptr.p->fragmentid[0];
+ conf->fragId[1] = rootfragrecptr.p->fragmentid[1];
+ conf->fragPtr[0] = rootfragrecptr.p->fragmentptr[0];
+ conf->fragPtr[1] = rootfragrecptr.p->fragmentptr[1];
+ conf->rootHashCheck = rootfragrecptr.p->roothashcheck;
+ sendSignal(retRef, GSN_ACCFRAGCONF, signal, AccFragConf::SignalLength, JBB);
+}//Dbacc::execACCFRAGREQ()
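+
+/* Note: each ACCFRAGREQ creates one root fragment backed by two local
+ * fragments (the loop above). Every local fragment gets a normal directory
+ * and an overflow directory, each seeded with one directory array, and the
+ * normal directory also gets its first 8 KB page. The fragment ids and
+ * pointers are returned in ACCFRAGCONF. */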
+
+void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ AccFragRef * const ref = (AccFragRef*)&signal->theData[0];
+ Uint32 userPtr = req->userPtr;
+ BlockReference retRef = req->userRef;
+
+ ref->userPtr = userPtr;
+ ref->errorCode = errorCode;
+ sendSignal(retRef, GSN_ACCFRAGREF, signal, AccFragRef::SignalLength, JBB);
+ return;
+}//Dbacc::addFragRefuse()
+
+void
+Dbacc::execTC_SCHVERREQ(Signal* signal)
+{
+ jamEntry();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ tabptr.i = signal->theData[0];
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ Uint32 noOfKeyAttr = signal->theData[6];
+ ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
+ Uint32 hasCharAttr = 0;
+
+ SegmentedSectionPtr s0Ptr;
+ signal->getSection(s0Ptr, 0);
+ SectionReader r0(s0Ptr, getSectionSegmentPool());
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ jam();
+ Uint32 attributeDescriptor = ~0;
+ Uint32 csNumber = ~0;
+ if (! r0.getWord(&attributeDescriptor) ||
+ ! r0.getWord(&csNumber)) {
+ jam();
+ break;
+ }
+ CHARSET_INFO* cs = 0;
+ if (csNumber != 0) {
+ cs = all_charsets[csNumber];
+ ndbrequire(cs != 0);
+ hasCharAttr = 1;
+ }
+ tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
+ tabptr.p->keyAttr[i].charsetInfo = cs;
+ i++;
+ }
+ ndbrequire(i == noOfKeyAttr);
+ releaseSections(signal);
+
+ tabptr.p->noOfKeyAttr = noOfKeyAttr;
+ tabptr.p->hasCharAttr = hasCharAttr;
+
+ // copy char attr flag to each fragment
+ for (Uint32 i1 = 0; i1 < MAX_FRAG_PER_NODE; i1++) {
+ jam();
+ if (tabptr.p->fragptrholder[i1] != RNIL) {
+ rootfragrecptr.i = tabptr.p->fragptrholder[i1];
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (Uint32 i2 = 0; i2 < 2; i2++) {
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i2];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->hasCharAttr = hasCharAttr;
+ }
+ }
+ }
+
+ // no reply to DICT
+}
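+
+/* Note: TC_SCHVERREQ carries the key attribute descriptors and charset
+ * numbers in a long-signal section. They are stored in the table record and
+ * the hasCharAttr flag is copied to every fragment of the table; as noted
+ * above, no reply is sent to DICT. */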
+
+void
+Dbacc::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TabrecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
+
+ tabPtr.p->tabUserRef = req->senderRef;
+ tabPtr.p->tabUserPtr = req->senderData;
+
+ signal->theData[0] = ZREL_ROOT_FRAG;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}
+
+void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
+{
+ RootfragmentrecPtr rootPtr;
+ TabrecPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabPtr.p->fragholder[i] != RNIL) {
+ jam();
+ Uint32 fragIndex;
+ rootPtr.i = tabPtr.p->fragptrholder[i];
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ if (rootPtr.p->fragmentptr[0] != RNIL) {
+ jam();
+ fragIndex = rootPtr.p->fragmentptr[0];
+ rootPtr.p->fragmentptr[0] = RNIL;
+ } else if (rootPtr.p->fragmentptr[1] != RNIL) {
+ jam();
+ fragIndex = rootPtr.p->fragmentptr[1];
+ rootPtr.p->fragmentptr[1] = RNIL;
+ } else {
+ jam();
+ releaseRootFragRecord(signal, rootPtr);
+ tabPtr.p->fragholder[i] = RNIL;
+ tabPtr.p->fragptrholder[i] = RNIL;
+ continue;
+ }//if
+ releaseFragResources(signal, fragIndex);
+ return;
+ }//if
+ }//for
+
+ /**
+ * Finished...
+ */
+ sendFSREMOVEREQ(signal, tableId);
+}//Dbacc::releaseRootFragResources()
+
+void Dbacc::releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr)
+{
+ rootPtr.p->nextroot = cfirstfreerootfrag;
+ cfirstfreerootfrag = rootPtr.i;
+}//Dbacc::releaseRootFragRecord()
+
+void Dbacc::releaseFragResources(Signal* signal, Uint32 fragIndex)
+{
+ FragmentrecPtr regFragPtr;
+ regFragPtr.i = fragIndex;
+ ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec);
+ verifyFragCorrect(regFragPtr);
+ if (regFragPtr.p->directory != RNIL) {
+ jam();
+ releaseDirResources(signal, regFragPtr.i, regFragPtr.p->directory, 0);
+ regFragPtr.p->directory = RNIL;
+ } else if (regFragPtr.p->overflowdir != RNIL) {
+ jam();
+ releaseDirResources(signal, regFragPtr.i, regFragPtr.p->overflowdir, 0);
+ regFragPtr.p->overflowdir = RNIL;
+ } else if (regFragPtr.p->firstOverflowRec != RNIL) {
+ jam();
+ releaseOverflowResources(signal, regFragPtr);
+ } else if (regFragPtr.p->firstFreeDirindexRec != RNIL) {
+ jam();
+ releaseDirIndexResources(signal, regFragPtr);
+ } else {
+ RootfragmentrecPtr rootPtr;
+ jam();
+ rootPtr.i = regFragPtr.p->myroot;
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ releaseFragRecord(signal, regFragPtr);
+ signal->theData[0] = ZREL_ROOT_FRAG;
+ signal->theData[1] = rootPtr.p->mytabptr;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dbacc::releaseFragResources()
+
+void Dbacc::verifyFragCorrect(FragmentrecPtr regFragPtr)
+{
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ jam();
+ ndbrequire(regFragPtr.p->datapages[i] == RNIL);
+ }//for
+ ndbrequire(regFragPtr.p->lockOwnersList == RNIL);
+ ndbrequire(regFragPtr.p->firstWaitInQueOp == RNIL);
+ ndbrequire(regFragPtr.p->lastWaitInQueOp == RNIL);
+ ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
+ //ndbrequire(regFragPtr.p->fsConnPtr == RNIL);
+ ndbrequire(regFragPtr.p->zeroPagePtr == RNIL);
+ ndbrequire(regFragPtr.p->nrWaitWriteUndoExit == 0);
+ ndbrequire(regFragPtr.p->sentWaitInQueOp == RNIL);
+}//Dbacc::verifyFragCorrect()
+
+void Dbacc::releaseDirResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex)
+{
+ DirRangePtr regDirRangePtr;
+ regDirRangePtr.i = dirIndex;
+ ptrCheckGuard(regDirRangePtr, cdirrangesize, dirRange);
+ for (Uint32 i = startIndex; i < 256; i++) {
+ jam();
+ if (regDirRangePtr.p->dirArray[i] != RNIL) {
+ jam();
+ Uint32 directoryIndex = regDirRangePtr.p->dirArray[i];
+ regDirRangePtr.p->dirArray[i] = RNIL;
+ releaseDirectoryResources(signal, fragIndex, dirIndex, (i + 1), directoryIndex);
+ return;
+ }//if
+ }//for
+ rdDirRangePtr = regDirRangePtr;
+ releaseDirrange(signal);
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = fragIndex;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseDirResources()
+
+void Dbacc::releaseDirectoryResources(Signal* signal,
+ Uint32 fragIndex,
+ Uint32 dirIndex,
+ Uint32 startIndex,
+ Uint32 directoryIndex)
+{
+ DirectoryarrayPtr regDirPtr;
+ regDirPtr.i = directoryIndex;
+ ptrCheckGuard(regDirPtr, cdirarraysize, directoryarray);
+ for (Uint32 i = 0; i < 256; i++) {
+ jam();
+ if (regDirPtr.p->pagep[i] != RNIL) {
+ jam();
+ rpPageptr.i = regDirPtr.p->pagep[i];
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ regDirPtr.p->pagep[i] = RNIL;
+ }//if
+ }//for
+ rdDirptr = regDirPtr;
+ releaseDirectory(signal);
+ signal->theData[0] = ZREL_DIR;
+ signal->theData[1] = fragIndex;
+ signal->theData[2] = dirIndex;
+ signal->theData[3] = startIndex;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+}//Dbacc::releaseDirectoryResources()
+
+void Dbacc::releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ Uint32 loopCount = 0;
+ OverflowRecordPtr regOverflowRecPtr;
+ while ((regFragPtr.p->firstOverflowRec != RNIL) &&
+ (loopCount < 1)) {
+ jam();
+ regOverflowRecPtr.i = regFragPtr.p->firstOverflowRec;
+ ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
+ regFragPtr.p->firstOverflowRec = regOverflowRecPtr.p->nextOverRec;
+ rorOverflowRecPtr = regOverflowRecPtr;
+ releaseOverflowRec(signal);
+ loopCount++;
+ }//while
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = regFragPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseOverflowResources()
+
+void Dbacc::releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ Uint32 loopCount = 0;
+ OverflowRecordPtr regOverflowRecPtr;
+ while ((regFragPtr.p->firstFreeDirindexRec != RNIL) &&
+ (loopCount < 1)) {
+ jam();
+ regOverflowRecPtr.i = regFragPtr.p->firstFreeDirindexRec;
+ ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord);
+ regFragPtr.p->firstFreeDirindexRec = regOverflowRecPtr.p->nextOverList;
+ rorOverflowRecPtr = regOverflowRecPtr;
+ releaseOverflowRec(signal);
+ loopCount++;
+ }//while
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = regFragPtr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbacc::releaseDirIndexResources()
+
+void Dbacc::releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr)
+{
+ regFragPtr.p->nextfreefrag = cfirstfreefrag;
+ cfirstfreefrag = regFragPtr.i;
+ initFragGeneral(regFragPtr);
+}//Dbacc::releaseFragRecord()
+
+void Dbacc::sendFSREMOVEREQ(Signal* signal, Uint32 tableId)
+{
+ FsRemoveReq * const fsReq = (FsRemoveReq *)signal->getDataPtrSend();
+ fsReq->userReference = cownBlockref;
+ fsReq->userPointer = tableId;
+ fsReq->fileNumber[0] = tableId;
+ fsReq->fileNumber[1] = (Uint32)-1; // Remove all fragments
+ fsReq->fileNumber[2] = (Uint32)-1; // Remove all data files within fragment
+ fsReq->fileNumber[3] = 255 | // No P-value used here
+ (3 << 8) | // Data-files in D3
+ (0 << 16) | // Data-files
+ (1 << 24); // Version 1 of fileNumber
+ fsReq->directory = 1;
+ fsReq->ownDirectory = 1;
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, FsRemoveReq::SignalLength, JBA);
+}//Dbacc::sendFSREMOVEREQ()
+
+void Dbacc::execFSREMOVECONF(Signal* signal)
+{
+ FsConf * const fsConf = (FsConf *)signal->getDataPtrSend();
+ TabrecPtr tabPtr;
+ tabPtr.i = fsConf->userPointer;
+ ptrCheckGuard(tabPtr, ctablesize, tabrec);
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = tabPtr.p->tabUserPtr;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(tabPtr.p->tabUserRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+
+ tabPtr.p->tabUserPtr = RNIL;
+ tabPtr.p->tabUserRef = 0;
+}//Dbacc::execFSREMOVECONF()
+
+void Dbacc::execFSREMOVEREF(Signal* signal)
+{
+ ndbrequire(false);
+}//Dbacc::execFSREMOVEREF()
+
+/* -------------------------------------------------------------------------- */
+/* ADDFRAGTOTAB */
+/* DESCRIPTION: PUTS A FRAGMENT ID AND A POINTER TO ITS RECORD INTO */
+/* TABLE ARRAY OF THE TABLE RECORD. */
+/* -------------------------------------------------------------------------- */
+bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
+{
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragholder[i] == RNIL) {
+ jam();
+ tabptr.p->fragholder[i] = fid;
+ tabptr.p->fragptrholder[i] = rootIndex;
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbacc::addfragtotab()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF ADD/DELETE FRAGMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* CONNECTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACCSEIZEREQ SEIZE REQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACCSEIZEREQ WITH */
+/* TUSERPTR, CONNECTION PTR OF LQH */
+/* TUSERBLOCKREF BLOCK REFERENCE OF LQH */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACCSEIZEREQ SEIZE REQ */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACCSEIZEREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0];
+ /* CONNECTION PTR OF LQH */
+ tuserblockref = signal->theData[1];
+ /* BLOCK REFERENCE OF LQH */
+ tresult = 0;
+ if (cfreeopRec == RNIL) {
+ jam();
+ refaccConnectLab(signal);
+ return;
+ }//if
+ seizeOpRec(signal);
+ ptrGuard(operationRecPtr);
+ operationRecPtr.p->userptr = tuserptr;
+ operationRecPtr.p->userblockref = tuserblockref;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ /* ******************************< */
+ /* ACCSEIZECONF */
+ /* ******************************< */
+ signal->theData[0] = tuserptr;
+ signal->theData[1] = operationRecPtr.i;
+ sendSignal(tuserblockref, GSN_ACCSEIZECONF, signal, 2, JBB);
+ return;
+}//Dbacc::execACCSEIZEREQ()
+
+void Dbacc::refaccConnectLab(Signal* signal)
+{
+ tresult = ZCONNECT_SIZE_ERROR;
+ /* ******************************< */
+ /* ACCSEIZEREF */
+ /* ******************************< */
+ signal->theData[0] = tuserptr;
+ signal->theData[1] = tresult;
+ sendSignal(tuserblockref, GSN_ACCSEIZEREF, signal, 2, JBB);
+ return;
+}//Dbacc::refaccConnectLab()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF CONNECTION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* EXECUTE OPERATION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* INIT_OP_REC */
+/* INFORMATION WHICH IS RECEIVED BY ACCKEYREQ WILL BE SAVED */
+/* IN THE OPERATION RECORD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initOpRec(Signal* signal)
+{
+ register Uint32 Treqinfo;
+
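+ /* --------------------------------------------------------------------------------- */
+ /* REQUEST INFO LAYOUT AS DECODED BELOW: BITS 0-2 OPERATION, BIT 3 SIMPLE FLAG */
+ /* (UNUSED HERE), BITS 4-5 LOCK MODE, BIT 6 DIRTY FLAG, BITS 7-8 NODE TYPE AND */
+ /* BIT 31 THE ACC LOCK REQUEST FLAG. */
+ /* --------------------------------------------------------------------------------- */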
+ Treqinfo = signal->theData[2];
+
+ operationRecPtr.p->hashValue = signal->theData[3];
+ operationRecPtr.p->tupkeylen = signal->theData[4];
+ operationRecPtr.p->xfrmtupkeylen = signal->theData[4];
+ operationRecPtr.p->transId1 = signal->theData[5];
+ operationRecPtr.p->transId2 = signal->theData[6];
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->operation = Treqinfo & 0x7;
+ /* --------------------------------------------------------------------------------- */
+ // opSimple is not used in this version. Is needed for deadlock handling later on.
+ /* --------------------------------------------------------------------------------- */
+ // operationRecPtr.p->opSimple = (Treqinfo >> 3) & 0x1;
+
+ operationRecPtr.p->lockMode = (Treqinfo >> 4) & 0x3;
+
+ Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0); // Only 1 if Read
+ Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1); // Only 1 if Dirty
+ Uint32 dirtyReadFlag = readFlag & dirtyFlag;
+ operationRecPtr.p->dirtyRead = dirtyReadFlag;
+
+ operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->fragptr = fragrecptr.i;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->elementPage = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->insertIsDone = ZFALSE;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
+ operationRecPtr.p->longPagePtr = RNIL;
+ operationRecPtr.p->longKeyPageIndex = RNIL;
+ operationRecPtr.p->scanRecPtr = RNIL;
+
+ // bit to mark lock operation
+ operationRecPtr.p->isAccLockReq = (Treqinfo >> 31) & 0x1;
+
+ // undo log is not run via ACCKEYREQ
+ operationRecPtr.p->isUndoLogReq = 0;
+}//Dbacc::initOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND_ACCKEYCONF */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendAcckeyconf(Signal* signal)
+{
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = operationRecPtr.p->insertIsDone;
+ signal->theData[2] = operationRecPtr.p->fid;
+ signal->theData[3] = operationRecPtr.p->localdata[0];
+ signal->theData[4] = operationRecPtr.p->localdata[1];
+ signal->theData[5] = fragrecptr.p->localkeylen;
+}//Dbacc::sendAcckeyconf()
+
+
+void Dbacc::ACCKEY_error(Uint32 fromWhere)
+{
+ switch(fromWhere) {
+ case 0:
+ ndbrequire(false);
+ case 1:
+ ndbrequire(false);
+ case 2:
+ ndbrequire(false);
+ case 3:
+ ndbrequire(false);
+ case 4:
+ ndbrequire(false);
+ case 5:
+ ndbrequire(false);
+ case 6:
+ ndbrequire(false);
+ case 7:
+ ndbrequire(false);
+ case 8:
+ ndbrequire(false);
+ case 9:
+ ndbrequire(false);
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dbacc::ACCKEY_error()
+
+/* ******************--------------------------------------------------------------- */
+/* ACCKEYREQ REQUEST FOR INSERT, DELETE, */
+/* READ AND UPDATE OF A TUPLE. */
+/* SENDER: LQH, LEVEL B */
+/* SIGNAL DATA: OPERATION_REC_PTR, CONNECTION PTR */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TREQINFO, */
+/* THASHVALUE, HASH VALUE OF THE TUP */
+/* TKEYLEN, LENGTH OF THE PRIMARY KEYS */
+/* TKEY1, PRIMARY KEY 1 */
+/* TKEY2, PRIMARY KEY 2 */
+/* TKEY3, PRIMARY KEY 3 */
+/* TKEY4, PRIMARY KEY 4 */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACCKEYREQ(Signal* signal)
+{
+ jamEntry();
+ operationRecPtr.i = signal->theData[0]; /* CONNECTION PTR */
+ fragrecptr.i = signal->theData[1]; /* FRAGMENT RECORD POINTER */
+ if (!((operationRecPtr.i < coprecsize) ||
+ (fragrecptr.i < cfragmentsize))) {
+ ACCKEY_error(0);
+ return;
+ }//if
+ ptrAss(operationRecPtr, operationrec);
+ ptrAss(fragrecptr, fragmentrec);
+ ndbrequire(operationRecPtr.p->transactionstate == IDLE);
+
+ initOpRec(signal);
+ // normalize key if any char attr
+ if (! operationRecPtr.p->isAccLockReq && fragrecptr.p->hasCharAttr)
+ xfrmKeyData(signal);
+
+ /*---------------------------------------------------------------*/
+ /* */
+ /* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */
+ /* PAGE AND MEMORY PAGE INDEX TO START THE SEARCH WITHIN. */
+ /* WE REMEMBER THESE ADDRESSES IN CASE WE LATER NEED TO INSERT */
+ /* THE ITEM AFTER NOT FINDING IT. */
+ /*---------------------------------------------------------------*/
+ getElement(signal);
+
+ if (tgeResult == ZTRUE) {
+ switch (operationRecPtr.p->operation) {
+ case ZREAD:
+ case ZUPDATE:
+ case ZDELETE:
+ case ZWRITE:
+ case ZSCAN_OP:
+ if (!tgeLocked){
+ sendAcckeyconf(signal);
+ if (operationRecPtr.p->dirtyRead == ZFALSE) {
+ /*---------------------------------------------------------------*/
+ // It is not a dirty read. We proceed by locking and continue with
+ // the operation.
+ /*---------------------------------------------------------------*/
+ Uint32 eh = gePageptr.p->word32[tgeElementptr];
+ operationRecPtr.p->scanBits = ElementHeader::getScanBits(eh);
+ operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(eh);
+ operationRecPtr.p->elementPage = gePageptr.i;
+ operationRecPtr.p->elementContainer = tgeContainerptr;
+ operationRecPtr.p->elementPointer = tgeElementptr;
+ operationRecPtr.p->elementIsforward = tgeForward;
+
+ eh = ElementHeader::setLocked(operationRecPtr.i);
+ dbgWord32(gePageptr, tgeElementptr, eh);
+ gePageptr.p->word32[tgeElementptr] = eh;
+
+ insertLockOwnersList(signal , operationRecPtr);
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------*/
+ // It is a dirty read. We do not lock anything. Set state to
+ // IDLE since no COMMIT call will come.
+ /*---------------------------------------------------------------*/
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ return;
+ }//if
+ } else {
+ jam();
+ accIsLockedLab(signal);
+ return;
+ }//if
+ break;
+ case ZINSERT:
+ jam();
+ insertExistElemLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else if (tgeResult == ZFALSE) {
+ switch (operationRecPtr.p->operation) {
+ case ZINSERT:
+ case ZWRITE:
+ jam();
+ // If a write operation makes an insert we switch operation to ZINSERT so
+ // that the commit-method knows an insert has been made and updates noOfElements.
+ operationRecPtr.p->operation = ZINSERT;
+ operationRecPtr.p->insertIsDone = ZTRUE;
+ insertelementLab(signal);
+ return;
+ break;
+ case ZREAD:
+ case ZUPDATE:
+ case ZDELETE:
+ case ZSCAN_OP:
+ jam();
+ acckeyref1Lab(signal, ZREAD_ERROR);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ acckeyref1Lab(signal, tgeResult);
+ return;
+ }//if
+ return;
+}//Dbacc::execACCKEYREQ()
+
+void
+Dbacc::xfrmKeyData(Signal* signal)
+{
+ tabptr.i = fragrecptr.p->myTableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+
+ Uint32 dst[1024 * MAX_XFRM_MULTIPLY];
+ Uint32 dstSize = (sizeof(dst) >> 2);
+ Uint32* src = &signal->theData[7];
+ const Uint32 noOfKeyAttr = tabptr.p->noOfKeyAttr;
+ Uint32 dstPos = 0;
+ Uint32 srcPos = 0;
+ Uint32 i = 0;
+
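+ // each key attribute is either copied unchanged (no charset) or replaced by its
+ // strxfrm:ed form padded to a word boundary; the result overwrites the original
+ // key in signal->theData and its word length is stored in xfrmtupkeylen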
+ while (i < noOfKeyAttr) {
+ const Tabrec::KeyAttr& keyAttr = tabptr.p->keyAttr[i];
+
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ Uint32 dstWords = ~0;
+ uchar* dstPtr = (uchar*)&dst[dstPos];
+ const uchar* srcPtr = (const uchar*)&src[srcPos];
+ CHARSET_INFO* cs = keyAttr.charsetInfo;
+
+ if (cs == 0) {
+ jam();
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId = AttributeDescriptor::getType(keyAttr.attributeDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ ndbrequire(ok);
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0)
+ dstPtr[n++] = 0;
+ dstWords = (n >> 2);
+ }
+ dstPos += dstWords;
+ srcPos += srcWords;
+ i++;
+ }
+ memcpy(src, dst, dstPos << 2);
+ operationRecPtr.p->xfrmtupkeylen = dstPos;
+}
+
+void Dbacc::accIsLockedLab(Signal* signal)
+{
+ ndbrequire(csystemRestart == ZFALSE);
+ queOperPtr.i = ElementHeader::getOpPtrI(gePageptr.p->word32[tgeElementptr]);
+ ptrCheckGuard(queOperPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->dirtyRead == ZFALSE) {
+ Uint32 return_result;
+ if (operationRecPtr.p->lockMode == ZREADLOCK) {
+ jam();
+ priPageptr = gePageptr;
+ tpriElementptr = tgeElementptr;
+ return_result = placeReadInLockQueue(signal);
+ } else {
+ jam();
+ pwiPageptr = gePageptr;
+ tpwiElementptr = tgeElementptr;
+ return_result = placeWriteInLockQueue(signal);
+ }//if
+ if (return_result == ZPARALLEL_QUEUE) {
+ jam();
+ sendAcckeyconf(signal);
+ return;
+ } else if (return_result == ZSERIAL_QUEUE) {
+ jam();
+ signal->theData[0] = RNIL;
+ return;
+ } else if (return_result == ZWRITE_ERROR) {
+ jam();
+ acckeyref1Lab(signal, return_result);
+ return;
+ }//if
+ ndbrequire(false);
+ } else {
+ if (queOperPtr.p->elementIsDisappeared == ZFALSE) {
+ jam();
+ /*---------------------------------------------------------------*/
+ // It is a dirty read. We do not lock anything. Set state to
+ // IDLE since no COMMIT call will arrive.
+ /*---------------------------------------------------------------*/
+ sendAcckeyconf(signal);
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------*/
+ // The tuple does not exist in the committed world currently.
+ // Report read error.
+ /*---------------------------------------------------------------*/
+ acckeyref1Lab(signal, ZREAD_ERROR);
+ return;
+ }//if
+ }//if
+}//Dbacc::accIsLockedLab()
+
+/* --------------------------------------------------------------------------------- */
+/* I N S E R T E X I S T E L E M E N T */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertExistElemLab(Signal* signal)
+{
+ if (!tgeLocked){
+ jam();
+ acckeyref1Lab(signal, ZWRITE_ERROR);/* THE ELEMENT ALREADY EXISTS */
+ return;
+ }//if
+ accIsLockedLab(signal);
+}//Dbacc::insertExistElemLab()
+
+/* --------------------------------------------------------------------------------- */
+/* INSERTELEMENT */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertelementLab(Signal* signal)
+{
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
+ jam();
+ acckeyref1Lab(signal, ZTEMPORARY_ACC_UNDO_FAILURE);
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ acckeyref1Lab(signal, tresult);
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->keyLength != operationRecPtr.p->tupkeylen) {
+ // historical
+ ndbrequire(fragrecptr.p->keyLength == 0);
+ }//if
+
+ signal->theData[0] = operationRecPtr.p->userptr;
+ Uint32 blockNo = refToBlock(operationRecPtr.p->userblockref);
+ EXECUTE_DIRECT(blockNo, GSN_LQH_ALLOCREQ, signal, 1);
+ jamEntry();
+ if (signal->theData[0] != 0) {
+ jam();
+ Uint32 result_code = signal->theData[0];
+ acckeyref1Lab(signal, result_code);
+ return;
+ }//if
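+ // combine the two result words returned by LQH_ALLOCREQ into one local key word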
+ Uint32 localKey = (signal->theData[1] << MAX_TUPLES_BITS) + signal->theData[2];
+
+ insertLockOwnersList(signal, operationRecPtr);
+
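+ // keep the 16 hash value bits above the k + lhfragbits low bits as hashvaluePart
+ // (cf. ElementHeader::getHashValuePart in execACCKEYREQ)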
+ const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
+ operationRecPtr.p->hashvaluePart =
+ (operationRecPtr.p->hashValue >> tmp) & 0xFFFF;
+ operationRecPtr.p->scanBits = 0; /* NOT ANY ACTIVE SCAN */
+ tidrElemhead = ElementHeader::setLocked(operationRecPtr.i);
+ idrPageptr = gdiPageptr;
+ tidrPageindex = tgdiPageindex;
+ tidrForward = ZTRUE;
+ idrOperationRecPtr = operationRecPtr;
+ clocalkey[0] = localKey;
+ operationRecPtr.p->localdata[0] = localKey;
+ /* --------------------------------------------------------------------------------- */
+ /* WE SET THE LOCAL KEY TO MINUS ONE TO INDICATE IT IS NOT YET VALID. */
+ /* --------------------------------------------------------------------------------- */
+ insertElement(signal);
+ sendAcckeyconf(signal);
+ return;
+}//Dbacc::insertelementLab()
+
+/* --------------------------------------------------------------------------------- */
+/* PLACE_READ_IN_LOCK_QUEUE */
+/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
+/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
+/* PRI_PAGEPTR PAGE POINTER OF ELEMENT */
+/* TPRI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
+/* OUTPUT TRESULT = */
+/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
+/* OPERATION CAN PROCEED NOW. */
+/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
+/* ERROR CODE OPERATION NEEDS ABORTING */
+/* THE ELEMENT WAS LOCKED AND WE WANT TO READ THE TUPLE. WE WILL CHECK THE LOCK */
+/* QUEUES TO PERFORM THE PROPER ACTION. */
+/* */
+/* IN SOME PLACES IN THE CODE BELOW THAT HANDLES WHAT TO DO WHEN THE TUPLE IS LOCKED */
+/* WE DO ASSUME THAT NEXT_PARALLEL_QUEUE AND NEXT_SERIAL_QUEUE ON OPERATION_REC_PTR */
+/* HAVE BEEN INITIALISED TO RNIL. THUS WE DO NOT PERFORM THIS ONCE MORE EVEN IF IT */
+/* COULD BE NICE FOR READABILITY. */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::placeReadInLockQueue(Signal* signal)
+{
+ tgnptMainOpPtr = queOperPtr;
+ getNoParallelTransaction(signal);
+ if (tgnptNrTransaction == 1) {
+ if ((queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (queOperPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ OPERATION AND THIS TRANSACTION ALREADY OWNS THE LOCK */
+ /* ALONE. PUT THE OPERATION LAST IN THE PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueue(signal);
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ switch (queOperPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
+ /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->lockMode = queOperPtr.p->lockMode;
+ break;
+ }//switch
+ return ZPARALLEL_QUEUE;
+ }//if
+ }//if
+ if (queOperPtr.p->nextSerialQue == RNIL) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ OPERATION AND THERE IS NO SERIAL QUEUE. IF THERE IS NO */
+ /* WRITE OPERATION THAT OWNS THE LOCK OR ANY WRITE OPERATION IN THE PARALLEL QUEUE */
+ /* IT IS ENOUGH TO CHECK THE LOCK MODE OF THE LEADER IN THE PARALLEL QUEUE. IF IT IS */
+ /* A READ LOCK THEN WE PLACE OURSELVES IN THE PARALLEL QUEUE OTHERWISE WE GO ON TO */
+ /* PLACE OURSELVES IN THE SERIAL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ switch (queOperPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueue(signal);
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ return ZPARALLEL_QUEUE;
+ default:
+ jam();
+ queOperPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = queOperPtr.i;
+ putOpInFragWaitQue(signal);
+ break;
+ }//switch
+ } else {
+ jam();
+ placeSerialQueueRead(signal);
+ }//if
+ return ZSERIAL_QUEUE;
+}//Dbacc::placeReadInLockQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* WE WILL CHECK IF THIS TRANSACTION ALREADY OWNS A PARALLEL QUEUE AT SOME SPOT IN */
+/* THE SERIAL QUEUE WITHOUT ANY NEIGHBORS FROM OTHER TRANSACTIONS. IF SO WE WILL */
+/* INSERT IT IN THAT PARALLEL QUEUE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::placeSerialQueueRead(Signal* signal)
+{
+ readWriteOpPtr.i = queOperPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ PSQR_LOOP:
+ jam();
+ if (readWriteOpPtr.p->nextSerialQue == RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS NO PREVIOUS OPERATION IN THIS TRANSACTION THAT WE COULD JOIN */
+ /* IN A PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ checkOnlyReadEntry(signal);
+ return;
+ }//if
+ tgnptMainOpPtr = readWriteOpPtr;
+ getNoParallelTransaction(signal);
+ if (tgnptNrTransaction == 1) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
+ /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING A READ IN THE SAME TRANSACTION WHERE WE ALREADY */
+ /* PREVIOUSLY HAVE EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
+ /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
+ /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
+ /* GO TO THE SERIAL LOCK QUEUE WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT AND THE */
+ /* TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueue(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ switch (readWriteOpPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE TRANSACTION PREVIOUSLY SET A WRITE LOCK WE MUST ENSURE THAT ALL */
+ /* OPERATIONS IN THE PARALLEL QUEUE HAVE WRITE LOCK MODE TO AVOID STRANGE BUGS.*/
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->lockMode = readWriteOpPtr.p->lockMode;
+ break;
+ }//switch
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ }//if
+ readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ goto PSQR_LOOP;
+}//Dbacc::placeSerialQueueRead()
+
+/* --------------------------------------------------------------------------------- */
+/* WE WILL CHECK IF THE LAST ENTRY IN THE SERIAL QUEUE CONTAINS ONLY READ */
+/* OPERATIONS. IF SO WE WILL INSERT IT IN THAT PARALLEL QUEUE. OTHERWISE WE */
+/* WILL PLACE IT AT THE END OF THE SERIAL QUEUE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checkOnlyReadEntry(Signal* signal)
+{
+ switch (readWriteOpPtr.p->lockMode) {
+ case ZREADLOCK:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* SINCE THIS LAST QUEUE ONLY CONTAINS READ LOCKS WE CAN JOIN THE PARALLEL QUEUE AT */
+ /* THE END. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueue(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ break;
+ default:
+ jam(); /* PUT THE OPERATION RECORD IN THE SERIAL QUEUE */
+ readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
+ break;
+ }//switch
+ putOpInFragWaitQue(signal);
+}//Dbacc::checkOnlyReadEntry()
+
+/* --------------------------------------------------------------------------------- */
+/* GET_NO_PARALLEL_TRANSACTION */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getNoParallelTransaction(Signal* signal)
+{
+ OperationrecPtr tnptOpPtr;
+
+ tgnptNrTransaction = 1;
+ tnptOpPtr.i = tgnptMainOpPtr.p->nextParallelQue;
+ while ((tnptOpPtr.i != RNIL) &&
+ (tgnptNrTransaction == 1)) {
+ jam();
+ ptrCheckGuard(tnptOpPtr, coprecsize, operationrec);
+ if ((tnptOpPtr.p->transId1 == tgnptMainOpPtr.p->transId1) &&
+ (tnptOpPtr.p->transId2 == tgnptMainOpPtr.p->transId2)) {
+ tnptOpPtr.i = tnptOpPtr.p->nextParallelQue;
+ } else {
+ jam();
+ tgnptNrTransaction++;
+ }//if
+ }//while
+}//Dbacc::getNoParallelTransaction()
+
+void Dbacc::moveLastParallelQueue(Signal* signal)
+{
+ while (mlpqOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
+ ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
+ }//while
+}//Dbacc::moveLastParallelQueue()
+
+void Dbacc::moveLastParallelQueueWrite(Signal* signal)
+{
+ /* --------------------------------------------------------------------------------- */
+ /* ENSURE THAT ALL OPERATIONS HAVE LOCK MODE SET TO WRITE SINCE WE INSERT A */
+ /* WRITE LOCK INTO THE PARALLEL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ while (mlpqOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
+ mlpqOperPtr.i = mlpqOperPtr.p->nextParallelQue;
+ ptrCheckGuard(mlpqOperPtr, coprecsize, operationrec);
+ }//while
+ mlpqOperPtr.p->lockMode = operationRecPtr.p->lockMode;
+}//Dbacc::moveLastParallelQueueWrite()
+
+/* --------------------------------------------------------------------------------- */
+/* PLACE_WRITE_IN_LOCK_QUEUE */
+/* INPUT: OPERATION_REC_PTR OUR OPERATION POINTER */
+/* QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER */
+/* PWI_PAGEPTR PAGE POINTER OF ELEMENT */
+/* TPWI_ELEMENTPTR ELEMENT POINTER OF ELEMENT */
+/* OUTPUT TRESULT = */
+/* ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE */
+/* OPERATION CAN PROCEED NOW. */
+/* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE */
+/* ERROR CODE OPERATION NEEDS ABORTING */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::placeWriteInLockQueue(Signal* signal)
+{
+ tgnptMainOpPtr = queOperPtr;
+ getNoParallelTransaction(signal);
+ if (!((tgnptNrTransaction == 1) &&
+ (queOperPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (queOperPtr.p->transId2 == operationRecPtr.p->transId2))) {
+ jam();
+ placeSerialQueueWrite(signal);
+ return ZSERIAL_QUEUE;
+ }//if
+
+ /*
+ WE ARE PERFORMING A READ EXCLUSIVE, INSERT, UPDATE OR DELETE IN THE SAME
+ TRANSACTION WHERE WE PREVIOUSLY HAVE EXECUTED AN OPERATION.
+ Read-All, Update-All, Insert-All and Delete-Insert are allowed
+ combinations.
+ Delete-Read, Delete-Update and Delete-Delete are not allowed
+ combinations and will result in a tuple not found error.
+ */
+ mlpqOperPtr = queOperPtr;
+ moveLastParallelQueueWrite(signal);
+
+ if (operationRecPtr.p->operation == ZINSERT &&
+ mlpqOperPtr.p->operation != ZDELETE){
+ jam();
+ return ZWRITE_ERROR;
+ }//if
+
+ operationRecPtr.p->localdata[0] = queOperPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = queOperPtr.p->localdata[1];
+ operationRecPtr.p->prevParallelQue = mlpqOperPtr.i;
+ mlpqOperPtr.p->nextParallelQue = operationRecPtr.i;
+ return ZPARALLEL_QUEUE;
+}//Dbacc::placeWriteInLockQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* WE HAVE TO PLACE IT SOMEWHERE IN THE SERIAL QUEUE INSTEAD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::placeSerialQueueWrite(Signal* signal)
+{
+ readWriteOpPtr = queOperPtr;
+ PSQW_LOOP:
+ if (readWriteOpPtr.p->nextSerialQue == RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE COULD NOT PUT IN ANY PARALLEL QUEUE. WE MUST PUT IT LAST IN THE SERIAL QUEUE. */
+ /* --------------------------------------------------------------------------------- */
+ readWriteOpPtr.p->nextSerialQue = operationRecPtr.i;
+ operationRecPtr.p->prevSerialQue = readWriteOpPtr.i;
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue;
+ ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec);
+ tgnptMainOpPtr = readWriteOpPtr;
+ getNoParallelTransaction(signal);
+ if (tgnptNrTransaction == 1) {
+ /* --------------------------------------------------------------------------------- */
+ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */
+ /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ if ((readWriteOpPtr.p->transId1 == operationRecPtr.p->transId1) &&
+ (readWriteOpPtr.p->transId2 == operationRecPtr.p->transId2)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE PERFORMING AN UPDATE OR DELETE IN THE SAME TRANSACTION WHERE WE ALREADY */
+ /* PREVIOUSLY HAVE EXECUTED AN OPERATION. INSERT-DELETE, READ-UPDATE, READ-READ, */
+ /* UPDATE-UPDATE, UPDATE-DELETE, READ-DELETE, INSERT-READ, INSERT-UPDATE ARE ALLOWED */
+ /* COMBINATIONS. A NEW INSERT AFTER A DELETE IS NOT ALLOWED AND SUCH AN INSERT WILL */
+ /* GO TO THE SERIAL LOCK QUEUE WHICH IT WILL NOT LEAVE UNTIL A TIME-OUT AND THE */
+ /* TRANSACTION IS ABORTED. READS AND UPDATES AFTER DELETES ARE ALSO NOT ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ mlpqOperPtr = readWriteOpPtr;
+ moveLastParallelQueueWrite(signal);
+ readWriteOpPtr = mlpqOperPtr;
+ operationRecPtr.p->prevParallelQue = readWriteOpPtr.i;
+ readWriteOpPtr.p->nextParallelQue = operationRecPtr.i;
+ operationRecPtr.p->localdata[0] = readWriteOpPtr.p->localdata[0];
+ operationRecPtr.p->localdata[1] = readWriteOpPtr.p->localdata[1];
+ putOpInFragWaitQue(signal);
+ return;
+ }//if
+ }//if
+ goto PSQW_LOOP;
+}//Dbacc::placeSerialQueueWrite()
+
+/* ------------------------------------------------------------------------- */
+/* ACC KEYREQ END */
+/* ------------------------------------------------------------------------- */
+void Dbacc::acckeyref1Lab(Signal* signal, Uint32 result_code)
+{
+ if (operationRecPtr.p->keyinfoPage != RNIL) {
+ jam();
+ rpPageptr.i = operationRecPtr.p->keyinfoPage;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ operationRecPtr.p->keyinfoPage = RNIL;
+ }//if
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ /* ************************<< */
+ /* ACCKEYREF */
+ /* ************************<< */
+ signal->theData[0] = cminusOne;
+ signal->theData[1] = result_code;
+ return;
+}//Dbacc::acckeyref1Lab()
+
+/* ******************--------------------------------------------------------------- */
+/* ACCMINUPDATE UPDATE LOCAL KEY REQ */
+/* DESCRIPTION: UPDATES THE LOCAL KEY OF AN ELEMENT IN THE HASH TABLE */
+/* THIS SIGNAL IS EXPECTED AFTER ANY INSERT REQ */
+/* ENTER ACCMINUPDATE WITH SENDER: LQH, LEVEL B */
+/* OPERATION_REC_PTR, OPERATION RECORD PTR */
+/* CLOCALKEY(0), LOCAL KEY 1 */
+/* CLOCALKEY(1) LOCAL KEY 2 */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACCMINUPDATE(Signal* signal)
+{
+ Page8Ptr ulkPageidptr;
+ Uint32 tulkLocalPtr;
+ Uint32 tlocalkey1, tlocalkey2;
+ Uint32 TlogStart;
+
+ jamEntry();
+ operationRecPtr.i = signal->theData[0];
+ tlocalkey1 = signal->theData[1];
+ tlocalkey2 = signal->theData[2];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->transactionstate == ACTIVE) {
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ulkPageidptr.i = operationRecPtr.p->elementPage;
+ tulkLocalPtr = operationRecPtr.p->elementPointer + operationRecPtr.p->elementIsforward;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ptrCheckGuard(ulkPageidptr, cpagesize, page8);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ //----------------------------------------------------------
+ // To avoid undo log the element header we take care to only
+ // undo log the local key part.
+ //----------------------------------------------------------
+ if (operationRecPtr.p->elementIsforward == 1) {
+ jam();
+ TlogStart = tulkLocalPtr;
+ } else {
+ jam();
+ TlogStart = tulkLocalPtr - fragrecptr.p->localkeylen + 1;
+ }//if
+ datapageptr.p = ulkPageidptr.p;
+ cundoinfolength = fragrecptr.p->localkeylen;
+ cundoElemIndex = TlogStart;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey1);
+ arrGuard(tulkLocalPtr, 2048);
+ ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey1;
+ operationRecPtr.p->localdata[0] = tlocalkey1;
+ if (fragrecptr.p->localkeylen == 1) {
+ return;
+ } else if (fragrecptr.p->localkeylen == 2) {
+ jam();
+ tulkLocalPtr = tulkLocalPtr + operationRecPtr.p->elementIsforward;
+ operationRecPtr.p->localdata[1] = tlocalkey2;
+ dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey2);
+ arrGuard(tulkLocalPtr, 2048);
+ ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey2;
+ return;
+ } else {
+ jam();
+ }//if
+ }//if
+ ndbrequire(false);
+}//Dbacc::execACCMINUPDATE()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_COMMITREQ COMMIT TRANSACTION */
+/* SENDER: LQH, LEVEL B */
+/* INPUT: OPERATION_REC_PTR , */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::execACC_COMMITREQ(Signal* signal)
+{
+ Uint8 Toperation;
+ jamEntry();
+ operationRecPtr.i = signal->theData[0];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ commitOperation(signal);
+ Toperation = operationRecPtr.p->operation;
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ if(Toperation != ZREAD){
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->m_commit_count++;
+ if (Toperation != ZINSERT) {
+ if (Toperation != ZDELETE) {
+ return;
+ } else {
+ jam();
+ rootfragrecptr.p->noOfElements--;
+ fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
+ /* TIME FOR JOIN BUCKETS PROCESS */
+ if (fragrecptr.p->expandCounter > 0) {
+ if (fragrecptr.p->expandFlag < 2) {
+ jam();
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
+ }//if
+ }//if
+ }//if
+ }//if
+ } else {
+ jam(); /* EXPAND PROCESS HANDLING */
+ rootfragrecptr.p->noOfElements++;
+ fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack >= (1u << 31)) {
+ /* SLACK IS STORED UNSIGNED, SO THIS MEANS SLACK HAS GONE BELOW ZERO */
+ if (fragrecptr.p->expandFlag == 0) {
+ jam();
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ }//if
+ }//if
+ }//if
+ }
+ return;
+}//Dbacc::execACC_COMMITREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC ABORT REQ ABORT ALL OPERATION OF THE TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+/* ******************--------------------------------------------------------------- */
+/* ACC ABORT REQ ABORT TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_ABORTREQ(Signal* signal)
+{
+ jamEntry();
+ accAbortReqLab(signal, true);
+}//Dbacc::execACC_ABORTREQ()
+
+void Dbacc::accAbortReqLab(Signal* signal, bool sendConf)
+{
+ operationRecPtr.i = signal->theData[0];
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ tresult = 0; /* ZFALSE */
+ if ((operationRecPtr.p->transactionstate == ACTIVE) ||
+ (operationRecPtr.p->transactionstate == WAIT_COMMIT_ABORT)) {
+ jam();
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ operationRecPtr.p->transactionstate = ABORT;
+ abortOperation(signal);
+ } else {
+ ndbrequire(operationRecPtr.p->transactionstate == IDLE);
+ jam();
+ }//if
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ if (! sendConf)
+ return;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACC_ABORTCONF, signal, 1, JBB);
+ return;
+}//Dbacc::accAbortReqLab()
+
+/*
+ * Lock or unlock tuple.
+ */
+void Dbacc::execACC_LOCKREQ(Signal* signal)
+{
+ jamEntry();
+ AccLockReq* sig = (AccLockReq*)signal->getDataPtrSend();
+ AccLockReq reqCopy = *sig;
+ AccLockReq* const req = &reqCopy;
+ Uint32 lockOp = (req->requestInfo & 0xFF);
+ if (lockOp == AccLockReq::LockShared ||
+ lockOp == AccLockReq::LockExclusive) {
+ jam();
+ // find table
+ tabptr.i = req->tableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ // find fragment (TUX will know it)
+ if (req->fragPtrI == RNIL) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragptrholder[i] != RNIL) {
+ rootfragrecptr.i = tabptr.p->fragptrholder[i];
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == req->fragId) {
+ jam();
+ req->fragPtrI = rootfragrecptr.p->fragmentptr[0];
+ break;
+ }
+ if (rootfragrecptr.p->fragmentid[1] == req->fragId) {
+ jam();
+ req->fragPtrI = rootfragrecptr.p->fragmentptr[1];
+ break;
+ }
+ }
+ }
+ }
+ fragrecptr.i = req->fragPtrI;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire(req->fragId == fragrecptr.p->myfid);
+ // caller must be explicit here
+ ndbrequire(req->accOpPtr == RNIL);
+ // seize operation to hold the lock
+ if (cfreeopRec != RNIL) {
+ jam();
+ seizeOpRec(signal);
+ // init as in ACCSEIZEREQ
+ operationRecPtr.p->userptr = req->userPtr;
+ operationRecPtr.p->userblockref = req->userRef;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ // do read with lock via ACCKEYREQ
+ Uint32 lockMode = (lockOp == AccLockReq::LockShared) ? 0 : 1;
+ Uint32 opCode = ZSCAN_OP;
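+ // theData[2] below mirrors the ACCKEYREQ request info decoded in initOpRec:
+ // operation in bits 0-2, lock mode in bits 4-5, isAccLockReq flag in bit 31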
+ signal->theData[0] = operationRecPtr.i;
+ signal->theData[1] = fragrecptr.i;
+ signal->theData[2] = opCode | (lockMode << 4) | (1u << 31);
+ signal->theData[3] = req->hashValue;
+ signal->theData[4] = 1; // fake primKeyLen
+ signal->theData[5] = req->transId1;
+ signal->theData[6] = req->transId2;
+ // enter local key in place of PK
+ signal->theData[7] = req->tupAddr;
+ EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, 8);
+ // translate the result
+ if (signal->theData[0] < RNIL) {
+ jam();
+ req->returnCode = AccLockReq::Success;
+ req->accOpPtr = operationRecPtr.i;
+ } else if (signal->theData[0] == RNIL) {
+ jam();
+ req->returnCode = AccLockReq::IsBlocked;
+ req->accOpPtr = operationRecPtr.i;
+ } else {
+ ndbrequire(signal->theData[0] == (UintR)-1);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Refused;
+ req->accOpPtr = RNIL;
+ }
+ } else {
+ jam();
+ req->returnCode = AccLockReq::NoFreeOp;
+ }
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::Unlock) {
+ jam();
+ // do unlock via ACC_COMMITREQ (immediate)
+ signal->theData[0] = req->accOpPtr;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_COMMITREQ, signal, 1);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::Abort) {
+ jam();
+ // do abort via ACC_ABORTREQ (immediate)
+ signal->theData[0] = req->accOpPtr;
+ accAbortReqLab(signal, false);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ if (lockOp == AccLockReq::AbortWithConf) {
+ jam();
+ // do abort via ACC_ABORTREQ (with conf signal)
+ signal->theData[0] = req->accOpPtr;
+ accAbortReqLab(signal, true);
+ releaseOpRec(signal);
+ req->returnCode = AccLockReq::Success;
+ *sig = *req;
+ return;
+ }
+ ndbrequire(false);
+}
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF EXECUTE OPERATION MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: INSERT */
+/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY INSERT_ELEMENT. THIS */
+/* ROUTINE IS THE SOLE INTERFACE TO INSERT ELEMENTS INTO THE INDEX. */
+/* CURRENT USERS ARE INSERT REQUESTS, EXPAND CONTAINER AND SHRINK */
+/* CONTAINER. */
+/* */
+/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
+/* INSERT_ELEMENT */
+/* INSERT_CONTAINER */
+/* ADDNEWCONTAINER */
+/* GETFREELIST */
+/* INCREASELISTCONT */
+/* SEIZE_LEFTLIST */
+/* SEIZE_RIGHTLIST */
+/* */
+/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
+/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
+/* TAKE_REC_OUT_OF_FREE_OVERPAGE AND RELEASE_OVERFLOW_REC ARE */
+/* EXCEPTIONS TO THIS RULE. */
+/* */
+/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
+/* THOSE DEFINED AS INPUT AND OUTPUT IN INSERT_ELEMENT */
+/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
+/* AND POINTER VARIABLES. */
+/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
+/* FRAGMENT RECORD. THIS IS MORE OR LESS STATIC DURING A SIGNAL */
+/* EXECUTION. */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* INSERT_ELEMENT */
+/* INPUT: */
+/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
+/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
+/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
+/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED) */
+/* CIDR_KEYS(ARRAY OF TUPLE KEYS) */
+/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
+/* FRAGRECPTR */
+/* IDR_OPERATION_REC_PTR */
+/* TIDR_KEY_LEN */
+/* */
+/* OUTPUT: */
+/* TIDR_PAGEINDEX (PAGE INDEX OF INSERTED ELEMENT) */
+/* IDR_PAGEPTR (PAGE POINTER OF INSERTED ELEMENT) */
+/* TIDR_FORWARD (CONTAINER DIRECTION OF INSERTED ELEMENT) */
+/* NONE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertElement(Signal* signal)
+{
+ DirRangePtr inrOverflowrangeptr;
+ DirectoryarrayPtr inrOverflowDirptr;
+ OverflowRecordPtr inrOverflowRecPtr;
+ Page8Ptr inrNewPageptr;
+ Uint32 tinrNextSamePage;
+ Uint32 tinrTmp;
+
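+ /* --------------------------------------------------------------------------------- */
+ /* CONTAINER HEADER BITS AS USED HERE AND IN INSERT_CONTAINER: BITS 0-6 NEXT */
+ /* CONTAINER PAGE INDEX, BITS 7-8 SIDE (ZLEFT/ZRIGHT) OF THE NEXT CONTAINER OR */
+ /* ZERO IF THERE IS NONE, BIT 9 NEXT CONTAINER ON THE SAME PAGE, BIT 10 CONTAINER */
+ /* USES THE WHOLE BUFFER AND BITS 26-31 THE CONTAINER LENGTH. */
+ /* --------------------------------------------------------------------------------- */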
+ do {
+ insertContainer(signal);
+ if (tidrResult != ZFALSE) {
+ jam();
+ return;
+ /* INSERTION IS DONE, OR */
+ /* AN ERROR IS DETECTED */
+ }//if
+ if (((tidrContainerhead >> 7) & 0x3) != 0) {
+ tinrNextSamePage = (tidrContainerhead >> 9) & 0x1; /* BIT TELLING WHETHER */
+ /* THE NEXT CONTAINER IS ON THE SAME PAGE */
+ tidrPageindex = tidrContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((tidrContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tidrForward = ZTRUE;
+ } else if (((tidrContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ tidrForward = cminusOne;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ if (tinrNextSamePage == ZFALSE) {
+ jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ tinrTmp = idrPageptr.p->word32[tidrContainerptr + 1];
+ inrOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(inrOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tinrTmp >> 8), 256);
+ inrOverflowDirptr.i = inrOverflowrangeptr.p->dirArray[tinrTmp >> 8];
+ ptrCheckGuard(inrOverflowDirptr, cdirarraysize, directoryarray);
+ idrPageptr.i = inrOverflowDirptr.p->pagep[tinrTmp & 0xff];
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ }//if
+ ndbrequire(tidrPageindex < ZEMPTYLIST);
+ } else {
+ break;
+ }//if
+ } while (1);
+ gflPageptr.p = idrPageptr.p;
+ getfreelist(signal);
+ if (tgflPageindex == ZEMPTYLIST) {
+ jam();
+ /* NO FREE BUFFER IS FOUND */
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ }//if
+ inrOverflowRecPtr.i = fragrecptr.p->firstOverflowRec;
+ ptrCheckGuard(inrOverflowRecPtr, coverflowrecsize, overflowRecord);
+ inrNewPageptr.i = inrOverflowRecPtr.p->overpage;
+ ptrCheckGuard(inrNewPageptr, cpagesize, page8);
+ gflPageptr.p = inrNewPageptr.p;
+ getfreelist(signal);
+ ndbrequire(tgflPageindex != ZEMPTYLIST);
+ tancNext = 0;
+ } else {
+ jam();
+ inrNewPageptr = idrPageptr;
+ tancNext = 1;
+ }//if
+ tslUpdateHeader = ZTRUE;
+ tslPageindex = tgflPageindex;
+ slPageptr.p = inrNewPageptr.p;
+ if (tgflBufType == ZLEFT) {
+ seizeLeftlist(signal);
+ tidrForward = ZTRUE;
+ } else {
+ seizeRightlist(signal);
+ tidrForward = cminusOne;
+ }//if
+ tancPageindex = tgflPageindex;
+ tancPageid = inrNewPageptr.p->word32[ZPOS_PAGE_ID];
+ tancBufType = tgflBufType;
+ tancContainerptr = tidrContainerptr;
+ ancPageptr.p = idrPageptr.p;
+ addnewcontainer(signal);
+
+ idrPageptr = inrNewPageptr;
+ tidrPageindex = tgflPageindex;
+ insertContainer(signal);
+ ndbrequire(tidrResult == ZTRUE);
+}//Dbacc::insertElement()
+
+/* --------------------------------------------------------------------------------- */
+/* INSERT_CONTAINER */
+/* INPUT: */
+/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */
+/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */
+/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */
+/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED) */
+/* CKEYS(ARRAY OF TUPLE KEYS) */
+/* CLOCALKEY(ARRAY OF LOCAL KEYS). */
+/* TIDR_KEY_LEN */
+/* FRAGRECPTR */
+/* IDR_OPERATION_REC_PTR */
+/* OUTPUT: */
+/* TIDR_RESULT (ZTRUE FOR SUCCESS AND ZFALSE OTHERWISE) */
+/* TIDR_CONTAINERHEAD (HEADER OF CONTAINER) */
+/* TIDR_CONTAINERPTR (POINTER TO CONTAINER HEADER) */
+/* */
+/* DESCRIPTION: */
+/* THE FREE AREA OF THE CONTAINER WILL BE CALCULATED. IF IT IS */
+/* LARGER THAN OR EQUAL TO THE ELEMENT LENGTH, THE ELEMENT WILL BE */
+/* INSERTED IN THE CONTAINER AND THE CONTAINER HEAD WILL BE UPDATED. */
+/* THIS ROUTINE ALWAYS DEALS WITH ONLY ONE CONTAINER AND NEVER */
+/* STARTS ANYTHING OUTSIDE OF THIS CONTAINER. */
+/* */
+/* SHORT FORM: IDR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::insertContainer(Signal* signal)
+{
+ Uint32 tidrContainerlen;
+ Uint32 tidrConfreelen;
+ Uint32 tidrNextSide;
+ Uint32 tidrNextConLen;
+ Uint32 tidrIndex;
+ Uint32 tidrInputIndex;
+ Uint32 tidrContLen;
+ Uint32 guard26;
+
+ tidrResult = ZFALSE;
+ tidrContainerptr = (tidrPageindex << ZSHIFT_PLUS) - (tidrPageindex << ZSHIFT_MINUS);
+ tidrContainerptr = tidrContainerptr + ZHEAD_SIZE;
+ /* --------------------------------------------------------------------------------- */
+ /* CALCULATE THE POINTER TO THE ELEMENT TO BE INSERTED AND THE POINTER TO THE */
+ /* CONTAINER HEADER OF THE OTHER SIDE OF THE BUFFER. */
+ /* --------------------------------------------------------------------------------- */
+ if (tidrForward == ZTRUE) {
+ jam();
+ tidrNextSide = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ arrGuard(tidrNextSide + 1, 2048);
+ tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
+ tidrContainerlen = tidrContainerhead >> 26;
+ tidrIndex = tidrContainerptr + tidrContainerlen;
+ } else {
+ jam();
+ tidrNextSide = tidrContainerptr;
+ tidrContainerptr = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ arrGuard(tidrContainerptr + 1, 2048);
+ tidrContainerhead = idrPageptr.p->word32[tidrContainerptr];
+ tidrContainerlen = tidrContainerhead >> 26;
+ tidrIndex = (tidrContainerptr - tidrContainerlen) + (ZCON_HEAD_SIZE - 1);
+ }//if
+ if (tidrContainerlen > (ZBUF_SIZE - 3)) {
+ return;
+ }//if
+ tidrConfreelen = ZBUF_SIZE - tidrContainerlen;
+ /* --------------------------------------------------------------------------------- */
+ /* WE CALCULATE THE TOTAL LENGTH THE CONTAINER CAN EXPAND TO */
+ /* THIS INCLUDES THE OTHER SIDE OF THE BUFFER IF POSSIBLE TO EXPAND THERE. */
+ /* --------------------------------------------------------------------------------- */
+ if (((tidrContainerhead >> 10) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOT EXPANDED TO THE ENTIRE BUFFER YET. WE CAN THUS READ THE OTHER */
+/* SIDE'S CONTAINER HEADER TO READ ITS LENGTH. */
+ /* --------------------------------------------------------------------------------- */
+ tidrNextConLen = idrPageptr.p->word32[tidrNextSide] >> 26;
+ tidrConfreelen = tidrConfreelen - tidrNextConLen;
+ if (tidrConfreelen > ZBUF_SIZE) {
+ ndbrequire(false);
+ /* --------------------------------------------------------------------------------- */
+ /* THE BUFFERS ARE PLACED ON TOP OF EACH OTHER. THIS SHOULD NEVER OCCUR. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ } else {
+ jam();
+ tidrNextConLen = 1; /* INDICATE OTHER SIDE IS NOT PART OF FREE LIST */
+ }//if
+ if (tidrConfreelen < fragrecptr.p->elementLength) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE CONTAINER COULD NOT BE EXPANDED TO FIT THE NEW ELEMENT. WE HAVE TO */
+ /* RETURN AND FIND A NEW CONTAINER TO INSERT IT INTO. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ tidrContainerlen = tidrContainerlen + fragrecptr.p->elementLength;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = idrPageptr.p;
+ cundoElemIndex = tidrContainerptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ if (tidrNextConLen == 0) {
+ /* EACH SIDE OF THE BUFFER WHICH BELONGS TO A FREE */
+ /* LIST HAS ZERO AS LENGTH. */
+ if (tidrContainerlen > ZUP_LIMIT) {
+ dbgWord32(idrPageptr, tidrContainerptr, idrPageptr.p->word32[tidrContainerptr] | (1 << 10));
+ idrPageptr.p->word32[tidrContainerptr] = idrPageptr.p->word32[tidrContainerptr] | (1 << 10);
+ tslUpdateHeader = ZFALSE;
+ tslPageindex = tidrPageindex;
+ slPageptr.p = idrPageptr.p;
+ if (tidrForward == ZTRUE) {
+ jam();
+ seizeRightlist(signal); /* REMOVE THE RIGHT SIDE OF THE BUFFER FROM THE LIST */
+ } else {
+ jam();
+ /* OF THE FREE CONTAINERS */
+ seizeLeftlist(signal); /* REMOVE THE LEFT SIDE OF THE BUFFER FROM THE LIST */
+ }//if
+ }//if
+ }//if
+ /* OF THE FREE CONTAINERS */
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOW FOUND A FREE SPOT IN THE CURRENT CONTAINER. WE INSERT THE */
+ /* ELEMENT HERE. THE ELEMENT CONTAINS A HEADER, A LOCAL KEY AND A TUPLE KEY. */
+ /* BEFORE INSERTING THE ELEMENT WE WILL UPDATE THE OPERATION RECORD WITH THE */
+ /* DATA CONCERNING WHERE WE INSERTED THE ELEMENT. THIS MAKES IT EASY TO FIND */
+ /* THIS INFORMATION WHEN WE RETURN TO UPDATE THE LOCAL KEY OR RETURN TO COMMIT */
+/* OR ABORT THE INSERT. IF NO OPERATION RECORD EXISTS IT MEANS THAT WE ARE */
+ /* PERFORMING THIS AS A PART OF THE EXPAND OR SHRINK PROCESS. */
+ /* --------------------------------------------------------------------------------- */
+ if (idrOperationRecPtr.i != RNIL) {
+ jam();
+ idrOperationRecPtr.p->elementIsforward = tidrForward;
+ idrOperationRecPtr.p->elementPage = idrPageptr.i;
+ idrOperationRecPtr.p->elementContainer = tidrContainerptr;
+ idrOperationRecPtr.p->elementPointer = tidrIndex;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* WE CHOOSE TO UNDO LOG INSERTS BY WRITING THE BEFORE VALUE TO THE UNDO LOG. */
+ /* WE COULD ALSO HAVE DONE THIS BY WRITING THIS BEFORE VALUE WHEN DELETING */
+ /* ELEMENTS. WE CHOOSE TO PUT IT HERE SINCE WE THEREBY ENSURE THAT WE ALWAYS */
+ /* UNDO LOG ALL WRITES TO PAGE MEMORY. IT SHOULD BE EASIER TO MAINTAIN SUCH A */
+ /* STRUCTURE. IT IS RATHER DIFFICULT TO MAINTAIN A LOGICAL STRUCTURE WHERE */
+ /* DELETES ARE INSERTS AND INSERTS ARE PURELY DELETES. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (tidrForward == ZTRUE) {
+ cundoElemIndex = tidrIndex;
+ } else {
+ cundoElemIndex = (tidrIndex + 1) - fragrecptr.p->elementLength;
+ }//if
+ cundoinfolength = fragrecptr.p->elementLength;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(idrPageptr, tidrIndex, tidrElemhead);
+ idrPageptr.p->word32[tidrIndex] = tidrElemhead; /* INSERTS THE HEAD OF THE ELEMENT */
+ tidrIndex += tidrForward;
+ guard26 = fragrecptr.p->localkeylen - 1;
+ arrGuard(guard26, 2);
+ for (tidrInputIndex = 0; tidrInputIndex <= guard26; tidrInputIndex++) {
+ dbgWord32(idrPageptr, tidrIndex, clocalkey[tidrInputIndex]);
+ arrGuard(tidrIndex, 2048);
+ idrPageptr.p->word32[tidrIndex] = clocalkey[tidrInputIndex]; /* INSERTS LOCALKEY */
+ tidrIndex += tidrForward;
+ }//for
+ tidrContLen = idrPageptr.p->word32[tidrContainerptr] << 6;
+ tidrContLen = tidrContLen >> 6;
+ dbgWord32(idrPageptr, tidrContainerptr, (tidrContainerlen << 26) | tidrContLen);
+ idrPageptr.p->word32[tidrContainerptr] = (tidrContainerlen << 26) | tidrContLen;
+ tidrResult = ZTRUE;
+}//Dbacc::insertContainer()
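+
+// Illustrative sketch (hypothetical helper, not part of the block): the free-space test
+// that insertContainer() performs above can be summarised in isolation. ZBUF_SIZE is
+// the buffer size in words; when this container does not yet own the other side of the
+// buffer, the other side's current length is subtracted from the expandable area.
+static inline bool
+elementFitsInContainer(Uint32 containerLen, Uint32 otherSideLen,
+                       bool ownsOtherSide, Uint32 elementLen, Uint32 bufSize)
+{
+  Uint32 freeLen = bufSize - containerLen;   // room up to the full buffer
+  if (!ownsOtherSide) {
+    freeLen = freeLen - otherSideLen;        // other side still occupies its part
+  }
+  return freeLen >= elementLen;              // insert only if the element fits
+}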
+
+/* --------------------------------------------------------------------------------- */
+/* ADDNEWCONTAINER */
+/* INPUT: */
+/* TANC_CONTAINERPTR */
+/* ANC_PAGEPTR */
+/* TANC_NEXT */
+/* TANC_PAGEINDEX */
+/* TANC_BUF_TYPE */
+/* TANC_PAGEID */
+/* OUTPUT: */
+/* NONE */
+/* */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::addnewcontainer(Signal* signal)
+{
+ Uint32 tancTmp1;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tancContainerptr;
+ datapageptr.p = ancPageptr.p;
+ cundoinfolength = 2;
+ undoWritingProcess(signal); /* WHEN UNDO PROCESS HAS STARTED, */
+ }//if
+ /* THE OLD DATA IS STORED ON AN UNDO PAGE */
+ /* --------------------------------------------------------------------------------- */
+/* KEEP LENGTH INFORMATION IN BITS 26-31. */
+/* SET BIT 9, INDICATING WHETHER THE NEXT BUFFER IS IN THE SAME PAGE, USING TANC_NEXT. */
+/* SET TYPE OF NEXT CONTAINER IN BITS 7-8. */
+/* SET PAGE INDEX OF NEXT CONTAINER IN BITS 0-6. */
+/* KEEP INDICATOR OF OWNING OTHER SIDE OF BUFFER IN BIT 10. */
+ /* --------------------------------------------------------------------------------- */
+ tancTmp1 = ancPageptr.p->word32[tancContainerptr] >> 10;
+ tancTmp1 = tancTmp1 << 1;
+ tancTmp1 = tancTmp1 | tancNext;
+ tancTmp1 = tancTmp1 << 2;
+ tancTmp1 = tancTmp1 | tancBufType; /* TYPE OF THE NEXT CONTAINER */
+ tancTmp1 = tancTmp1 << 7;
+ tancTmp1 = tancTmp1 | tancPageindex;
+ dbgWord32(ancPageptr, tancContainerptr, tancTmp1);
+ ancPageptr.p->word32[tancContainerptr] = tancTmp1; /* HEAD OF THE CONTAINER IS UPDATED */
+ dbgWord32(ancPageptr, tancContainerptr + 1, tancPageid);
+ ancPageptr.p->word32[tancContainerptr + 1] = tancPageid;
+}//Dbacc::addnewcontainer()
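+
+// Illustrative sketch (hypothetical helpers, not part of the block): the container
+// header word written above packs the fields described in the comment in
+// addnewcontainer(). Assuming that layout (length in bits 26-31, ownership of the
+// other buffer side in bit 10, next-on-same-page in bit 9, next type in bits 7-8,
+// next page index in bits 0-6), the encode/decode steps look like this.
+static inline Uint32 containerLength(Uint32 head)         { return head >> 26; }
+static inline Uint32 containerOwnsOtherSide(Uint32 head)  { return (head >> 10) & 1; }
+static inline Uint32 containerNextOnSamePage(Uint32 head) { return (head >> 9) & 1; }
+static inline Uint32 containerNextType(Uint32 head)       { return (head >> 7) & 3; }
+static inline Uint32 containerNextIndex(Uint32 head)      { return head & 0x7f; }
+static inline Uint32
+packContainerNext(Uint32 oldHead, Uint32 next, Uint32 bufType, Uint32 pageindex)
+{
+  Uint32 head = oldHead >> 10;        // keep length (26-31) and ownership (10) bits
+  head = (head << 1) | next;          // bit 9: next container is in the same page
+  head = (head << 2) | bufType;       // bits 7-8: type of the next container
+  head = (head << 7) | pageindex;     // bits 0-6: page index of the next container
+  return head;
+}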
+
+/* --------------------------------------------------------------------------------- */
+/* GETFREELIST */
+/* INPUT: */
+/* GFL_PAGEPTR (POINTER TO A PAGE RECORD). */
+/* OUTPUT: */
+/* TGFL_PAGEINDEX(POINTER TO A FREE BUFFER IN THE FREEPAGE), AND */
+/* TGFL_BUF_TYPE( TYPE OF THE FREE BUFFER). */
+/* DESCRIPTION: SEARCHES THE FREE LIST OF FREE BUFFERS IN THE PAGE HEAD */
+/* (WORD32(1)) AND RETURNS THE ADDRESS OF A FREE BUFFER OR NIL. */
+/* THE FREE BUFFER CAN BE A RIGHT CONTAINER OR A LEFT ONE */
+/* THE KIND OF THE CONTAINER IS NOTED BY TGFL_BUF_TYPE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getfreelist(Signal* signal)
+{
+ Uint32 tgflTmp;
+
+ tgflTmp = gflPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tgflPageindex = (tgflTmp >> 7) & 0x7f; /* LEFT FREE LIST */
+ tgflBufType = ZLEFT;
+ if (tgflPageindex == ZEMPTYLIST) {
+ jam();
+ tgflPageindex = tgflTmp & 0x7f; /* RIGHT FREE LIST */
+ tgflBufType = ZRIGHT;
+ }//if
+ ndbrequire(tgflPageindex <= ZEMPTYLIST);
+}//Dbacc::getfreelist()
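+
+// Illustrative sketch (hypothetical helper, not part of the block): getfreelist()
+// above reads the two 7-bit free-list heads packed into the page header word at
+// ZPOS_EMPTY_LIST, bits 7-13 for the left list and bits 0-6 for the right list,
+// where ZEMPTYLIST means that the list is empty.
+struct FreeListHeads {
+  Uint32 leftHead;    // first free left container, or ZEMPTYLIST
+  Uint32 rightHead;   // first free right container, or ZEMPTYLIST
+};
+static inline FreeListHeads
+readFreeListHeads(Uint32 emptyListWord)
+{
+  FreeListHeads heads;
+  heads.leftHead = (emptyListWord >> 7) & 0x7f;
+  heads.rightHead = emptyListWord & 0x7f;
+  return heads;
+}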
+
+/* --------------------------------------------------------------------------------- */
+/* INCREASELISTCONT */
+/* INPUT: */
+/* ILC_PAGEPTR PAGE POINTER OF THE PAGE TO INCREASE THE NUMBER OF */
+/* CONTAINERS IN. */
+/* DESCRIPTION: A CONTAINER OF AN OVERFLOW PAGE (FREEPAGEPTR) HAS BEEN ALLOCATED, SO */
+/* THE NUMBER OF ALLOCATED CONTAINERS HAS TO BE INCREASED BY ONE. */
+/* IF THE NUMBER OF ALLOCATED CONTAINERS IS ABOVE THE FREE LIMIT WE WILL */
+/* REMOVE THE PAGE FROM THE FREE LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::increaselistcont(Signal* signal)
+{
+ OverflowRecordPtr ilcOverflowRecPtr;
+
+ dbgWord32(ilcPageptr, ZPOS_ALLOC_CONTAINERS, ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1);
+ ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1;
+ if (ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
+ if (ilcPageptr.p->word32[ZPOS_OVERFLOWREC] != RNIL) {
+ jam();
+ ilcOverflowRecPtr.i = ilcPageptr.p->word32[ZPOS_OVERFLOWREC];
+ dbgWord32(ilcPageptr, ZPOS_OVERFLOWREC, RNIL);
+ ilcPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ ptrCheckGuard(ilcOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoOverflowRecPtr = ilcOverflowRecPtr;
+ takeRecOutOfFreeOverpage(signal);
+ rorOverflowRecPtr = ilcOverflowRecPtr;
+ releaseOverflowRec(signal);
+ }//if
+ }//if
+}//Dbacc::increaselistcont()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LEFTLIST */
+/* INPUT: */
+/* TSL_PAGEINDEX PAGE INDEX OF CONTAINER TO SEIZE */
+/* SL_PAGEPTR PAGE POINTER OF CONTAINER TO SEIZE */
+/* TSL_UPDATE_HEADER SHOULD WE UPDATE THE CONTAINER HEADER */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
+/* LIST OF LEFT FREE CONTAINERS IN THE HEADER OF THE PAGE */
+/* (SL_PAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
+/* BUFFER WILL BE UPDATED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLeftlist(Signal* signal)
+{
+ Uint32 tsllTmp1;
+ Uint32 tsllNewHead;
+ Uint32 tsllHeadIndex;
+ Uint32 tsllTmp;
+
+ tsllHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ arrGuard(tsllHeadIndex + 1, 2048);
+ tslNextfree = slPageptr.p->word32[tsllHeadIndex];
+ tslPrevfree = slPageptr.p->word32[tsllHeadIndex + 1];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = slPageptr.p;
+ cundoElemIndex = tsllHeadIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (tslPrevfree == ZEMPTYLIST) {
+ jam();
+ /* UPDATE FREE LIST OF LEFT CONTAINER IN PAGE HEAD */
+ tsllTmp1 = slPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tsllTmp = tsllTmp1 & 0x7f;
+ tsllTmp1 = (tsllTmp1 >> 14) << 14;
+ tsllTmp1 = (tsllTmp1 | (tslNextfree << 7)) | tsllTmp;
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp1);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp1;
+ } else {
+ ndbrequire(tslPrevfree < ZEMPTYLIST);
+ jam();
+ tsllTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsllTmp, tslNextfree);
+ slPageptr.p->word32[tsllTmp] = tslNextfree;
+ }//if
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsllTmp = (((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE) + 1;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsllTmp, tslPrevfree);
+ slPageptr.p->word32[tsllTmp] = tslPrevfree;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
+ /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
+ /* LISTS IN THE PAGE. */
+ /* */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
+ /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
+ /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
+ /* WE ALSO SET BIT 25 TO INDICATE THAT IT IS A CONTAINER HEADER. */
+ /* --------------------------------------------------------------------------------- */
+ if (tslUpdateHeader == ZTRUE) {
+ jam();
+ tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ tsllNewHead = ZCON_HEAD_SIZE;
+ tsllNewHead = ((tsllNewHead << 8) + ZEMPTYLIST) + (1 << 7);
+ tsllNewHead = (tsllNewHead << 7) + tslNextfree;
+ tsllNewHead = tsllNewHead << 11;
+ dbgWord32(slPageptr, tsllHeadIndex, tsllNewHead);
+ slPageptr.p->word32[tsllHeadIndex] = tsllNewHead;
+ tsllTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
+ tsllTmp = tsllTmp | (tslPageindex << 23);
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp;
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsllTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsllTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tsllTmp1 = slPageptr.p->word32[tsllTmp] & 0xfe03ffff;
+ tsllTmp1 = tsllTmp1 | (tslPageindex << 18);
+ dbgWord32(slPageptr, tsllTmp, tsllTmp1);
+ slPageptr.p->word32[tsllTmp] = tsllTmp1;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ }//if
+ ilcPageptr.p = slPageptr.p;
+ increaselistcont(signal);
+}//Dbacc::seizeLeftlist()
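+
+// Illustrative sketch (hypothetical helpers, not part of the block): the comment above
+// describes how in-use containers are linked per page so that locked elements can be
+// found during a local checkpoint. Under that layout the fields can be read back as
+// follows (emptyListWord is the page header word at ZPOS_EMPTY_LIST, head a container
+// header word).
+static inline Uint32 usedRightListHead(Uint32 emptyListWord) { return (emptyListWord >> 16) & 0x7f; }
+static inline Uint32 usedLeftListHead(Uint32 emptyListWord)  { return (emptyListWord >> 23) & 0x7f; }
+static inline Uint32 usedListNext(Uint32 head)               { return (head >> 11) & 0x7f; }
+static inline Uint32 usedListPrev(Uint32 head)               { return (head >> 18) & 0x7f; }
+static inline bool   isContainerHead(Uint32 head)            { return ((head >> 25) & 1) != 0; }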
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_RIGHTLIST */
+/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */
+/* LIST OF RIGHT FREE CONTAINERS IN THE HEADER OF THE PAGE */
+/* (SL_PAGEPTR). THE PREVIOUS AND NEXT BUFFERS OF THE REMOVED */
+/* BUFFER WILL BE UPDATED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeRightlist(Signal* signal)
+{
+ Uint32 tsrlTmp1;
+ Uint32 tsrlNewHead;
+ Uint32 tsrlHeadIndex;
+ Uint32 tsrlTmp;
+
+ tsrlHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ arrGuard(tsrlHeadIndex + 1, 2048);
+ tslNextfree = slPageptr.p->word32[tsrlHeadIndex];
+ tslPrevfree = slPageptr.p->word32[tsrlHeadIndex + 1];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = slPageptr.p;
+ cundoElemIndex = tsrlHeadIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (tslPrevfree == ZEMPTYLIST) {
+ jam();
+ tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST];
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, ((tsrlTmp >> 7) << 7) | tslNextfree);
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = ((tsrlTmp >> 7) << 7) | tslNextfree;
+ } else {
+ ndbrequire(tslPrevfree < ZEMPTYLIST);
+ jam();
+ tsrlTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsrlTmp, tslNextfree);
+ slPageptr.p->word32[tsrlTmp] = tslNextfree;
+ }//if
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(slPageptr, tsrlTmp, tslPrevfree);
+ slPageptr.p->word32[tsrlTmp] = tslPrevfree;
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */
+ /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */
+ /* LISTS IN THE PAGE. */
+ /* */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */
+ /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */
+ /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */
+ /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */
+ /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */
+ /* --------------------------------------------------------------------------------- */
+ if (tslUpdateHeader == ZTRUE) {
+ jam();
+ tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ tsrlNewHead = ZCON_HEAD_SIZE;
+ tsrlNewHead = ((tsrlNewHead << 8) + ZEMPTYLIST) + (1 << 7);
+ tsrlNewHead = (tsrlNewHead << 7) + tslNextfree;
+ tsrlNewHead = tsrlNewHead << 11;
+ dbgWord32(slPageptr, tsrlHeadIndex, tsrlNewHead);
+ slPageptr.p->word32[tsrlHeadIndex] = tsrlNewHead;
+ tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
+ dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsrlTmp | (tslPageindex << 16));
+ slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsrlTmp | (tslPageindex << 16);
+ if (tslNextfree < ZEMPTYLIST) {
+ jam();
+ tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tsrlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tsrlTmp1 = slPageptr.p->word32[tsrlTmp] & 0xfe03ffff;
+ dbgWord32(slPageptr, tsrlTmp, tsrlTmp1 | (tslPageindex << 18));
+ slPageptr.p->word32[tsrlTmp] = tsrlTmp1 | (tslPageindex << 18);
+ } else {
+ ndbrequire(tslNextfree == ZEMPTYLIST);
+ jam();
+ }//if
+ }//if
+ ilcPageptr.p = slPageptr.p;
+ increaselistcont(signal);
+}//Dbacc::seizeRightlist()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF INSERT_ELEMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: GET_ELEMENT */
+/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */
+/* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */
+/* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND UNDO LOG EXECUTION. */
+/* */
+/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */
+/* GET_ELEMENT */
+/* GET_DIRINDEX */
+/* SEARCH_LONG_KEY */
+/* */
+/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */
+/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */
+/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */
+/* THOSE DEFINED AS INPUT AND OUTPUT IN GET_ELEMENT AND GETDIRINDEX. */
+/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */
+/* AND POINTER VARIABLES. */
+/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */
+/* FRAGMENT RECORD. THIS IS MORE OR LESS STATIC DURING A SIGNAL */
+/* EXECUTION. */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* GETDIRINDEX */
+/* SUPPORT ROUTINE FOR INSERT ELEMENT, GET ELEMENT AND COMMITDELETE */
+/* INPUT:FRAGRECPTR ( POINTER TO THE ACTIVE FRAGMENT REC) */
+/* OPERATION_REC_PTR (POINTER TO THE OPERATION REC). */
+/* */
+/* OUTPUT:GDI_PAGEPTR ( POINTER TO THE PAGE OF THE ELEMENT) */
+/* TGDI_PAGEINDEX ( INDEX OF THE ELEMENT IN THE PAGE). */
+/* */
+/* DESCRIPTION: CHECK THE HASH VALUE OF THE OPERATION REC AND CALCULATE */
+/* THE ADDRESS OF THE ELEMENT IN THE HASH TABLE (GDI_PAGEPTR, */
+/* TGDI_PAGEINDEX) ACCORDING TO LH3. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getdirindex(Signal* signal)
+{
+ DirRangePtr gdiDirRangePtr;
+ DirectoryarrayPtr gdiDirptr;
+ Uint32 tgdiTmp;
+ Uint32 tgdiAddress;
+
+ tgdiTmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; /* OBS K = 6 */
+ tgdiPageindex = operationRecPtr.p->hashValue & ((1 << fragrecptr.p->k) - 1);
+ tgdiTmp = operationRecPtr.p->hashValue >> tgdiTmp;
+ tgdiTmp = (tgdiTmp << fragrecptr.p->k) | tgdiPageindex;
+ tgdiAddress = tgdiTmp & fragrecptr.p->maxp;
+ gdiDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(gdiDirRangePtr, cdirrangesize, dirRange);
+ if (tgdiAddress < fragrecptr.p->p) {
+ jam();
+ tgdiAddress = tgdiTmp & ((fragrecptr.p->maxp << 1) | 1);
+ }//if
+ tgdiTmp = tgdiAddress >> fragrecptr.p->k;
+ arrGuard((tgdiTmp >> 8), 256);
+ gdiDirptr.i = gdiDirRangePtr.p->dirArray[tgdiTmp >> 8];
+ ptrCheckGuard(gdiDirptr, cdirarraysize, directoryarray);
+ gdiPageptr.i = gdiDirptr.p->pagep[tgdiTmp & 0xff]; /* DIRECTORY INDEX OF SEND BUCKET PAGE */
+ ptrCheckGuard(gdiPageptr, cpagesize, page8);
+}//Dbacc::getdirindex()
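+
+// Illustrative sketch (hypothetical helper, not part of the block): getdirindex()
+// above applies the classical linear hashing address rule to the rearranged hash
+// value it builds. Assuming maxp is a mask of the form 2^i - 1 and p is the bucket
+// split pointer, the rule in isolation is:
+static inline Uint32
+lhBucketAddress(Uint32 hashValue, Uint32 maxp, Uint32 p)
+{
+  Uint32 address = hashValue & maxp;           // address in the not yet doubled table
+  if (address < p) {
+    address = hashValue & ((maxp << 1) | 1);   // bucket already split: use one more bit
+  }
+  return address;
+}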
+
+Uint32
+Dbacc::readTablePk(Uint32 localkey1)
+{
+ Uint32 tableId = fragrecptr.p->myTableId;
+ Uint32 fragId = fragrecptr.p->myfid;
+ Uint32 fragPageId = localkey1 >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = localkey1 & ((1 << MAX_TUPLES_BITS ) - 1);
+#ifdef VM_TRACE
+ memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2);
+#endif
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, ckeys, true);
+ ndbrequire(ret > 0);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------------- */
+/* GET_ELEMENT */
+/* INPUT: */
+/* OPERATION_REC_PTR */
+/* FRAGRECPTR */
+/* OUTPUT: */
+/* TGE_RESULT RESULT SUCCESS = ZTRUE OTHERWISE ZFALSE */
+/* TGE_LOCKED LOCK INFORMATION IF SUCCESSFUL RESULT */
+/* GE_PAGEPTR PAGE POINTER OF FOUND ELEMENT */
+/* TGE_CONTAINERPTR CONTAINER INDEX OF FOUND ELEMENT */
+/* TGE_ELEMENTPTR ELEMENT INDEX OF FOUND ELEMENT */
+/* TGE_FORWARD DIRECTION OF CONTAINER WHERE ELEMENT FOUND */
+/* */
+/* DESCRIPTION: THE SUBROUTINE GOES THROUGH ALL CONTAINERS OF THE ACTIVE */
+/* BUCKET AND SEARCHES FOR THE ELEMENT. THE PRIMARY KEYS SAVED */
+/* IN THE OPERATION REC ARE THE CHECK ITEMS IN THE SEARCH. */
+/* --------------------------------------------------------------------------------- */
+
+#if __ia64 == 1
+#if __INTEL_COMPILER == 810
+int ndb_acc_ia64_icc810_dummy_var = 0;
+void ndb_acc_ia64_icc810_dummy_func()
+{
+ ndb_acc_ia64_icc810_dummy_var++;
+}
+#endif
+#endif
+
+void Dbacc::getElement(Signal* signal)
+{
+ DirRangePtr geOverflowrangeptr;
+ DirectoryarrayPtr geOverflowDirptr;
+ OperationrecPtr geTmpOperationRecPtr;
+ Uint32 tgeElementHeader;
+ Uint32 tgeElemStep;
+ Uint32 tgeContainerhead;
+ Uint32 tgePageindex;
+ Uint32 tgeActivePageDir;
+ Uint32 tgeNextptrtype;
+ register Uint32 tgeKeyptr;
+ register Uint32 tgeRemLen;
+ register Uint32 TelemLen = fragrecptr.p->elementLength;
+ register Uint32* Tkeydata = (Uint32*)&signal->theData[7];
+
+ getdirindex(signal);
+ tgePageindex = tgdiPageindex;
+ gePageptr = gdiPageptr;
+ tgeResult = ZFALSE;
+ /*
+ * The value searched for is
+ * - table key for ACCKEYREQ, stored in TUP
+ * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC
+ */
+ const bool searchLocalKey =
+ operationRecPtr.p->isAccLockReq || operationRecPtr.p->isUndoLogReq;
+
+ ndbrequire(TelemLen == ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen);
+ tgeNextptrtype = ZLEFT;
+ tgeLocked = 0;
+
+ const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits;
+ const Uint32 opHashValuePart = (operationRecPtr.p->hashValue >> tmp) &0xFFFF;
+ do {
+ tgeContainerptr = (tgePageindex << ZSHIFT_PLUS) - (tgePageindex << ZSHIFT_MINUS);
+ if (tgeNextptrtype == ZLEFT) {
+ jam();
+ tgeContainerptr = tgeContainerptr + ZHEAD_SIZE;
+ tgeElementptr = tgeContainerptr + ZCON_HEAD_SIZE;
+ tgeKeyptr = (tgeElementptr + ZELEM_HEAD_SIZE) + fragrecptr.p->localkeylen;
+ tgeElemStep = TelemLen;
+ tgeForward = 1;
+ if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
+ tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
+ if ((tgeContainerptr + tgeRemLen - 1) >= 2048) { ACCKEY_error(5); return;}
+ } else if (tgeNextptrtype == ZRIGHT) {
+ jam();
+ tgeContainerptr = tgeContainerptr + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tgeElementptr = tgeContainerptr - 1;
+ tgeKeyptr = (tgeElementptr - ZELEM_HEAD_SIZE) - fragrecptr.p->localkeylen;
+ tgeElemStep = 0 - TelemLen;
+ tgeForward = (Uint32)-1;
+ if (tgeContainerptr >= 2048) { ACCKEY_error(4); return;}
+ tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26;
+ if ((tgeContainerptr - tgeRemLen) >= 2048) { ACCKEY_error(5); return;}
+ } else {
+ ACCKEY_error(6); return;
+ }//if
+ if (tgeRemLen >= ZCON_HEAD_SIZE + TelemLen) {
+ if (tgeRemLen > ZBUF_SIZE) {
+ ACCKEY_error(7); return;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // There is at least one element in this container. Check if it is the element
+ // searched for.
+ /* --------------------------------------------------------------------------------- */
+ do {
+ tgeElementHeader = gePageptr.p->word32[tgeElementptr];
+ tgeRemLen = tgeRemLen - TelemLen;
+ Uint32 hashValuePart;
+ if (ElementHeader::getLocked(tgeElementHeader)) {
+ jam();
+ geTmpOperationRecPtr.i = ElementHeader::getOpPtrI(tgeElementHeader);
+ ptrCheckGuard(geTmpOperationRecPtr, coprecsize, operationrec);
+ hashValuePart = geTmpOperationRecPtr.p->hashvaluePart;
+ } else {
+ jam();
+ hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader);
+ }
+ if (hashValuePart == opHashValuePart) {
+ jam();
+ Uint32 localkey1 = gePageptr.p->word32[tgeElementptr + tgeForward];
+ Uint32 localkey2 = 0;
+ bool found;
+ if (! searchLocalKey) {
+ Uint32 len = readTablePk(localkey1);
+ found = (len == operationRecPtr.p->xfrmtupkeylen) &&
+ (memcmp(Tkeydata, ckeys, len << 2) == 0);
+ } else {
+ jam();
+ found = (localkey1 == Tkeydata[0]);
+ }
+ if (found) {
+ jam();
+ tgeLocked = ElementHeader::getLocked(tgeElementHeader);
+ tgeResult = ZTRUE;
+ operationRecPtr.p->localdata[0] = localkey1;
+ operationRecPtr.p->localdata[1] = localkey2;
+ return;
+ }
+ }
+ if (tgeRemLen <= ZCON_HEAD_SIZE) {
+ break;
+ }
+ tgeElementptr = tgeElementptr + tgeElemStep;
+ } while (true);
+ }//if
+ if (tgeRemLen != ZCON_HEAD_SIZE) {
+ ACCKEY_error(8); return;
+ }//if
+ tgeContainerhead = gePageptr.p->word32[tgeContainerptr];
+ tgeNextptrtype = (tgeContainerhead >> 7) & 0x3;
+ if (tgeNextptrtype == 0) {
+ jam();
+ return; /* NO MORE CONTAINER */
+ }//if
+ tgePageindex = tgeContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (tgePageindex > ZEMPTYLIST) {
+ ACCKEY_error(9); return;
+ }//if
+ if (((tgeContainerhead >> 9) & 1) == ZFALSE) {
+ jam();
+ tgeActivePageDir = gePageptr.p->word32[tgeContainerptr + 1]; /* NEXT PAGE ID */
+ geOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(geOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tgeActivePageDir >> 8), 256);
+ geOverflowDirptr.i = geOverflowrangeptr.p->dirArray[tgeActivePageDir >> 8];
+ ptrCheckGuard(geOverflowDirptr, cdirarraysize, directoryarray);
+ gePageptr.i = geOverflowDirptr.p->pagep[tgeActivePageDir & 0xff];
+ ptrCheckGuard(gePageptr, cpagesize, page8);
+ }//if
+ } while (1);
+ return;
+}//Dbacc::getElement()
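+
+// Illustrative sketch (hypothetical helper, not part of the block): the inner loop of
+// getElement() above first compares a hash value part (taken from the element header,
+// or from the owning operation record when the element is locked) and only on a match
+// fetches and compares the full key. A simplified version of that two-stage filter,
+// comparing the key word by word:
+static inline bool
+candidateMatches(Uint32 elemHashPart, Uint32 opHashPart,
+                 const Uint32* elemKey, const Uint32* searchKey, Uint32 keyLenWords)
+{
+  if (elemHashPart != opHashPart) {
+    return false;                              // cheap filter: hash parts differ
+  }
+  for (Uint32 i = 0; i < keyLenWords; i++) {   // hash parts match: confirm full key
+    if (elemKey[i] != searchKey[i]) {
+      return false;
+    }
+  }
+  return true;
+}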
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF GET_ELEMENT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* MODULE: DELETE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* COMMITDELETE */
+/* INPUT: OPERATION_REC_PTR, PTR TO AN OPERATION RECORD. */
+/* FRAGRECPTR, PTR TO A FRAGMENT RECORD */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* DESCRIPTION: DELETE OPERATIONS WILL BE COMPLETED AT TRANSACTION COMMIT. */
+/* THIS SUBROUTINE SEARCHES FOR THE ELEMENT AND DELETES IT. IT DOES SO BY */
+/* REPLACING IT WITH THE LAST ELEMENT IN THE BUCKET. IF THE DELETED ELEMENT */
+/* IS ALSO THE LAST ELEMENT THEN IT IS ONLY NECESSARY TO REMOVE THE ELEMENT. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::commitdelete(Signal* signal, bool systemRestart)
+{
+ if (!systemRestart) {
+ jam();
+ signal->theData[0] = fragrecptr.p->myfid;
+ signal->theData[1] = fragrecptr.p->myTableId;
+ signal->theData[2] = operationRecPtr.p->localdata[0];
+ Uint32 localKey = operationRecPtr.p->localdata[0];
+ Uint32 pageId = localKey >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = localKey & ((1 << MAX_TUPLES_BITS) - 1);
+ signal->theData[2] = pageId;
+ signal->theData[3] = pageIndex;
+ EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, 4);
+ jamEntry();
+ }//if
+ getdirindex(signal);
+ tlastPageindex = tgdiPageindex;
+ lastPageptr.i = gdiPageptr.i;
+ lastPageptr.p = gdiPageptr.p;
+ tlastForward = ZTRUE;
+ tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
+ tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ lastPrevpageptr.i = RNIL;
+ ptrNull(lastPrevpageptr);
+ tlastPrevconptr = 0;
+ getLastAndRemove(signal);
+
+ delPageptr.i = operationRecPtr.p->elementPage;
+ ptrCheckGuard(delPageptr, cpagesize, page8);
+ tdelElementptr = operationRecPtr.p->elementPointer;
+ /* --------------------------------------------------------------------------------- */
+ // Here we have to take extreme care since we do not want locks to end up after the
+ // log execution. Thus it is necessary to put back the element in unlocked shape.
+ // We thus update the element header to ensure we log an unlocked element. We do not
+ // need to restore it later since it is deleted immediately anyway.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = operationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ delPageptr.p->word32[tdelElementptr] = eh;
+ if (operationRecPtr.p->elementPage == lastPageptr.i) {
+ if (operationRecPtr.p->elementPointer == tlastElementptr) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT WAS THE ELEMENT TO BE DELETED. WE NEED NOT COPY IT. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE DELETED ELEMENT IS NOT THE LAST. WE READ THE LAST ELEMENT AND OVERWRITE THE */
+ /* DELETED ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ tdelContainerptr = operationRecPtr.p->elementContainer;
+ tdelForward = operationRecPtr.p->elementIsforward;
+ deleteElement(signal);
+}//Dbacc::commitdelete()
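+
+// Illustrative sketch (hypothetical, not part of the block): commitdelete() above
+// removes an element by overwriting it with the last element of the bucket and then
+// shrinking the last container, so no hole is left behind. The same idiom on a plain
+// array of single-word elements:
+static inline Uint32
+deleteByMovingLast(Uint32* elements, Uint32 count, Uint32 deleteIndex)
+{
+  // precondition: deleteIndex < count and count > 0
+  if (deleteIndex != count - 1) {
+    elements[deleteIndex] = elements[count - 1];   // overwrite the hole with the last element
+  }
+  return count - 1;                                // the bucket is now one element shorter
+}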
+
+/* --------------------------------------------------------------------------------- */
+/* DELETE_ELEMENT */
+/* INPUT: FRAGRECPTR, POINTER TO A FRAGMENT RECORD */
+/* LAST_PAGEPTR, POINTER TO THE PAGE OF THE LAST ELEMENT */
+/* DEL_PAGEPTR, POINTER TO THE PAGE OF THE DELETED ELEMENT */
+/* TLAST_ELEMENTPTR, ELEMENT POINTER OF THE LAST ELEMENT */
+/* TDEL_ELEMENTPTR, ELEMENT POINTER OF THE DELETED ELEMENT */
+/* TLAST_FORWARD, DIRECTION OF LAST ELEMENT */
+/* TDEL_FORWARD, DIRECTION OF DELETED ELEMENT */
+/* TDEL_CONTAINERPTR, CONTAINER POINTER OF DELETED ELEMENT */
+/* DESCRIPTION: COPY THE LAST ELEMENT OVER THE DELETED ELEMENT, UPDATE THE UNDO LOG */
+/* AND UPDATE ANY ACTIVE OPERATION ON THE MOVED ELEMENT. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::deleteElement(Signal* signal)
+{
+ OperationrecPtr deOperationRecPtr;
+ Uint32 tdeIndex;
+ Uint32 tlastMoveElemptr;
+ Uint32 tdelMoveElemptr;
+ Uint32 guard31;
+
+ if (tlastElementptr >= 2048)
+ goto deleteElement_index_error1;
+ {
+ const Uint32 tdeElemhead = lastPageptr.p->word32[tlastElementptr];
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ datapageptr.p = delPageptr.p;
+ cundoinfolength = fragrecptr.p->elementLength;
+ if (tdelForward == ZTRUE) {
+ jam();
+ cundoElemIndex = tdelElementptr;
+ } else {
+ jam();
+ cundoElemIndex = (tdelElementptr + 1) - fragrecptr.p->elementLength;
+ }//if
+ undoWritingProcess(signal);
+ }//if
+ tlastMoveElemptr = tlastElementptr;
+ tdelMoveElemptr = tdelElementptr;
+ guard31 = fragrecptr.p->elementLength - 1;
+ for (tdeIndex = 0; tdeIndex <= guard31; tdeIndex++) {
+ dbgWord32(delPageptr, tdelMoveElemptr, lastPageptr.p->word32[tlastMoveElemptr]);
+ if ((tlastMoveElemptr >= 2048) ||
+ (tdelMoveElemptr >= 2048))
+ goto deleteElement_index_error2;
+ delPageptr.p->word32[tdelMoveElemptr] = lastPageptr.p->word32[tlastMoveElemptr];
+ tdelMoveElemptr = tdelMoveElemptr + tdelForward;
+ tlastMoveElemptr = tlastMoveElemptr + tlastForward;
+ }//for
+ if (ElementHeader::getLocked(tdeElemhead)) {
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS LOCKED AND IS THUS REFERENCED BY AN OPERATION RECORD. WE NEED */
+ /* TO UPDATE THE OPERATION RECORD WITH THE NEW REFERENCE TO THE ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ deOperationRecPtr.i = ElementHeader::getOpPtrI(tdeElemhead);
+ ptrCheckGuard(deOperationRecPtr, coprecsize, operationrec);
+ if (cundoLogActive == ZFALSE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE DO NOT BOTHER WITH THIS INFORMATION DURING EXECUTION OF THE UNDO LOG. */
+ /* --------------------------------------------------------------------------------- */
+ deOperationRecPtr.p->elementPage = delPageptr.i;
+ deOperationRecPtr.p->elementContainer = tdelContainerptr;
+ deOperationRecPtr.p->elementPointer = tdelElementptr;
+ deOperationRecPtr.p->elementIsforward = tdelForward;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // We need to take extreme care to not install locked records after system restart.
+ // An undo of the delete will reinstall the moved record. We have to ensure that the
+ // lock is removed to ensure that no such thing happen.
+ /* --------------------------------------------------------------------------------- */
+ Uint32 eh = ElementHeader::setUnlocked(deOperationRecPtr.p->hashvaluePart,
+ 0);
+ lastPageptr.p->word32[tlastElementptr] = eh;
+ }//if
+ return;
+ }
+
+ deleteElement_index_error1:
+ arrGuard(tlastElementptr, 2048);
+ return;
+
+ deleteElement_index_error2:
+ arrGuard(tdelMoveElemptr + guard31, 2048);
+ arrGuard(tlastMoveElemptr, 2048);
+ return;
+
+}//Dbacc::deleteElement()
+
+/* --------------------------------------------------------------------------------- */
+/* GET_LAST_AND_REMOVE */
+/* INPUT: */
+/* LAST_PAGEPTR PAGE POINTER OF FIRST CONTAINER IN SEARCH OF LAST*/
+/* TLAST_CONTAINERPTR CONTAINER INDEX OF THE SAME */
+/* TLAST_CONTAINERHEAD CONTAINER HEADER OF THE SAME */
+/* TLAST_PAGEINDEX PAGE INDEX OF THE SAME */
+/* TLAST_FORWARD CONTAINER DIRECTION OF THE SAME */
+/* TLAST_CONTAINERLEN CONTAINER LENGTH OF THE SAME */
+/* LAST_PREVPAGEPTR PAGE POINTER OF PREVIOUS CONTAINER OF THE SAME */
+/* TLAST_PREVCONPTR CONTAINER INDEX OF PREVIOUS CONTAINER OF THE SAME*/
+/* */
+/* OUTPUT: */
+/* ALL VARIABLES FROM INPUT BUT NOW CONTAINING INFO ABOUT LAST */
+/* CONTAINER. */
+/* TLAST_ELEMENTPTR LAST ELEMENT POINTER IN LAST CONTAINER */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::getLastAndRemove(Signal* signal)
+{
+ DirRangePtr glrOverflowrangeptr;
+ DirectoryarrayPtr glrOverflowDirptr;
+ Uint32 tglrHead;
+ Uint32 tglrTmp;
+
+ GLR_LOOP_10:
+ if (((tlastContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ lastPrevpageptr.i = lastPageptr.i;
+ lastPrevpageptr.p = lastPageptr.p;
+ tlastPrevconptr = tlastContainerptr;
+ tlastPageindex = tlastContainerhead & 0x7f;
+ if (((tlastContainerhead >> 9) & 0x1) == ZFALSE) {
+ jam();
+ arrGuard(tlastContainerptr + 1, 2048);
+ tglrTmp = lastPageptr.p->word32[tlastContainerptr + 1];
+ glrOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(glrOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tglrTmp >> 8), 256);
+ glrOverflowDirptr.i = glrOverflowrangeptr.p->dirArray[tglrTmp >> 8];
+ ptrCheckGuard(glrOverflowDirptr, cdirarraysize, directoryarray);
+ lastPageptr.i = glrOverflowDirptr.p->pagep[tglrTmp & 0xff];
+ ptrCheckGuard(lastPageptr, cpagesize, page8);
+ }//if
+ tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS);
+ if (((tlastContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tlastForward = ZTRUE;
+ tlastContainerptr = tlastContainerptr + ZHEAD_SIZE;
+ } else if (((tlastContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ tlastForward = cminusOne;
+ tlastContainerptr = ((tlastContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ ndbrequire(tlastContainerlen >= ((Uint32)ZCON_HEAD_SIZE + fragrecptr.p->elementLength));
+ goto GLR_LOOP_10;
+ }//if
+ tlastContainerlen = tlastContainerlen - fragrecptr.p->elementLength;
+ if (tlastForward == ZTRUE) {
+ jam();
+ tlastElementptr = tlastContainerptr + tlastContainerlen;
+ } else {
+ jam();
+ tlastElementptr = (tlastContainerptr + (ZCON_HEAD_SIZE - 1)) - tlastContainerlen;
+ }//if
+ rlPageptr.i = lastPageptr.i;
+ rlPageptr.p = lastPageptr.p;
+ trlPageindex = tlastPageindex;
+ if (((tlastContainerhead >> 10) & 1) == 1) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE OWNERSHIP OF BOTH PARTS OF THE CONTAINER ENDS. */
+ /* --------------------------------------------------------------------------------- */
+ if (tlastContainerlen < ZDOWN_LIMIT) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE DECREASED THE SIZE BELOW THE DOWN LIMIT, WE MUST GIVE UP THE OTHER */
+ /* SIDE OF THE BUFFER. */
+ /* --------------------------------------------------------------------------------- */
+ tlastContainerhead = tlastContainerhead ^ (1 << 10);
+ trlRelCon = ZFALSE;
+ if (tlastForward == ZTRUE) {
+ jam();
+ turlIndex = tlastContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ } else {
+ jam();
+ tullIndex = tlastContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseLeftlist(signal);
+ }//if
+ }//if
+ }//if
+ if (tlastContainerlen <= 2) {
+ ndbrequire(tlastContainerlen == 2);
+ if (lastPrevpageptr.i != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST CONTAINER IS EMPTY AND IS NOT THE FIRST CONTAINER (THE FIRST IS NEVER REMOVED). */
+ /* DELETE THE LAST CONTAINER AND UPDATE THE PREVIOUS CONTAINER. ALSO PUT THIS */
+ /* CONTAINER IN FREE CONTAINER LIST OF THE PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = lastPrevpageptr.p;
+ cundoElemIndex = tlastPrevconptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ ndbrequire(tlastPrevconptr < 2048);
+ tglrTmp = lastPrevpageptr.p->word32[tlastPrevconptr] >> 9;
+ dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp << 9);
+ lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp << 9;
+ trlRelCon = ZTRUE;
+ if (tlastForward == ZTRUE) {
+ jam();
+ tullIndex = tlastContainerptr;
+ releaseLeftlist(signal);
+ } else {
+ jam();
+ turlIndex = tlastContainerptr;
+ releaseRightlist(signal);
+ }//if
+ return;
+ }//if
+ }//if
+ tglrHead = tlastContainerhead << 6;
+ tglrHead = tglrHead >> 6;
+ tglrHead = tglrHead | (tlastContainerlen << 26);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = lastPageptr.p;
+ cundoElemIndex = tlastContainerptr;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(lastPageptr, tlastContainerptr, tglrHead);
+ arrGuard(tlastContainerptr, 2048);
+ lastPageptr.p->word32[tlastContainerptr] = tglrHead;
+}//Dbacc::getLastAndRemove()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LEFTLIST */
+/* INPUT: */
+/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
+/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
+/* TULL_INDEX INDEX OF CONTAINER TO BE RELEASED */
+/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
+/* A PART IS RELEASED. */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* */
+/* THE FREE LIST OF LEFT FREE BUFFERS IN THE PAGE WILL BE UPDATED. */
+/* TULL_INDEX IS THE INDEX OF THE FIRST WORD IN THE LEFT SIDE OF THE BUFFER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLeftlist(Signal* signal)
+{
+ Uint32 tullTmp;
+ Uint32 tullTmp1;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = rlPageptr.p;
+ cundoElemIndex = tullIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
+ /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
+ /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (trlRelCon == ZTRUE) {
+ arrGuard(tullIndex, 2048);
+ trlHead = rlPageptr.p->word32[tullIndex];
+ trlNextused = (trlHead >> 11) & 0x7f;
+ trlPrevused = (trlHead >> 18) & 0x7f;
+ if (trlNextused < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
+ tullTmp1 = tullTmp1 + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfe03ffff;
+ dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlPrevused << 18));
+ rlPageptr.p->word32[tullTmp1] = tullTmp | (trlPrevused << 18);
+ } else {
+ ndbrequire(trlNextused == ZEMPTYLIST);
+ jam();
+ }//if
+ if (trlPrevused < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
+ tullTmp1 = tullTmp1 + ZHEAD_SIZE;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfffc07ff;
+ dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlNextused << 11));
+ rlPageptr.p->word32[tullTmp1] = tullTmp | (trlNextused << 11);
+ } else {
+ ndbrequire(trlPrevused == ZEMPTYLIST);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER. */
+ /* --------------------------------------------------------------------------------- */
+ tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff;
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp | (trlNextused << 23));
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp | (trlNextused << 23);
+ }//if
+ }//if
+ dbgWord32(rlPageptr, tullIndex + 1, ZEMPTYLIST);
+ arrGuard(tullIndex + 1, 2048);
+ rlPageptr.p->word32[tullIndex + 1] = ZEMPTYLIST;
+ tullTmp1 = (rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> 7) & 0x7f;
+ dbgWord32(rlPageptr, tullIndex, tullTmp1);
+ arrGuard(tullIndex, 2048);
+ rlPageptr.p->word32[tullIndex] = tullTmp1;
+ if (tullTmp1 < ZEMPTYLIST) {
+ jam();
+ tullTmp1 = (tullTmp1 << ZSHIFT_PLUS) - (tullTmp1 << ZSHIFT_MINUS);
+ tullTmp1 = (tullTmp1 + ZHEAD_SIZE) + 1;
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = tullTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(rlPageptr, tullTmp1, trlPageindex);
+ rlPageptr.p->word32[tullTmp1] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
+ } else {
+ ndbrequire(tullTmp1 == ZEMPTYLIST);
+ }//if
+ tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
+ tullTmp = (((tullTmp >> 14) << 14) | (trlPageindex << 7)) | (tullTmp & 0x7f);
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp);
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp;
+ dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
+ rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
+ ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
+ if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
+ jam();
+ colPageptr.i = rlPageptr.i;
+ colPageptr.p = rlPageptr.p;
+ ptrCheck(colPageptr, cpagesize, page8);
+ checkoverfreelist(signal);
+ }//if
+}//Dbacc::releaseLeftlist()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_RIGHTLIST */
+/* INPUT: */
+/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
+/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
+/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */
+/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
+/* A PART IS RELEASED. */
+/* */
+/* OUTPUT: */
+/* NONE */
+/* */
+/* THE FREE LIST OF RIGHT FREE BUFFERS IN THE PAGE WILL BE UPDATED. */
+/* TURL_INDEX IS THE INDEX OF THE FIRST WORD IN THE RIGHT SIDE OF */
+/* THE BUFFER, WHICH IS THE LAST WORD IN THE BUFFER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseRightlist(Signal* signal)
+{
+ Uint32 turlTmp1;
+ Uint32 turlTmp;
+
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = rlPageptr.p;
+ cundoElemIndex = turlIndex;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = ZPOS_EMPTY_LIST;
+ cundoinfolength = 2;
+ undoWritingProcess(signal);
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */
+ /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */
+ /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (trlRelCon == ZTRUE) {
+ jam();
+ arrGuard(turlIndex, 2048);
+ trlHead = rlPageptr.p->word32[turlIndex];
+ trlNextused = (trlHead >> 11) & 0x7f;
+ trlPrevused = (trlHead >> 18) & 0x7f;
+ if (trlNextused < ZEMPTYLIST) {
+ jam();
+ turlTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS);
+ turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfe03ffff;
+ dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlPrevused << 18));
+ rlPageptr.p->word32[turlTmp1] = turlTmp | (trlPrevused << 18);
+ } else {
+ ndbrequire(trlNextused == ZEMPTYLIST);
+ jam();
+ }//if
+ if (trlPrevused < ZEMPTYLIST) {
+ jam();
+ turlTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS);
+ turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp1;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfffc07ff;
+ dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlNextused << 11));
+ rlPageptr.p->word32[turlTmp1] = turlTmp | (trlNextused << 11);
+ } else {
+ ndbrequire(trlPrevused == ZEMPTYLIST);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER */
+ /* OF THE RIGHT CONTAINER LIST. */
+ /* --------------------------------------------------------------------------------- */
+ turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff;
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, turlTmp | (trlNextused << 16));
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = turlTmp | (trlNextused << 16);
+ }//if
+ }//if
+ dbgWord32(rlPageptr, turlIndex + 1, ZEMPTYLIST);
+ arrGuard(turlIndex + 1, 2048);
+ rlPageptr.p->word32[turlIndex + 1] = ZEMPTYLIST;
+ turlTmp1 = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0x7f;
+ dbgWord32(rlPageptr, turlIndex, turlTmp1);
+ arrGuard(turlIndex, 2048);
+ rlPageptr.p->word32[turlIndex] = turlTmp1;
+ if (turlTmp1 < ZEMPTYLIST) {
+ jam();
+ turlTmp = (turlTmp1 << ZSHIFT_PLUS) - (turlTmp1 << ZSHIFT_MINUS);
+ turlTmp = turlTmp + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1));
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ cundoElemIndex = turlTmp;
+ cundoinfolength = 1;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(rlPageptr, turlTmp, trlPageindex);
+ rlPageptr.p->word32[turlTmp] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */
+ } else {
+ ndbrequire(turlTmp1 == ZEMPTYLIST);
+ }//if
+ turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST];
+ dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, ((turlTmp >> 7) << 7) | trlPageindex);
+ rlPageptr.p->word32[ZPOS_EMPTY_LIST] = ((turlTmp >> 7) << 7) | trlPageindex;
+ dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1);
+ rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1;
+ ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL);
+ if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) {
+ jam();
+ colPageptr.i = rlPageptr.i;
+ colPageptr.p = rlPageptr.p;
+ checkoverfreelist(signal);
+ }//if
+}//Dbacc::releaseRightlist()
+
+/* --------------------------------------------------------------------------------- */
+/* CHECKOVERFREELIST */
+/* INPUT: COL_PAGEPTR, POINTER OF AN OVERFLOW PAGE RECORD. */
+/* DESCRIPTION: CHECKS IF THE PAGE HAS TO BE PUT IN THE FREE LIST OF OVERFLOW */
+/* PAGES. WHEN IT HAS TO, AN OVERFLOW REC PTR WILL BE ALLOCATED */
+/* TO KEEP INFORMATION ABOUT THE PAGE. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checkoverfreelist(Signal* signal)
+{
+ Uint32 tcolTmp;
+
+ if (fragrecptr.p->loadingFlag == ZFALSE) {
+ tcolTmp = colPageptr.p->word32[ZPOS_ALLOC_CONTAINERS];
+ if (tcolTmp <= ZFREE_LIMIT) {
+ if (tcolTmp == 0) {
+ jam();
+ ropPageptr = colPageptr;
+ releaseOverpage(signal);
+ } else {
+ jam();
+ if (colPageptr.p->word32[ZPOS_OVERFLOWREC] == RNIL) {
+ ndbrequire(cfirstfreeoverrec != RNIL);
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = colPageptr.p->word32[ZPOS_PAGE_ID];
+ sorOverflowRecPtr.p->overpage = colPageptr.i;
+ dbgWord32(colPageptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
+ colPageptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
+ porOverflowRecPtr = sorOverflowRecPtr;
+ putOverflowRecInFrag(signal);
+ }//if
+ }//if
+ }//if
+ }//if
+}//Dbacc::checkoverfreelist()
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* END OF DELETE MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* */
+/* COMMIT AND ABORT MODULE */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ABORT_OPERATION */
+/*DESCRIPTION: AN OPERATION RECORD CAN BE IN THE LOCK QUEUE OF AN ELEMENT OR */
+/*OWN THE LOCK. THIS SUBROUTINE CHECKS THE LOCK STATE OF THE OPERATION. */
+/*THE OPERATION RECORD WILL BE REMOVED FROM THE QUEUE IF IT BELONGED TO */
+/*ONE, OTHERWISE THE ELEMENT HEAD WILL BE UPDATED. */
+/* ------------------------------------------------------------------------- */
+void Dbacc::abortOperation(Signal* signal)
+{
+ OperationrecPtr aboOperRecPtr;
+ OperationrecPtr TaboOperRecPtr;
+ Page8Ptr aboPageidptr;
+ Uint32 taboElementptr;
+ Uint32 tmp2Olq;
+
+ if (operationRecPtr.p->lockOwner == ZTRUE) {
+ takeOutLockOwnersList(signal, operationRecPtr);
+ if (operationRecPtr.p->insertIsDone == ZTRUE) {
+ jam();
+ operationRecPtr.p->elementIsDisappeared = ZTRUE;
+ }//if
+ if ((operationRecPtr.p->nextParallelQue != RNIL) ||
+ (operationRecPtr.p->nextSerialQue != RNIL)) {
+ jam();
+ releaselock(signal);
+ } else {
+ /* --------------------------------------------------------------------------------- */
+ /* WE ARE OWNER OF THE LOCK AND NO OTHER OPERATIONS ARE QUEUED. IF INSERT OR STANDBY */
+ /* WE DELETE THE ELEMENT OTHERWISE WE REMOVE THE LOCK FROM THE ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ if (operationRecPtr.p->elementIsDisappeared == ZFALSE) {
+ jam();
+ taboElementptr = operationRecPtr.p->elementPointer;
+ aboPageidptr.i = operationRecPtr.p->elementPage;
+ tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
+ operationRecPtr.p->scanBits);
+ ptrCheckGuard(aboPageidptr, cpagesize, page8);
+ dbgWord32(aboPageidptr, taboElementptr, tmp2Olq);
+ arrGuard(taboElementptr, 2048);
+ aboPageidptr.p->word32[taboElementptr] = tmp2Olq;
+ return;
+ } else {
+ jam();
+ commitdelete(signal, false);
+ }//if
+ }//if
+ } else {
+ /* --------------------------------------------------------------- */
+ // We are not the lock owner.
+ /* --------------------------------------------------------------- */
+ jam();
+ takeOutFragWaitQue(signal);
+ if (operationRecPtr.p->prevParallelQue != RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------------------- */
+ /* SINCE WE ARE NOT QUEUE LEADER WE NEED NOT CONSIDER IF THE ELEMENT IS TO BE DELETED.*/
+ /* We will simply remove it from the parallel list without any other rearrangements. */
+ /* ---------------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
+ }//if
+ } else if (operationRecPtr.p->prevSerialQue != RNIL) {
+ /* ------------------------------------------------------------------------- */
+ // We are not in the parallel queue owning the lock. Thus we are in another parallel
+ // queue further down in the serial queue. We are, however, first in that queue since
+ // prevParallelQue == RNIL.
+ /* ------------------------------------------------------------------------- */
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* ------------------------------------------------------------------------- */
+ // We have an operation in the queue after us. We simply rearrange this parallel queue.
+ // The new leader of this parallel queue will be operation in the serial queue.
+ /* ------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
+ aboOperRecPtr.p->prevParallelQue = RNIL; // Queue Leader
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ TaboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
+ TaboOperRecPtr.p->prevSerialQue = aboOperRecPtr.i;
+ }//if
+ TaboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
+ ptrCheckGuard(TaboOperRecPtr, coprecsize, operationrec);
+ TaboOperRecPtr.p->nextSerialQue = aboOperRecPtr.i;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------------- */
+ // We are the only operation in this parallel queue. We will thus shrink the serial
+ // queue.
+ /* ------------------------------------------------------------------------- */
+ aboOperRecPtr.i = operationRecPtr.p->prevSerialQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ aboOperRecPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(aboOperRecPtr, coprecsize, operationrec);
+ aboOperRecPtr.p->prevSerialQue = operationRecPtr.p->prevSerialQue;
+ }//if
+ }//if
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------- */
+ // If prevParallelQue = RNIL and prevSerialQue = RNIL and we are not owner of the
+ // lock then we cannot be in any lock queue at all.
+ /* ------------------------------------------------------------------------- */
+}//Dbacc::abortOperation()
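+
+/* --------------------------------------------------------------------------------- */
+/* The non-lock-owner branch above is a plain unlink from a doubly linked list that  */
+/* is threaded through the operation records via prevParallelQue/nextParallelQue.    */
+/* The following stand-alone sketch shows the same unlink with simplified stand-in   */
+/* types; Op and NIL are illustrative assumptions, not the real Dbacc records, and   */
+/* the sketch is kept outside compilation.                                            */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+static const uint32_t NIL = 0xFFFFFFFF;     // stand-in for RNIL
+
+struct Op {                                 // simplified operation record
+  uint32_t prevParallelQue;
+  uint32_t nextParallelQue;
+};
+
+// Unlink ops[i] from the parallel queue; the caller is not the queue leader.
+static void unlinkFromParallelQueue(Op* ops, uint32_t i)
+{
+  const uint32_t prev = ops[i].prevParallelQue;
+  const uint32_t next = ops[i].nextParallelQue;
+  assert(prev != NIL);
+  ops[prev].nextParallelQue = next;
+  if (next != NIL)
+    ops[next].prevParallelQue = prev;
+}
+#endif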
+
+void Dbacc::commitDeleteCheck()
+{
+ OperationrecPtr opPtr;
+ OperationrecPtr lastOpPtr;
+ OperationrecPtr deleteOpPtr;
+ bool elementDeleted = false;
+ bool deleteCheckOngoing = true;
+ Uint32 hashValue = 0;
+ lastOpPtr = operationRecPtr;
+ opPtr.i = operationRecPtr.p->nextParallelQue;
+ while (opPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(opPtr, coprecsize, operationrec);
+ lastOpPtr = opPtr;
+ opPtr.i = opPtr.p->nextParallelQue;
+ }//while
+ deleteOpPtr = lastOpPtr;
+ do {
+ if (deleteOpPtr.p->operation == ZDELETE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE CURRENT OPERATION TO BE COMMITTED IS A DELETE OPERATION DUE TO A */
+ /* SCAN-TAKEOVER THE ACTUAL DELETE WILL BE PERFORMED BY THE PREVIOUS OPERATION (SCAN)*/
+ /* IN THE PARALLEL QUEUE WHICH OWNS THE LOCK. THE PROBLEM IS THAT THE SCAN OPERATION */
+ /* DOES NOT HAVE A HASH VALUE ASSIGNED TO IT SO WE COPY IT FROM THIS OPERATION. */
+ /* */
+ /* WE ASSUME THAT THIS SOLUTION WILL WORK BECAUSE THE ONLY WAY A SCAN CAN PERFORM */
+ /* A DELETE IS BY BEING FOLLOWED BY A NORMAL DELETE-OPERATION THAT HAS A HASH VALUE. */
+ /* --------------------------------------------------------------------------------- */
+ hashValue = deleteOpPtr.p->hashValue;
+ elementDeleted = true;
+ deleteCheckOngoing = false;
+ } else if ((deleteOpPtr.p->operation == ZREAD) ||
+ (deleteOpPtr.p->operation == ZSCAN_OP)) {
+ /* --------------------------------------------------------------------------------- */
+ /* We are trying to find out whether the commit will in the end delete the tuple. */
+ /* Normally the delete will be the last operation in the list of operations on this element. */
+ /* It is however possible to issue reads and scans in the same savepoint as the */
+ /* delete operation was issued and these can end up after the delete in the list of */
+ /* operations in the parallel queue. Thus if we discover a read or a scan we have to */
+ /* continue scanning the list looking for a delete operation. */
+ /* --------------------------------------------------------------------------------- */
+ deleteOpPtr.i = deleteOpPtr.p->prevParallelQue;
+ if (deleteOpPtr.i == RNIL) {
+ jam();
+ deleteCheckOngoing = false;
+ } else {
+ jam();
+ ptrCheckGuard(deleteOpPtr, coprecsize, operationrec);
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* Finding an UPDATE or INSERT before finding a DELETE means we cannot be deleting */
+ /* as the end result of this transaction. */
+ /* --------------------------------------------------------------------------------- */
+ deleteCheckOngoing = false;
+ }//if
+ } while (deleteCheckOngoing);
+ opPtr = lastOpPtr;
+ do {
+ jam();
+ opPtr.p->commitDeleteCheckFlag = ZTRUE;
+ if (elementDeleted) {
+ jam();
+ opPtr.p->elementIsDisappeared = ZTRUE;
+ opPtr.p->hashValue = hashValue;
+ }//if
+ opPtr.i = opPtr.p->prevParallelQue;
+ if (opPtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ ptrCheckGuard(opPtr, coprecsize, operationrec);
+ } while (true);
+}//Dbacc::commitDeleteCheck()
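+
+/* --------------------------------------------------------------------------------- */
+/* commitDeleteCheck above walks the parallel queue from its last operation towards  */
+/* the lock owner, skipping reads and scans, until it meets either a delete (the     */
+/* element will disappear) or an insert/update (the element stays). The stand-alone  */
+/* sketch below expresses only that backwards scan with simplified stand-in types;   */
+/* the names are illustrative assumptions, not the real Dbacc structures.            */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <vector>
+
+enum OpType { READ, SCAN, INSERT, UPDATE, DEL };
+
+// ops holds the parallel queue in order: lock owner first, last operation last.
+// Returns true if committing the whole queue ends up deleting the element.
+static bool endsInDelete(const std::vector<OpType>& ops)
+{
+  for (size_t i = ops.size(); i-- > 0; ) {
+    if (ops[i] == DEL)
+      return true;                  // a delete decides the end result
+    if (ops[i] == READ || ops[i] == SCAN)
+      continue;                     // reads/scans may follow the delete in the queue
+    return false;                   // insert/update found first: element stays
+  }
+  return false;                     // only reads/scans: nothing is deleted
+}
+#endif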
+
+/* ------------------------------------------------------------------------- */
+/* COMMIT_OPERATION */
+/* INPUT: OPERATION_REC_PTR, POINTER TO AN OPERATION RECORD */
+/* DESCRIPTION: THE OPERATION RECORD WILL BE TAKEN OUT OF ANY LOCK QUEUE. */
+/* IF IT OWNS THE ELEMENT LOCK, THE HEAD OF THE ELEMENT WILL BE UPDATED. */
+/* ------------------------------------------------------------------------- */
+void Dbacc::commitOperation(Signal* signal)
+{
+ OperationrecPtr tolqTmpPtr;
+ Page8Ptr coPageidptr;
+ Uint32 tcoElementptr;
+ Uint32 tmp2Olq;
+
+ if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) &&
+ (operationRecPtr.p->operation != ZSCAN_OP) &&
+ (operationRecPtr.p->operation != ZREAD)) {
+ jam();
+ /* This method is used to check whether the end result of the transaction
+ will be to delete the tuple. In this case all operations will be marked
+ with elementIsDisappeared = true to ensure that the last operation
+ committed will remove the tuple. We only run this once per transaction
+ (commitDeleteCheckFlag = true if performed earlier) and we don't
+ execute this code when committing a scan operation since committing
+ a scan operation only means that the scan is continuing and the scan
+ lock is released.
+ */
+ commitDeleteCheck();
+ }//if
+ if (operationRecPtr.p->lockOwner == ZTRUE) {
+ takeOutLockOwnersList(signal, operationRecPtr);
+ if ((operationRecPtr.p->nextParallelQue == RNIL) &&
+ (operationRecPtr.p->nextSerialQue == RNIL) &&
+ (operationRecPtr.p->elementIsDisappeared == ZFALSE)) {
+ /*
+ This is the normal path through the commit for operations owning the
+ lock without any queues and not a delete operation.
+ */
+ coPageidptr.i = operationRecPtr.p->elementPage;
+ tcoElementptr = operationRecPtr.p->elementPointer;
+ tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart,
+ operationRecPtr.p->scanBits);
+ ptrCheckGuard(coPageidptr, cpagesize, page8);
+ dbgWord32(coPageidptr, tcoElementptr, tmp2Olq);
+ arrGuard(tcoElementptr, 2048);
+ coPageidptr.p->word32[tcoElementptr] = tmp2Olq;
+ return;
+ } else if ((operationRecPtr.p->nextParallelQue != RNIL) ||
+ (operationRecPtr.p->nextSerialQue != RNIL)) {
+ jam();
+ /*
+ The case when there is a queue lined up.
+ Release the lock and pass it to the next operation lined up.
+ */
+ releaselock(signal);
+ return;
+ } else {
+ jam();
+ /*
+ No queue and elementIsDisappeared is true. We perform the actual delete
+ operation.
+ */
+ commitdelete(signal, false);
+ return;
+ }//if
+ } else {
+ /*
+ THE OPERATION DOES NOT OWN THE LOCK. IT MUST BE IN A LOCK QUEUE OF THE
+ ELEMENT.
+ */
+ ndbrequire(operationRecPtr.p->prevParallelQue != RNIL);
+ jam();
+ tolqTmpPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
+ tolqTmpPtr.p->nextParallelQue = operationRecPtr.p->nextParallelQue;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ tolqTmpPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec);
+ tolqTmpPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue;
+ }//if
+ }//if
+}//Dbacc::commitOperation()
+
+/* ------------------------------------------------------------------------- */
+/* RELEASELOCK */
+/* RESETS LOCK OF AN ELEMENT. */
+/* INFORMATION ABOUT THE ELEMENT IS SAVED IN THE OPERATION RECORD. */
+/* THIS INFORMATION IS USED TO UPDATE THE HEADER OF THE ELEMENT. */
+/* ------------------------------------------------------------------------- */
+void Dbacc::releaselock(Signal* signal)
+{
+ OperationrecPtr rloOperPtr;
+ OperationrecPtr trlOperPtr;
+ OperationrecPtr trlTmpOperPtr;
+ Uint32 TelementIsDisappeared;
+
+ trlOperPtr.i = RNIL;
+ if (operationRecPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* NEXT OPERATION TAKES OVER THE LOCK. We will simply move the info from the leader */
+ // to the new queue leader.
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.i = operationRecPtr.p->nextParallelQue;
+ ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
+ copyInOperPtr = trlOperPtr;
+ copyOperPtr = operationRecPtr;
+ copyOpInfo(signal);
+ trlOperPtr.p->prevParallelQue = RNIL;
+ if (operationRecPtr.p->nextSerialQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THERE IS A SERIAL QUEUE. MOVE IT FROM RELEASED OP REC TO THE NEW LOCK OWNER. */
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.p->nextSerialQue = operationRecPtr.p->nextSerialQue;
+ trlTmpOperPtr.i = trlOperPtr.p->nextSerialQue;
+ ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
+ trlTmpOperPtr.p->prevSerialQue = trlOperPtr.i;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* SINCE THERE ARE STILL ITEMS IN THE PARALLEL QUEUE WE NEED NOT WORRY ABOUT */
+ /* STARTING QUEUED OPERATIONS. THUS WE CAN END HERE. */
+ /* --------------------------------------------------------------------------------- */
+ } else {
+ ndbrequire(operationRecPtr.p->nextSerialQue != RNIL);
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE PARALLEL QUEUE IS EMPTY AND THE SERIAL QUEUE IS NOT EMPTY. WE NEED TO */
+ /* REARRANGE LISTS AND START A NUMBER OF OPERATIONS. */
+ /* --------------------------------------------------------------------------------- */
+ trlOperPtr.i = operationRecPtr.p->nextSerialQue;
+ ptrCheckGuard(trlOperPtr, coprecsize, operationrec);
+ copyOperPtr = operationRecPtr;
+ copyInOperPtr = trlOperPtr;
+ copyOpInfo(signal);
+ trlOperPtr.p->prevSerialQue = RNIL;
+ ndbrequire(trlOperPtr.p->prevParallelQue == RNIL);
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE MOVED TO THE NEXT PARALLEL QUEUE. WE MUST START ALL OF THOSE */
+ /* OPERATIONS WHICH UP TILL NOW HAVE BEEN QUEUED WAITING FOR THE LOCK. */
+ /* --------------------------------------------------------------------------------- */
+ rloOperPtr = operationRecPtr;
+ trlTmpOperPtr = trlOperPtr;
+ TelementIsDisappeared = trlOperPtr.p->elementIsDisappeared;
+ Uint32 ThashValue = trlOperPtr.p->hashValue;
+ do {
+ /* --------------------------------------------------------------------------------- */
+ // Ensure that all operations in the queue are assigned with the elementIsDisappeared
+ // to ensure that the element is removed after a previous delete. An insert does
+ // however revert this decision since the element is put back again. Local checkpoints
+ // complicate life here since they do not execute the next operation but simply change
+ // the state on the operation. We need to set up the variable elementIsDisappeared
+ // properly even when local checkpoints and inserts/writes after deletes occur.
+ /* --------------------------------------------------------------------------------- */
+ trlTmpOperPtr.p->elementIsDisappeared = TelementIsDisappeared;
+ if (TelementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ // If the elementIsDisappeared is set then we know that the hashValue is also set
+ // since it always originates from a committing abort or an aborting insert. Scans
+ // do not initialise the hashValue and must have this value initialised if they are
+ // to successfully commit the delete.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ trlTmpOperPtr.p->hashValue = ThashValue;
+ }//if
+ trlTmpOperPtr.p->localdata[0] = trlOperPtr.p->localdata[0];
+ trlTmpOperPtr.p->localdata[1] = trlOperPtr.p->localdata[1];
+ /* --------------------------------------------------------------------------------- */
+ // Restart the queued operation.
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr = trlTmpOperPtr;
+ TelementIsDisappeared = executeNextOperation(signal);
+ ThashValue = operationRecPtr.p->hashValue;
+ if (trlTmpOperPtr.p->nextParallelQue != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // We will continue with the next operation in the parallel queue and start this as
+ // well.
+ /* --------------------------------------------------------------------------------- */
+ trlTmpOperPtr.i = trlTmpOperPtr.p->nextParallelQue;
+ ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec);
+ } else {
+ jam();
+ break;
+ }//if
+ } while (1);
+ operationRecPtr = rloOperPtr;
+ }//if
+
+ // Insert the next op into the lock owner list
+ insertLockOwnersList(signal, trlOperPtr);
+ return;
+}//Dbacc::releaselock()
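+
+/* --------------------------------------------------------------------------------- */
+/* releaselock above hands the element lock to the next operation in the parallel    */
+/* queue if there is one, otherwise to the head of the serial queue (in which case   */
+/* the whole new parallel queue is restarted). The sketch below shows only that      */
+/* successor choice, using simplified stand-in types that are illustrative           */
+/* assumptions.                                                                       */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cstdint>
+
+static const uint32_t NIL = 0xFFFFFFFF;     // stand-in for RNIL
+
+struct Op {
+  uint32_t nextParallelQue;
+  uint32_t nextSerialQue;
+};
+
+// Returns the record index of the operation that takes over the lock.
+static uint32_t chooseNewLockOwner(const Op& owner)
+{
+  if (owner.nextParallelQue != NIL)
+    return owner.nextParallelQue;   // same parallel queue, no restart needed
+  return owner.nextSerialQue;       // head of the first queued parallel queue
+}
+#endif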
+
+/* --------------------------------------------------------------------------------- */
+/* COPY_OP_INFO */
+/* INPUT: COPY_IN_OPER_PTR AND COPY_OPER_PTR. */
+/* DESCRIPTION: INFORMATION ABOUT THE ELEMENT WILL BE MOVED FROM THE OPERATION */
+/* REC TO THE QUEUE OP REC. THE QUEUE OP REC TAKES OVER THE LOCK. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::copyOpInfo(Signal* signal)
+{
+ Page8Ptr coiPageidptr;
+
+ copyInOperPtr.p->elementPage = copyOperPtr.p->elementPage;
+ copyInOperPtr.p->elementIsforward = copyOperPtr.p->elementIsforward;
+ copyInOperPtr.p->elementContainer = copyOperPtr.p->elementContainer;
+ copyInOperPtr.p->elementPointer = copyOperPtr.p->elementPointer;
+ copyInOperPtr.p->scanBits = copyOperPtr.p->scanBits;
+ copyInOperPtr.p->hashvaluePart = copyOperPtr.p->hashvaluePart;
+ copyInOperPtr.p->elementIsDisappeared = copyOperPtr.p->elementIsDisappeared;
+ if (copyInOperPtr.p->elementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ // If the elementIsDisappeared is set then we know that the hashValue is also set
+ // since it always originates from a committing abort or an aborting insert. Scans
+ // do not initialise the hashValue and must have this value initialised if they are
+ // to successfully commit the delete.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ copyInOperPtr.p->hashValue = copyOperPtr.p->hashValue;
+ }//if
+ coiPageidptr.i = copyOperPtr.p->elementPage;
+ ptrCheckGuard(coiPageidptr, cpagesize, page8);
+ const Uint32 tmp = ElementHeader::setLocked(copyInOperPtr.i);
+ dbgWord32(coiPageidptr, copyOperPtr.p->elementPointer, tmp);
+ arrGuard(copyOperPtr.p->elementPointer, 2048);
+ coiPageidptr.p->word32[copyOperPtr.p->elementPointer] = tmp;
+ copyInOperPtr.p->localdata[0] = copyOperPtr.p->localdata[0];
+ copyInOperPtr.p->localdata[1] = copyOperPtr.p->localdata[1];
+}//Dbacc::copyOpInfo()
+
+/* ******************--------------------------------------------------------------- */
+/* EXECUTE NEXT OPERATION */
+/* NEXT OPERATION IN A LOCK QUEUE WILL BE EXECUTED. */
+/* --------------------------------------------------------------------------------- */
+Uint32 Dbacc::executeNextOperation(Signal* signal)
+{
+ ndbrequire(operationRecPtr.p->transactionstate == ACTIVE);
+ if (fragrecptr.p->stopQueOp == ZTRUE) {
+ Uint32 TelemDisappeared;
+ jam();
+ TelemDisappeared = operationRecPtr.p->elementIsDisappeared;
+ if ((operationRecPtr.p->elementIsDisappeared == ZTRUE) &&
+ (operationRecPtr.p->prevParallelQue == RNIL) &&
+ ((operationRecPtr.p->operation == ZINSERT) ||
+ (operationRecPtr.p->operation == ZWRITE))) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // In this case we do not wish to change the elementIsDisappeared since that would
+ // create an error the next time this method is called for this operation after local
+ // checkpoint starts up operations again. We must however ensure that operations
+ // that follow in the queue do not get the value ZTRUE when actually an INSERT/WRITE
+ // precedes them (only if the INSERT/WRITE is the first operation).
+ /* --------------------------------------------------------------------------------- */
+ TelemDisappeared = ZFALSE;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* A LOCAL CHECKPOINT HAS STOPPED OPERATIONS. WE MUST NOT START THE OPERATION */
+ /* AT THIS TIME. WE SET THE STATE TO INDICATE THAT WE ARE READY TO START AS */
+ /* SOON AS WE ARE ALLOWED. */
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->opState = WAIT_EXE_OP;
+ return TelemDisappeared;
+ }//if
+ takeOutFragWaitQue(signal);
+ if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
+ /* --------------------------------------------------------------------------------- */
+ /* PREVIOUS OPERATION WAS DELETE OPERATION AND THE ELEMENT IS ALREADY DELETED. */
+ /* --------------------------------------------------------------------------------- */
+ if (((operationRecPtr.p->operation != ZINSERT) &&
+ (operationRecPtr.p->operation != ZWRITE)) ||
+ (operationRecPtr.p->prevParallelQue != RNIL)) {
+ if (operationRecPtr.p->operation != ZSCAN_OP ||
+ operationRecPtr.p->isAccLockReq) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // Updates and reads with a previous delete simply abort with a read error indicating
+ // that the tuple did not exist. The same applies to inserts and writes that are not
+ // the first operation in the queue.
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = ZREAD_ERROR;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
+ return operationRecPtr.p->elementIsDisappeared;
+ } else {
+ /* --------------------------------------------------------------------------------- */
+ /* ABORT OF OPERATION NEEDED BUT THE OPERATION IS A SCAN => SPECIAL TREATMENT. */
+ /* IF THE SCAN WAITS IN QUEUE THEN WE MUST REMOVE THE OPERATION FROM THE SCAN */
+ /* LOCK QUEUE AND IF NO MORE OPERATIONS ARE QUEUED THEN WE SHOULD RESTART THE */
+ /* SCAN PROCESS. OTHERWISE WE SIMPLY RELEASE THE OPERATION AND DECREASE THE */
+ /* NUMBER OF LOCKS HELD. */
+ /* --------------------------------------------------------------------------------- */
+ takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
+ putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
+ return operationRecPtr.p->elementIsDisappeared;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // Insert and writes can continue but need to be converted to inserts.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->operation = ZINSERT;
+ operationRecPtr.p->insertIsDone = ZTRUE;
+ } else if (operationRecPtr.p->operation == ZINSERT) {
+ bool abortFlag = true;
+ if (operationRecPtr.p->prevParallelQue != RNIL) {
+ OperationrecPtr prevOpPtr;
+ jam();
+ prevOpPtr.i = operationRecPtr.p->prevParallelQue;
+ ptrCheckGuard(prevOpPtr, coprecsize, operationrec);
+ if (prevOpPtr.p->operation == ZDELETE) {
+ jam();
+ abortFlag = false;
+ }//if
+ }//if
+ if (abortFlag) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* ELEMENT STILL REMAINS AND WE ARE TRYING TO INSERT IT AGAIN. THIS IS CLEARLY */
+ /* NOT A GOOD IDEA. */
+ /* --------------------------------------------------------------------------------- */
+ operationRecPtr.p->transactionstate = WAIT_COMMIT_ABORT;
+ signal->theData[0] = operationRecPtr.p->userptr;
+ signal->theData[1] = ZWRITE_ERROR;
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB);
+ return operationRecPtr.p->elementIsDisappeared;
+ }//if
+ }//if
+ if (operationRecPtr.p->operation == ZSCAN_OP &&
+ ! operationRecPtr.p->isAccLockReq) {
+ jam();
+ takeOutScanLockQueue(operationRecPtr.p->scanRecPtr);
+ putReadyScanQueue(signal, operationRecPtr.p->scanRecPtr);
+ } else {
+ jam();
+ sendAcckeyconf(signal);
+ sendSignal(operationRecPtr.p->userblockref, GSN_ACCKEYCONF, signal, 6, JBB);
+ }//if
+ return operationRecPtr.p->elementIsDisappeared;
+}//Dbacc::executeNextOperation()
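+
+/* --------------------------------------------------------------------------------- */
+/* When the element has already been deleted by an earlier operation in the queue,   */
+/* executeNextOperation above refuses queued reads, updates and non-leading          */
+/* inserts/writes, while an insert or write that leads its parallel queue is turned  */
+/* back into an insert. The sketch below captures only that decision with stand-in   */
+/* names that are illustrative assumptions.                                           */
+/* --------------------------------------------------------------------------------- */
+#if 0
+enum OpType { READ, UPDATE, INSERT, WRITE, DEL };
+enum Outcome { REFUSE_READ_ERROR, CONTINUE_AS_INSERT };
+
+static Outcome nextOpOnDeletedElement(OpType op, bool firstInParallelQueue)
+{
+  if ((op == INSERT || op == WRITE) && firstInParallelQueue)
+    return CONTINUE_AS_INSERT;      // the element is re-created by this operation
+  return REFUSE_READ_ERROR;         // reads, updates, later inserts: tuple is gone
+}
+#endif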
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_FRAG_WAIT_QUE */
+/* DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS IN A LIST */
+/* OF THE FRAGMENT. THIS LIST IS USED TO STOP THE QUEUED OPERATIONS */
+/* DURING THE CREATE CHECKPOINT PROCESS FOR STOP AND RESTART OF THE */
+/* OPERATIONS. THIS SUBROUTINE TAKES AN OPERATION RECORD OUT OF THE LIST. */
+/* -------------------------------------------------------------------------------- */
+void Dbacc::takeOutFragWaitQue(Signal* signal)
+{
+ OperationrecPtr tofwqOperRecPtr;
+
+ if (operationRecPtr.p->opState == WAIT_IN_QUEUE) {
+ if (fragrecptr.p->sentWaitInQueOp == operationRecPtr.i) {
+ jam();
+ fragrecptr.p->sentWaitInQueOp = operationRecPtr.p->nextQueOp;
+ }//if
+ if (operationRecPtr.p->prevQueOp != RNIL) {
+ jam();
+ tofwqOperRecPtr.i = operationRecPtr.p->prevQueOp;
+ ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
+ tofwqOperRecPtr.p->nextQueOp = operationRecPtr.p->nextQueOp;
+ } else {
+ jam();
+ fragrecptr.p->firstWaitInQueOp = operationRecPtr.p->nextQueOp;
+ }//if
+ if (operationRecPtr.p->nextQueOp != RNIL) {
+ jam();
+ tofwqOperRecPtr.i = operationRecPtr.p->nextQueOp;
+ ptrCheckGuard(tofwqOperRecPtr, coprecsize, operationrec);
+ tofwqOperRecPtr.p->prevQueOp = operationRecPtr.p->prevQueOp;
+ } else {
+ jam();
+ fragrecptr.p->lastWaitInQueOp = operationRecPtr.p->prevQueOp;
+ }//if
+ operationRecPtr.p->opState = FREE_OP;
+ return;
+ } else {
+ ndbrequire(operationRecPtr.p->opState == FREE_OP);
+ }//if
+}//Dbacc::takeOutFragWaitQue()
+
+/**
+ * takeOutLockOwnersList
+ *
+ * Description: Take out an operation from the doubly linked
+ * lock owners list on the fragment.
+ *
+ */
+void Dbacc::takeOutLockOwnersList(Signal* signal,
+ const OperationrecPtr& outOperPtr)
+{
+ const Uint32 Tprev = outOperPtr.p->prevLockOwnerOp;
+ const Uint32 Tnext = outOperPtr.p->nextLockOwnerOp;
+
+#ifdef VM_TRACE
+ // Check that operation is already in the list
+ OperationrecPtr tmpOperPtr;
+ bool inList = false;
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ while (tmpOperPtr.i != RNIL){
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ if (tmpOperPtr.i == outOperPtr.i)
+ inList = true;
+ tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
+ }
+ ndbrequire(inList == true);
+#endif
+
+ ndbrequire(outOperPtr.p->lockOwner == ZTRUE);
+ outOperPtr.p->lockOwner = ZFALSE;
+
+ // Fast path through the code for the common case.
+ if ((Tprev == RNIL) && (Tnext == RNIL)) {
+ ndbrequire(fragrecptr.p->lockOwnersList == outOperPtr.i);
+ fragrecptr.p->lockOwnersList = RNIL;
+ return;
+ }
+
+ // Check previous operation
+ if (Tprev != RNIL) {
+ jam();
+ arrGuard(Tprev, coprecsize);
+ operationrec[Tprev].nextLockOwnerOp = Tnext;
+ } else {
+ fragrecptr.p->lockOwnersList = Tnext;
+ }//if
+
+ // Check next operation
+ if (Tnext == RNIL) {
+ return;
+ } else {
+ jam();
+ arrGuard(Tnext, coprecsize);
+ operationrec[Tnext].prevLockOwnerOp = Tprev;
+ }//if
+
+ return;
+}//Dbacc::takeOutLockOwnersList()
+
+/**
+ * insertLockOwnersList
+ *
+ * Description: Insert an operation first in the doubly linked lock owners
+ * list on the fragment.
+ *
+ */
+void Dbacc::insertLockOwnersList(Signal* signal,
+ const OperationrecPtr& insOperPtr)
+{
+ OperationrecPtr tmpOperPtr;
+
+#ifdef VM_TRACE
+ // Check that operation is not already in list
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ while(tmpOperPtr.i != RNIL){
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ ndbrequire(tmpOperPtr.i != insOperPtr.i);
+ tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp;
+ }
+#endif
+
+ ndbrequire(insOperPtr.p->lockOwner == ZFALSE);
+
+ insOperPtr.p->lockOwner = ZTRUE;
+ insOperPtr.p->prevLockOwnerOp = RNIL;
+ tmpOperPtr.i = fragrecptr.p->lockOwnersList;
+ fragrecptr.p->lockOwnersList = insOperPtr.i;
+ insOperPtr.p->nextLockOwnerOp = tmpOperPtr.i;
+ if (tmpOperPtr.i == RNIL) {
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec);
+ tmpOperPtr.p->prevLockOwnerOp = insOperPtr.i;
+ }//if
+}//Dbacc::insertLockOwnersList()
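+
+/* --------------------------------------------------------------------------------- */
+/* The two routines above maintain a doubly linked list of lock owners anchored in   */
+/* the fragment record, always inserting at the head. The stand-alone sketch below   */
+/* shows the same head insert and unlink on simplified stand-in types; the names are */
+/* illustrative assumptions, not the real Dbacc records.                              */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cstdint>
+
+static const uint32_t NIL = 0xFFFFFFFF;     // stand-in for RNIL
+
+struct Op   { uint32_t prevLockOwnerOp, nextLockOwnerOp; };
+struct Frag { uint32_t lockOwnersList; };   // head of the list
+
+static void insertFirst(Frag& frag, Op* ops, uint32_t i)
+{
+  ops[i].prevLockOwnerOp = NIL;
+  ops[i].nextLockOwnerOp = frag.lockOwnersList;
+  if (frag.lockOwnersList != NIL)
+    ops[frag.lockOwnersList].prevLockOwnerOp = i;
+  frag.lockOwnersList = i;
+}
+
+static void takeOut(Frag& frag, Op* ops, uint32_t i)
+{
+  const uint32_t prev = ops[i].prevLockOwnerOp;
+  const uint32_t next = ops[i].nextLockOwnerOp;
+  if (prev != NIL) ops[prev].nextLockOwnerOp = next;
+  else             frag.lockOwnersList = next;       // i was the head
+  if (next != NIL) ops[next].prevLockOwnerOp = prev;
+}
+#endif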
+
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF COMMIT AND ABORT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ALLOC_OVERFLOW_PAGE */
+/* DESCRIPTION: ALLOCATES AND INITIALISES A NEW OVERFLOW PAGE FOR THE FRAGMENT. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::allocOverflowPage(Signal* signal)
+{
+ DirRangePtr aopDirRangePtr;
+ DirectoryarrayPtr aopOverflowDirptr;
+ OverflowRecordPtr aopOverflowRecPtr;
+ Uint32 taopTmp1;
+ Uint32 taopTmp2;
+ Uint32 taopTmp3;
+
+ tresult = 0;
+ if ((cfirstfreepage == RNIL) &&
+ (cfreepage >= cpagesize)) {
+ jam();
+ zpagesize_error("Dbacc::allocOverflowPage");
+ tresult = ZPAGESIZE_ERROR;
+ return;
+ }//if
+ if (fragrecptr.p->firstFreeDirindexRec != RNIL) {
+ jam();
+ /* FRAGRECPTR:FIRST_FREE_DIRINDEX_REC POINTS */
+ /* TO THE FIRST ELEMENT IN A FREE LIST OF THE */
+ /* DIRECTORY INDEX WHICH HAS NULL AS PAGE */
+ aopOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ ptrCheckGuard(aopOverflowRecPtr, coverflowrecsize, overflowRecord);
+ troOverflowRecPtr.p = aopOverflowRecPtr.p;
+ takeRecOutOfFreeOverdir(signal);
+ } else if (cfirstfreeoverrec == RNIL) {
+ jam();
+ tresult = ZOVER_REC_ERROR;
+ return;
+ } else if ((cfirstfreedir == RNIL) &&
+ (cdirarraysize <= cdirmemory)) {
+ jam();
+ tresult = ZDIRSIZE_ERROR;
+ return;
+ } else {
+ jam();
+ seizeOverRec(signal);
+ aopOverflowRecPtr = sorOverflowRecPtr;
+ aopOverflowRecPtr.p->dirindex = fragrecptr.p->lastOverIndex;
+ }//if
+ aopOverflowRecPtr.p->nextOverRec = RNIL;
+ aopOverflowRecPtr.p->prevOverRec = RNIL;
+ fragrecptr.p->firstOverflowRec = aopOverflowRecPtr.i;
+ fragrecptr.p->lastOverflowRec = aopOverflowRecPtr.i;
+ taopTmp1 = aopOverflowRecPtr.p->dirindex;
+ aopDirRangePtr.i = fragrecptr.p->overflowdir;
+ taopTmp2 = taopTmp1 >> 8;
+ taopTmp3 = taopTmp1 & 0xff;
+ ptrCheckGuard(aopDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(taopTmp2, 256);
+ if (aopDirRangePtr.p->dirArray[taopTmp2] == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ aopDirRangePtr.p->dirArray[taopTmp2] = sdDirptr.i;
+ }//if
+ aopOverflowDirptr.i = aopDirRangePtr.p->dirArray[taopTmp2];
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ ptrCheckGuard(aopOverflowDirptr, cdirarraysize, directoryarray);
+ aopOverflowDirptr.p->pagep[taopTmp3] = spPageptr.i;
+ tiopPageId = aopOverflowRecPtr.p->dirindex;
+ iopOverflowRecPtr = aopOverflowRecPtr;
+ iopPageptr = spPageptr;
+ initOverpage(signal);
+ aopOverflowRecPtr.p->overpage = spPageptr.i;
+ if (fragrecptr.p->lastOverIndex <= aopOverflowRecPtr.p->dirindex) {
+ jam();
+ ndbrequire(fragrecptr.p->lastOverIndex == aopOverflowRecPtr.p->dirindex);
+ fragrecptr.p->lastOverIndex++;
+ }//if
+}//Dbacc::allocOverflowPage()
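+
+/* --------------------------------------------------------------------------------- */
+/* The overflow directory above is addressed in two levels: the high bits of the     */
+/* directory index select a slot in the DirRange and the low 8 bits select the page  */
+/* slot inside that Directoryarray. A stand-alone worked example with a made-up      */
+/* index value (illustrative only):                                                   */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+int main()
+{
+  const uint32_t dirindex  = 0x0123;           // hypothetical directory index (291)
+  const uint32_t rangeSlot = dirindex >> 8;    // 1  -> second Directoryarray
+  const uint32_t pageSlot  = dirindex & 0xff;  // 35 -> page slot inside it
+  assert(rangeSlot == 1 && pageSlot == 35);
+  return 0;
+}
+#endif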
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* EXPAND/SHRINK MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/*EXPANDCHECK EXPAND BUCKET ORD */
+/* SENDER: ACC, LEVEL B */
+/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
+/* DESCRIPTION: A BUCKET OF A FRAGMENT PAGE WILL BE EXPANDED INTO TWO BUCKETS */
+/* ACCORDING TO LH3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* EXPANDCHECK EXPAND BUCKET ORD */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+/* A BUCKET OF THE FRAGMENT WILL */
+/* BE EXPANDED ACCORDING TO LH3, */
+/* AND COMMIT TRANSACTION PROCESS */
+/* WILL BE CONTINUED */
+Uint32 Dbacc::checkScanExpand(Signal* signal)
+{
+ Uint32 Ti;
+ Uint32 TreturnCode = 0;
+ Uint32 TPageIndex;
+ Uint32 TDirInd;
+ Uint32 TSplit;
+ Uint32 TreleaseInd = 0;
+ Uint32 TreleaseScanBucket;
+ Uint32 TreleaseScanIndicator[4];
+ DirectoryarrayPtr TDirptr;
+ DirRangePtr TDirRangePtr;
+ Page8Ptr TPageptr;
+ ScanRecPtr TscanPtr;
+ RootfragmentrecPtr Trootfragrecptr;
+
+ Trootfragrecptr.i = fragrecptr.p->myroot;
+ TSplit = fragrecptr.p->p;
+ ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (Ti = 0; Ti < 4; Ti++) {
+ TreleaseScanIndicator[Ti] = 0;
+ if (Trootfragrecptr.p->scan[Ti] != RNIL) {
+ //-------------------------------------------------------------
+ // A scan is ongoing on this particular local fragment. We have
+ // to check its current state.
+ //-------------------------------------------------------------
+ TscanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+ if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
+ if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ if (TSplit == TscanPtr.p->nextBucketIndex) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently scanning this bucket. We cannot split it
+ // simultaneously with the scan. We have to pass this offer for
+ // splitting the bucket.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TSplit > TscanPtr.p->nextBucketIndex) {
+ jam();
+ //-------------------------------------------------------------
+ // This bucket has not yet been scanned. We must reset the scanned
+ // bit indicator for this scan on this bucket.
+ //-------------------------------------------------------------
+ TreleaseScanIndicator[Ti] = 1;
+ TreleaseInd = 1;
+ } else {
+ jam();
+ }//if
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are performing a second lap to handle buckets that were
+ // merged during the first lap of scanning. During this second
+ // lap we do not allow any splits or merges.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else {
+ ndbrequire(TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED);
+ jam();
+ //-------------------------------------------------------------
+ // The scan is completed and we can thus go ahead and perform
+ // the split.
+ //-------------------------------------------------------------
+ }//if
+ }//if
+ }//if
+ }//for
+ if (TreleaseInd == 1) {
+ TreleaseScanBucket = TSplit;
+ TDirRangePtr.i = fragrecptr.p->directory;
+ TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((TDirInd >> 8), 256);
+ TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
+ ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
+ TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
+ ptrCheckGuard(TPageptr, cpagesize, page8);
+ for (Ti = 0; Ti < 4; Ti++) {
+ if (TreleaseScanIndicator[Ti] == 1) {
+ jam();
+ scanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ rsbPageidptr = TPageptr;
+ trsbPageindex = TPageIndex;
+ releaseScanBucket(signal);
+ }//if
+ }//for
+ }//if
+ return TreturnCode;
+}//Dbacc::checkScanExpand()
+
+void Dbacc::execEXPANDCHECK2(Signal* signal)
+{
+ jamEntry();
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+ jam();
+ reenable_expand_after_redo_log_exection_complete(signal);
+ return;
+ }
+
+ DirectoryarrayPtr newDirptr;
+
+ fragrecptr.i = signal->theData[0];
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ Uint32 tmp = 1;
+ tmp = tmp << 31;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->expandFlag = 0;
+ if (fragrecptr.p->slack < tmp) {
+ jam();
+ /* SLACK IS NON-NEGATIVE, I.E. THE SIGN BIT IS NOT SET */
+ /*--------------------------------------------------------------*/
+ /* THE SLACK HAS IMPROVED AND IS NOW ACCEPTABLE AND WE */
+ /* CAN FORGET ABOUT THE EXPAND PROCESS. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE COULD NOT ALLOCATE ANY OVERFLOW PAGE. THUS WE HAVE TO STOP*/
+ /* THE EXPAND SINCE WE CANNOT GUARANTEE ITS COMPLETION. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage >= cpagesize) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE HAVE TO STOP THE EXPAND PROCESS SINCE THERE ARE NO FREE */
+ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
+ /* CANNOT COMPLETE THE EXPAND. TO AVOID THE CRASH WE EXIT HERE. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (checkScanExpand(signal) == 1) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // A scan state was inconsistent with performing an expand
+ // operation.
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to start up an
+ // expand operation
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+
+ /*--------------------------------------------------------------------------*/
+ /* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/
+ /* OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENTS WHICH HAVE A 1 IN*/
+ /* THE NEXT HASH BIT. THIS BIT IS USED IN THE SPLIT MECHANISM TO */
+ /* DECIDE WHICH ELEMENT GOES WHERE. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ texpReceivedBucket = (fragrecptr.p->maxp + fragrecptr.p->p) + 1; /* RECEIVED BUCKET */
+ texpDirInd = texpReceivedBucket >> fragrecptr.p->k;
+ newDirptr.i = RNIL;
+ ptrNull(newDirptr);
+ texpDirRangeIndex = texpDirInd >> 8;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(texpDirRangeIndex, 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
+ if (expDirptr.i == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ return;
+ } else {
+ jam();
+ newDirptr = sdDirptr;
+ expDirptr = sdDirptr;
+ expDirRangePtr.p->dirArray[texpDirRangeIndex] = sdDirptr.i;
+ }//if
+ } else {
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ }//if
+ texpDirPageIndex = texpDirInd & 0xff;
+ expPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
+ if (expPageptr.i == RNIL) {
+ jam();
+ seizePage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ if (newDirptr.i != RNIL) {
+ jam();
+ rdDirptr.i = newDirptr.i;
+ releaseDirectory(signal);
+ }//if
+ return;
+ }//if
+ expDirptr.p->pagep[texpDirPageIndex] = spPageptr.i;
+ tipPageId = texpDirInd;
+ inpPageptr = spPageptr;
+ initPage(signal);
+ fragrecptr.p->dirsize++;
+ expPageptr = spPageptr;
+ } else {
+ ptrCheckGuard(expPageptr, cpagesize, page8);
+ }//if
+
+ fragrecptr.p->expReceivePageptr = expPageptr.i;
+ fragrecptr.p->expReceiveIndex = texpReceivedBucket & ((1 << fragrecptr.p->k) - 1);
+ /*--------------------------------------------------------------------------*/
+ /* THE NEXT ACTION IS TO FIND THE PAGE, THE PAGE INDEX AND THE PAGE */
+ /* DIRECTORY OF THE BUCKET TO BE SPLIT. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ cexcPageindex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ texpDirInd = fragrecptr.p->p >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((texpDirInd >> 8), 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirInd >> 8];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = expDirptr.p->pagep[texpDirInd & 0xff];
+ fragrecptr.p->expSenderIndex = cexcPageindex;
+ fragrecptr.p->expSenderPageptr = excPageptr.i;
+ if (excPageptr.i == RNIL) {
+ jam();
+ endofexpLab(signal); /* EMPTY BUCKET */
+ return;
+ }//if
+ fragrecptr.p->expReceiveForward = ZTRUE;
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ expandcontainer(signal);
+ endofexpLab(signal);
+ return;
+}//Dbacc::execEXPANDCHECK2()
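+
+/* --------------------------------------------------------------------------------- */
+/* The slack tests above keep slack in an unsigned 32-bit word but treat it as a     */
+/* signed quantity: a value with the top bit set stands for negative slack, so the   */
+/* comparisons against 1 << 31 are sign tests. A stand-alone worked example           */
+/* (illustrative only):                                                                */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+int main()
+{
+  uint32_t slack = 100;
+  slack -= 300;                                // wraps around: 0xFFFFFF38
+  const bool negative = (slack >= (1u << 31)); // top bit set -> negative slack
+  assert(negative && (int32_t)slack == -200);  // same bits read as a signed value
+  return 0;
+}
+#endif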
+
+void Dbacc::endofexpLab(Signal* signal)
+{
+ fragrecptr.p->p++;
+ fragrecptr.p->slack += fragrecptr.p->maxloadfactor;
+ fragrecptr.p->expandCounter++;
+ if (fragrecptr.p->p > fragrecptr.p->maxp) {
+ jam();
+ fragrecptr.p->maxp = (fragrecptr.p->maxp << 1) | 1;
+ fragrecptr.p->lhdirbits++;
+ fragrecptr.p->hashcheckbit++;
+ fragrecptr.p->p = 0;
+ }//if
+ Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
+ Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
+ fragrecptr.p->slackCheck = noOfBuckets * Thysteres;
+ if (fragrecptr.p->slack > (1u << 31)) {
+ jam();
+ /* SLACK IS NEGATIVE, I.E. THE SIGN BIT IS SET */
+ /* --------------------------------------------------------------------------------- */
+ /* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */
+ /* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. */
+ /* --------------------------------------------------------------------------------- */
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ }//if
+ return;
+}//Dbacc::endofexpLab()
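+
+/* --------------------------------------------------------------------------------- */
+/* The bookkeeping above follows classical linear hashing (LH3): buckets 0..p-1 have */
+/* already been split under the doubled mask, buckets p..maxp have not. Assuming the */
+/* textbook LH addressing scheme, the bucket of a hash value is computed as in the   */
+/* stand-alone sketch below (stand-in names, illustrative only).                      */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cstdint>
+
+// maxp is of the form 2^n - 1 and p is the next bucket to be split.
+static uint32_t lhBucket(uint32_t hash, uint32_t maxp, uint32_t p)
+{
+  uint32_t addr = hash & maxp;                 // address with the smaller mask
+  if (addr < p)
+    addr = hash & ((maxp << 1) | 1);           // already split: use one more hash bit
+  return addr;
+}
+#endif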
+
+void Dbacc::reenable_expand_after_redo_log_exection_complete(Signal* signal){
+
+ tabptr.i = signal->theData[0];
+ Uint32 fragId = signal->theData[1];
+
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, fragId));
+#if 0
+ ndbout_c("reenable expand check for table %d fragment: %d",
+ tabptr.i, fragId);
+#endif
+
+ for (Uint32 i = 0; i < 2; i++) {
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ switch(fragrecptr.p->expandFlag){
+ case 0:
+ /**
+ * Hmm... this means that it has already been reenabled...
+ */
+ ndbassert(false);
+ continue;
+ case 1:
+ /**
+ * Nothing is going on, start the expand check
+ */
+ case 2:
+ /**
+ * A shrink is running, do expand check anyway
+ * (to reset expandFlag)
+ */
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ break;
+ }
+ }
+}
+
+void Dbacc::execDEBUG_SIG(Signal* signal)
+{
+ jamEntry();
+ expPageptr.i = signal->theData[0];
+
+ progError(__LINE__,
+ ERR_SR_UNDOLOG);
+ return;
+}//Dbacc::execDEBUG_SIG()
+
+/* --------------------------------------------------------------------------------- */
+/* EXPANDCONTAINER */
+/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
+/* CEXC_PAGEINDEX (INDEX OF THE BUCKET). */
+/* */
+/* DESCRIPTION: THE HASH VALUE OF ALL ELEMENTS IN THE CONTAINER WILL BE */
+/* CHECKED. SOME OF THESE ELEMENTS HAVE TO MOVE TO THE NEW CONTAINER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::expandcontainer(Signal* signal)
+{
+ Uint32 texcHashvalue;
+ Uint32 texcTmp;
+ Uint32 texcIndex;
+ Uint32 guard20;
+
+ cexcPrevpageptr = RNIL;
+ cexcPrevconptr = 0;
+ cexcForward = ZTRUE;
+ EXP_CONTAINER_LOOP:
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ if (cexcForward == ZTRUE) {
+ jam();
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ cexcElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
+ } else {
+ jam();
+ cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ cexcElementptr = cexcContainerptr - 1;
+ }//if
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
+ cexcMovedLen = ZCON_HEAD_SIZE;
+ if (cexcContainerlen <= ZCON_HEAD_SIZE) {
+ ndbrequire(cexcContainerlen >= ZCON_HEAD_SIZE);
+ jam();
+ goto NEXT_ELEMENT;
+ }//if
+ NEXT_ELEMENT_LOOP:
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ /* --------------------------------------------------------------------------------- */
+ /* CEXC_PAGEINDEX PAGE INDEX OF CURRENT CONTAINER BEING EXAMINED. */
+ /* CEXC_CONTAINERPTR INDEX OF CURRENT CONTAINER BEING EXAMINED. */
+ /* CEXC_ELEMENTPTR INDEX OF CURRENT ELEMENT BEING EXAMINED. */
+ /* EXC_PAGEPTR PAGE WHERE CURRENT ELEMENT RESIDES. */
+ /* CEXC_PREVPAGEPTR PAGE OF PREVIOUS CONTAINER. */
+ /* CEXC_PREVCONPTR INDEX OF PREVIOUS CONTAINER */
+ /* CEXC_FORWARD DIRECTION OF CURRENT CONTAINER */
+ /* --------------------------------------------------------------------------------- */
+ arrGuard(cexcElementptr, 2048);
+ tidrElemhead = excPageptr.p->word32[cexcElementptr];
+ if (ElementHeader::getUnlocked(tidrElemhead)){
+ jam();
+ texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
+ } else {
+ jam();
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // unlocked state and with the hash value part there with tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ excPageptr.p->word32[cexcElementptr] = eh;
+ }//if
+ }//if
+ if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THIS ELEMENT IS NOT TO BE MOVED. WE CALCULATE THE WHEREABOUTS OF THE NEXT */
+ /* ELEMENT AND PROCEED WITH THAT OR END THE SEARCH IF THERE ARE NO MORE */
+ /* ELEMENTS IN THIS CONTAINER. */
+ /* --------------------------------------------------------------------------------- */
+ goto NEXT_ELEMENT;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE HASH BIT WAS SET AND WE SHALL MOVE THIS ELEMENT TO THE NEW BUCKET. */
+ /* WE START BY READING THE ELEMENT TO BE ABLE TO INSERT IT INTO THE NEW BUCKET.*/
+ /* THEN WE INSERT THE ELEMENT INTO THE NEW BUCKET. THE NEXT STEP IS TO DELETE */
+ /* THE ELEMENT FROM THIS BUCKET. THIS IS PERFORMED BY REPLACING IT WITH THE */
+ /* LAST ELEMENT IN THE BUCKET. IF THIS ELEMENT IS TO BE MOVED WE MOVE IT AND */
+ /* GET THE LAST ELEMENT AGAIN UNTIL WE EITHER FIND ONE THAT STAYS OR THIS */
+ /* ELEMENT IS THE LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ texcTmp = cexcElementptr + cexcForward;
+ guard20 = fragrecptr.p->localkeylen - 1;
+ for (texcIndex = 0; texcIndex <= guard20; texcIndex++) {
+ arrGuard(texcIndex, 2);
+ arrGuard(texcTmp, 2048);
+ clocalkey[texcIndex] = excPageptr.p->word32[texcTmp];
+ texcTmp = texcTmp + cexcForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ REMOVE_LAST_LOOP:
+ jam();
+ lastPageptr.i = excPageptr.i;
+ lastPageptr.p = excPageptr.p;
+ tlastContainerptr = cexcContainerptr;
+ lastPrevpageptr.i = cexcPrevpageptr;
+ ptrCheck(lastPrevpageptr, cpagesize, page8);
+ tlastPrevconptr = cexcPrevconptr;
+ arrGuard(tlastContainerptr, 2048);
+ tlastContainerhead = lastPageptr.p->word32[tlastContainerptr];
+ tlastContainerlen = tlastContainerhead >> 26;
+ tlastForward = cexcForward;
+ tlastPageindex = cexcPageindex;
+ getLastAndRemove(signal);
+ if (excPageptr.i == lastPageptr.i) {
+ if (cexcElementptr == tlastElementptr) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE CURRENT ELEMENT WAS ALSO THE LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE CURRENT ELEMENT WAS NOT THE LAST ELEMENT. IF THE LAST ELEMENT SHOULD */
+ /* STAY WE COPY IT TO THE POSITION OF THE CURRENT ELEMENT, OTHERWISE WE INSERT */
+ /* INTO THE NEW BUCKET, REMOVE IT AND TRY WITH THE NEW LAST ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ arrGuard(tlastElementptr, 2048);
+ tidrElemhead = lastPageptr.p->word32[tlastElementptr];
+ if (ElementHeader::getUnlocked(tidrElemhead)) {
+ jam();
+ texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead);
+ } else {
+ jam();
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ texcHashvalue = idrOperationRecPtr.p->hashvaluePart;
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) != 0)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // unlocked state and with the hash value part there with tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ lastPageptr.p->word32[tlastElementptr] = eh;
+ }//if
+ }//if
+ if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS NOT TO BE MOVED. WE COPY IT TO THE CURRENT ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ delPageptr = excPageptr;
+ tdelContainerptr = cexcContainerptr;
+ tdelForward = cexcForward;
+ tdelElementptr = cexcElementptr;
+ deleteElement(signal);
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST ELEMENT IS ALSO TO BE MOVED. */
+ /* --------------------------------------------------------------------------------- */
+ texcTmp = tlastElementptr + tlastForward;
+ for (texcIndex = 0; texcIndex < fragrecptr.p->localkeylen; texcIndex++) {
+ arrGuard(texcIndex, 2);
+ arrGuard(texcTmp, 2048);
+ clocalkey[texcIndex] = lastPageptr.p->word32[texcTmp];
+ texcTmp = texcTmp + tlastForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ goto REMOVE_LAST_LOOP;
+ }//if
+ NEXT_ELEMENT:
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcMovedLen = cexcMovedLen + fragrecptr.p->elementLength;
+ if ((cexcContainerhead >> 26) > cexcMovedLen) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE HAVE NOT YET MOVED THE COMPLETE CONTAINER. WE PROCEED WITH THE NEXT */
+ /* ELEMENT IN THE CONTAINER. IT IS IMPORTANT TO READ THE CONTAINER LENGTH */
+ /* FROM THE CONTAINER HEADER SINCE IT MIGHT CHANGE BY REMOVING THE LAST */
+ /* ELEMENT IN THE BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ cexcElementptr = cexcElementptr + (cexcForward * fragrecptr.p->elementLength);
+ goto NEXT_ELEMENT_LOOP;
+ }//if
+ if (((cexcContainerhead >> 7) & 3) != 0) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WE PROCEED TO THE NEXT CONTAINER IN THE BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ cexcPrevpageptr = excPageptr.i;
+ cexcPrevconptr = cexcContainerptr;
+ nextcontainerinfoExp(signal);
+ goto EXP_CONTAINER_LOOP;
+ }//if
+}//Dbacc::expandcontainer()
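+
+/* --------------------------------------------------------------------------------- */
+/* Whether an element stays in the old bucket or moves to the new one is decided by  */
+/* the single hash bit selected by hashcheckbit, as in the tests above. A stand-alone */
+/* worked example with made-up values (illustrative only):                            */
+/* --------------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+int main()
+{
+  const uint32_t hashcheckbit = 3;             // hypothetical split bit
+  const uint32_t stays = 0x25;                 // bit 3 is 0 -> element stays put
+  const uint32_t moves = 0x2D;                 // bit 3 is 1 -> element is moved
+  assert(((stays >> hashcheckbit) & 1) == 0);
+  assert(((moves >> hashcheckbit) & 1) == 1);
+  return 0;
+}
+#endif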
+
+/* ******************--------------------------------------------------------------- */
+/* SHRINKCHECK JOIN BUCKET ORD */
+/* SENDER: ACC, LEVEL B */
+/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
+/* DESCRIPTION: TWO BUCKETS OF A FRAGMENT PAGE WILL BE JOINED TOGETHER */
+/* ACCORDING TO LH3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SHRINKCHECK JOIN BUCKET ORD */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+/* TWO BUCKETS OF THE FRAGMENT */
+/* WILL BE JOINED ACCORDING TO LH3 */
+/* AND COMMIT TRANSACTION PROCESS */
+/* WILL BE CONTINUED */
+Uint32 Dbacc::checkScanShrink(Signal* signal)
+{
+ Uint32 Ti;
+ Uint32 TreturnCode = 0;
+ Uint32 TPageIndex;
+ Uint32 TDirInd;
+ Uint32 TmergeDest;
+ Uint32 TmergeSource;
+ Uint32 TreleaseScanBucket;
+ Uint32 TreleaseInd = 0;
+ Uint32 TreleaseScanIndicator[4];
+ DirectoryarrayPtr TDirptr;
+ DirRangePtr TDirRangePtr;
+ Page8Ptr TPageptr;
+ ScanRecPtr TscanPtr;
+ RootfragmentrecPtr Trootfragrecptr;
+
+ Trootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(Trootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (fragrecptr.p->p == 0) {
+ jam();
+ TmergeDest = fragrecptr.p->maxp >> 1;
+ } else {
+ jam();
+ TmergeDest = fragrecptr.p->p - 1;
+ }//if
+ TmergeSource = fragrecptr.p->maxp + fragrecptr.p->p;
+ for (Ti = 0; Ti < 4; Ti++) {
+ TreleaseScanIndicator[Ti] = 0;
+ if (Trootfragrecptr.p->scan[Ti] != RNIL) {
+ TscanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+ if (TscanPtr.p->activeLocalFrag == fragrecptr.i) {
+ //-------------------------------------------------------------
+ // A scan is ongoing on this particular local fragment. We have
+ // to check its current state.
+ //-------------------------------------------------------------
+ if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ jam();
+ if ((TmergeDest == TscanPtr.p->nextBucketIndex) ||
+ (TmergeSource == TscanPtr.p->nextBucketIndex)) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently scanning one of the buckets involved in the
+ // merge. We cannot merge while simultaneously performing a scan.
+ // We have to pass this offer for merging the buckets.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TmergeDest < TscanPtr.p->nextBucketIndex) {
+ jam();
+ TreleaseScanIndicator[Ti] = 1;
+ TreleaseInd = 1;
+ }//if
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are performing a second lap to handle buckets that were
+ // merged during the first lap of scanning. During this second
+ // lap we do not allow any splits or merges.
+ //-------------------------------------------------------------
+ TreturnCode = 1;
+ return TreturnCode;
+ } else if (TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
+ jam();
+ //-------------------------------------------------------------
+ // The scan is completed and we can thus go ahead and perform
+ // the join of the two buckets.
+ //-------------------------------------------------------------
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return TreturnCode;
+ }//if
+ }//if
+ }//if
+ }//for
+ if (TreleaseInd == 1) {
+ jam();
+ TreleaseScanBucket = TmergeSource;
+ TDirRangePtr.i = fragrecptr.p->directory;
+ TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */
+ TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */
+ ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((TDirInd >> 8), 256);
+ TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8];
+ ptrCheckGuard(TDirptr, cdirarraysize, directoryarray);
+ TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff];
+ ptrCheckGuard(TPageptr, cpagesize, page8);
+ for (Ti = 0; Ti < 4; Ti++) {
+ if (TreleaseScanIndicator[Ti] == 1) {
+ jam();
+ scanPtr.i = Trootfragrecptr.p->scan[Ti];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ rsbPageidptr.i = TPageptr.i;
+ rsbPageidptr.p = TPageptr.p;
+ trsbPageindex = TPageIndex;
+ releaseScanBucket(signal);
+ if (TmergeDest < scanPtr.p->minBucketIndexToRescan) {
+ jam();
+ //-------------------------------------------------------------
+ // We have to keep track of the starting bucket to Rescan in the
+ // second lap.
+ //-------------------------------------------------------------
+ scanPtr.p->minBucketIndexToRescan = TmergeDest;
+ }//if
+ if (TmergeDest > scanPtr.p->maxBucketIndexToRescan) {
+ jam();
+ //-------------------------------------------------------------
+ // We have to keep track of the ending bucket to Rescan in the
+ // second lap.
+ //-------------------------------------------------------------
+ scanPtr.p->maxBucketIndexToRescan = TmergeDest;
+ }//if
+ }//if
+ }//for
+ }//if
+ return TreturnCode;
+}//Dbacc::checkScanShrink()
+
+void Dbacc::execSHRINKCHECK2(Signal* signal)
+{
+ Uint32 tshrTmp1;
+
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ Uint32 oldFlag = signal->theData[3];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->expandFlag = oldFlag;
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* THE SLACK IS BACK BELOW THE JOIN-BUCKETS THRESHOLD, SO IT IS */
+ /* NO LONGER NECESSARY TO SHRINK THE FRAGMENT. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->slack > (1u << 31)) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* THE SLACK IS NEGATIVE. IN THIS CASE WE DO NOT NEED ANY */
+ /* SHRINK. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ texpDirInd = (fragrecptr.p->maxp + fragrecptr.p->p) >> fragrecptr.p->k;
+ if (((fragrecptr.p->maxp + fragrecptr.p->p) & ((1 << fragrecptr.p->k) - 1)) == 0) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ if (fragrecptr.p->lcpMaxDirIndex > texpDirInd) {
+ if (fragrecptr.p->lcpDirIndex <= texpDirInd) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE DO NOT ALLOW ANY SHRINKS THAT REMOVE PAGES THAT ARE */
+ /* NEEDED AS PART OF THE LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ }//if
+ }//if
+ }//if
+ if (fragrecptr.p->firstOverflowRec == RNIL) {
+ jam();
+ allocOverflowPage(signal);
+ if (tresult > ZLIMIT_OF_ERROR) {
+ jam();
+ return;
+ }//if
+ }//if
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage >= cpagesize) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* WE HAVE TO STOP THE SHRINK PROCESS SINCE THERE ARE NO FREE */
+ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */
+ /* CANNOT COMPLETE THE SHRINK. TO AVOID THE CRASH WE EXIT HERE. */
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (checkScanShrink(signal) == 1) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // A scan state was inconsistent with performing a shrink
+ // operation.
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_EXPAND) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to start up a
+ // shrink operation.
+ /*--------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ if (fragrecptr.p->p == 0) {
+ jam();
+ fragrecptr.p->maxp = fragrecptr.p->maxp >> 1;
+ fragrecptr.p->p = fragrecptr.p->maxp;
+ fragrecptr.p->lhdirbits--;
+ fragrecptr.p->hashcheckbit--;
+ } else {
+ jam();
+ fragrecptr.p->p--;
+ }//if
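+ // This is the reverse of one expand step in the linear hashing scheme:
+ // the split pointer p is moved back one bucket, and when it passes zero
+ // the bucket array size (maxp) is halved and one hash bit less is used.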
+
+ /*--------------------------------------------------------------------------*/
+ /* WE START BY FINDING THE NECESSARY INFORMATION ABOUT THE BUCKET TO BE */
+ /* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ cexcPageindex = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) & ((1 << fragrecptr.p->k) - 1);
+ texpDirInd = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) >> fragrecptr.p->k;
+ texpDirRangeIndex = texpDirInd >> 8;
+ texpDirPageIndex = texpDirInd & 0xff;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(texpDirRangeIndex, 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = expDirptr.p->pagep[texpDirPageIndex];
+ fragrecptr.p->expSenderDirptr = expDirptr.i;
+ fragrecptr.p->expSenderIndex = cexcPageindex;
+ fragrecptr.p->expSenderPageptr = excPageptr.i;
+ fragrecptr.p->expSenderDirIndex = texpDirInd;
+ /*--------------------------------------------------------------------------*/
+ /* WE NOW PROCEED BY FINDING THE NECESSARY INFORMATION ABOUT THE */
+ /* RECEIVING BUCKET. */
+ /*--------------------------------------------------------------------------*/
+ expDirRangePtr.i = fragrecptr.p->directory;
+ texpReceivedBucket = fragrecptr.p->p >> fragrecptr.p->k;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((texpReceivedBucket >> 8), 256);
+ expDirptr.i = expDirRangePtr.p->dirArray[texpReceivedBucket >> 8];
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ fragrecptr.p->expReceivePageptr = expDirptr.p->pagep[texpReceivedBucket & 0xff];
+ fragrecptr.p->expReceiveIndex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1);
+ fragrecptr.p->expReceiveForward = ZTRUE;
+ if (excPageptr.i == RNIL) {
+ jam();
+ endofshrinkbucketLab(signal); /* EMPTY BUCKET */
+ return;
+ }//if
+ /*--------------------------------------------------------------------------*/
+ /* INITIALISE THE VARIABLES FOR THE SHRINK PROCESS. */
+ /*--------------------------------------------------------------------------*/
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ cexcForward = ZTRUE;
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
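+ /*--------------------------------------------------------------------------*/
+ /* CONTAINER HEADER LAYOUT AS DECODED IN THIS MODULE: BITS 31-26 = LENGTH */
+ /* IN WORDS, BIT 10 = THE OPPOSITE END OF THE SAME BUFFER ALSO HOLDS A */
+ /* CONTAINER (SEE THE RELEASE CALLS BELOW), BIT 9 = NEXT CONTAINER IS ON */
+ /* THE SAME PAGE, BITS 8-7 = END (ZLEFT/ZRIGHT) OF THE NEXT CONTAINER */
+ /* (ZERO MEANS NO NEXT CONTAINER), BITS 6-0 = PAGE INDEX OF THE NEXT */
+ /* CONTAINER. */
+ /*--------------------------------------------------------------------------*/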
+ if (cexcContainerlen <= ZCON_HEAD_SIZE) {
+ ndbrequire(cexcContainerlen == ZCON_HEAD_SIZE);
+ } else {
+ jam();
+ shrinkcontainer(signal);
+ }//if
+ /*--------------------------------------------------------------------------*/
+ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
+ /*--------------------------------------------------------------------------*/
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ rlPageptr = excPageptr;
+ trlPageindex = cexcPageindex;
+ trlRelCon = ZFALSE;
+ turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ }//if
+ tshrTmp1 = ZCON_HEAD_SIZE;
+ tshrTmp1 = tshrTmp1 << 26;
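+ /* TSHR_TMP1 NOW HOLDS THE HEADER OF AN EMPTY CONTAINER: LENGTH = */
+ /* ZCON_HEAD_SIZE AND ALL NEXT-CONTAINER BITS CLEARED. */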
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ datapageptr.p = excPageptr.p;
+ cundoinfolength = 1;
+ cundoElemIndex = cexcContainerptr;
+ undoWritingProcess(signal);
+ }//if
+ dbgWord32(excPageptr, cexcContainerptr, tshrTmp1);
+ arrGuard(cexcContainerptr, 2048);
+ excPageptr.p->word32[cexcContainerptr] = tshrTmp1;
+ if (((cexcContainerhead >> 7) & 0x3) == 0) {
+ jam();
+ endofshrinkbucketLab(signal);
+ return;
+ }//if
+ nextcontainerinfoExp(signal);
+ do {
+ cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS);
+ if (cexcForward == ZTRUE) {
+ jam();
+ cexcContainerptr = cexcContainerptr + ZHEAD_SIZE;
+ } else {
+ jam();
+ cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ }//if
+ arrGuard(cexcContainerptr, 2048);
+ cexcContainerhead = excPageptr.p->word32[cexcContainerptr];
+ cexcContainerlen = cexcContainerhead >> 26;
+ ndbrequire(cexcContainerlen > ZCON_HEAD_SIZE);
+ /*--------------------------------------------------------------------------*/
+ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */
+ /*--------------------------------------------------------------------------*/
+ shrinkcontainer(signal);
+ cexcPrevpageptr = excPageptr.i;
+ cexcPrevpageindex = cexcPageindex;
+ cexcPrevforward = cexcForward;
+ if (((cexcContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ /*--------------------------------------------------------------------------*/
+ /* WE MUST CALL THE NEXT CONTAINER INFO ROUTINE BEFORE WE RELEASE THE */
+ /* CONTAINER SINCE THE RELEASE WILL OVERWRITE THE NEXT POINTER. */
+ /*--------------------------------------------------------------------------*/
+ nextcontainerinfoExp(signal);
+ }//if
+ rlPageptr.i = cexcPrevpageptr;
+ ptrCheckGuard(rlPageptr, cpagesize, page8);
+ trlPageindex = cexcPrevpageindex;
+ if (cexcPrevforward == ZTRUE) {
+ jam();
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ trlRelCon = ZFALSE;
+ turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseRightlist(signal);
+ }//if
+ trlRelCon = ZTRUE;
+ tullIndex = cexcContainerptr;
+ releaseLeftlist(signal);
+ } else {
+ jam();
+ if (((cexcContainerhead >> 10) & 1) == 1) {
+ jam();
+ trlRelCon = ZFALSE;
+ tullIndex = cexcContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE);
+ releaseLeftlist(signal);
+ }//if
+ trlRelCon = ZTRUE;
+ turlIndex = cexcContainerptr;
+ releaseRightlist(signal);
+ }//if
+ } while (((cexcContainerhead >> 7) & 0x3) != 0);
+ endofshrinkbucketLab(signal);
+ return;
+}//Dbacc::execSHRINKCHECK2()
+
+void Dbacc::endofshrinkbucketLab(Signal* signal)
+{
+ fragrecptr.p->expandCounter--;
+ fragrecptr.p->slack -= fragrecptr.p->maxloadfactor;
+ if (fragrecptr.p->expSenderIndex == 0) {
+ jam();
+ fragrecptr.p->dirsize--;
+ if (fragrecptr.p->expSenderPageptr != RNIL) {
+ jam();
+ rpPageptr.i = fragrecptr.p->expSenderPageptr;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(signal);
+ expDirptr.i = fragrecptr.p->expSenderDirptr;
+ ptrCheckGuard(expDirptr, cdirarraysize, directoryarray);
+ expDirptr.p->pagep[fragrecptr.p->expSenderDirIndex & 0xff] = RNIL;
+ }//if
+ if (((((fragrecptr.p->p + fragrecptr.p->maxp) + 1) >> fragrecptr.p->k) & 0xff) == 0) {
+ jam();
+ rdDirptr.i = fragrecptr.p->expSenderDirptr;
+ releaseDirectory(signal);
+ expDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange);
+ arrGuard((fragrecptr.p->expSenderDirIndex >> 8), 256);
+ expDirRangePtr.p->dirArray[fragrecptr.p->expSenderDirIndex >> 8] = RNIL;
+ }//if
+ }//if
+ if (fragrecptr.p->slack < (1u << 31)) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* THE SLACK IS POSITIVE. IN THIS CASE WE WILL CHECK WHETHER */
+ /* WE SHOULD CONTINUE WITH ANOTHER SHRINK. */
+ /*--------------------------------------------------------------*/
+ Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
+ Uint32 Thysteresis = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
+ fragrecptr.p->slackCheck = noOfBuckets * Thysteresis;
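+ /*--------------------------------------------------------------*/
+ /* SLACK_CHECK IS THE SHRINK THRESHOLD FOR THE NEW, SMALLER */
+ /* NUMBER OF BUCKETS: NUMBER OF BUCKETS TIMES THE LOAD FACTOR */
+ /* HYSTERESIS (MAXLOADFACTOR - MINLOADFACTOR). */
+ /*--------------------------------------------------------------*/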
+ if (fragrecptr.p->slack > Thysteresis) {
+ /*--------------------------------------------------------------*/
+ /* IT IS STILL NECESSARY TO SHRINK THE FRAGMENT MORE. THIS*/
+ /* CAN HAPPEN WHEN A NUMBER OF SHRINKS GET REJECTED */
+ /* DURING A LOCAL CHECKPOINT. WE START A NEW SHRINK */
+ /* IMMEDIATELY FROM HERE WITHOUT WAITING FOR A COMMIT TO */
+ /* START IT. */
+ /*--------------------------------------------------------------*/
+ if (fragrecptr.p->expandCounter > 0) {
+ jam();
+ /*--------------------------------------------------------------*/
+ /* IT IS VERY IMPORTANT TO NOT TRY TO SHRINK MORE THAN */
+ /* WAS EXPANDED. IF MAXP IS SET TO A VALUE BELOW 63 THEN */
+ /* WE WILL LOSE RECORDS SINCE GETDIRINDEX CANNOT HANDLE */
+ /* SHRINKING BELOW 2^K - 1 (NOW 63). THIS WAS A BUG THAT */
+ /* WAS REMOVED 2000-05-12. */
+ /*--------------------------------------------------------------*/
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ ndbrequire(fragrecptr.p->expandFlag < 2);
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
+ }//if
+ }//if
+ }//if
+ ndbrequire(fragrecptr.p->maxp >= (Uint32)((1 << fragrecptr.p->k) - 1));
+ return;
+}//Dbacc::endofshrinkbucketLab()
+
+/* --------------------------------------------------------------------------------- */
+/* SHRINKCONTAINER */
+/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */
+/* CEXC_CONTAINERLEN (LENGTH OF THE CONTAINER). */
+/* CEXC_CONTAINERPTR (ARRAY INDEX OF THE CONTAINER). */
+/* CEXC_FORWARD (CONTAINER FORWARD (+1) OR BACKWARD (-1)) */
+/* */
+/* DESCRIPTION: ALL ELEMENTS OF THE ACTIVE CONTAINER HAVE TO MOVE TO THE NEW */
+/* CONTAINER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::shrinkcontainer(Signal* signal)
+{
+ Uint32 tshrElementptr;
+ Uint32 tshrRemLen;
+ Uint32 tshrInc;
+ Uint32 tshrTmp;
+ Uint32 tshrIndex;
+ Uint32 guard21;
+
+ tshrRemLen = cexcContainerlen - ZCON_HEAD_SIZE;
+ tshrInc = fragrecptr.p->elementLength;
+ if (cexcForward == ZTRUE) {
+ jam();
+ tshrElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
+ } else {
+ jam();
+ tshrElementptr = cexcContainerptr - 1;
+ }//if
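+ /* --------------------------------------------------------------------------------- */
+ /* IN A FORWARD (LEFT-END) CONTAINER THE ELEMENTS FOLLOW THE ZCON_HEAD_SIZE */
+ /* HEADER WORDS TOWARDS HIGHER ADDRESSES. IN A BACKWARD (RIGHT-END) CONTAINER */
+ /* THE ELEMENTS START AT THE WORD JUST BEFORE THE HEADER AND GROW TOWARDS */
+ /* LOWER ADDRESSES. */
+ /* --------------------------------------------------------------------------------- */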
+ SHR_LOOP:
+ idrOperationRecPtr.i = RNIL;
+ ptrNull(idrOperationRecPtr);
+ /* --------------------------------------------------------------------------------- */
+ /* THE CODE BELOW IS ALL USED TO PREPARE FOR THE CALL TO INSERT_ELEMENT AND */
+ /* HANDLE THE RESULT FROM INSERT_ELEMENT. INSERT_ELEMENT INSERTS THE ELEMENT */
+ /* INTO ANOTHER BUCKET. */
+ /* --------------------------------------------------------------------------------- */
+ arrGuard(tshrElementptr, 2048);
+ tidrElemhead = excPageptr.p->word32[tshrElementptr];
+ if (ElementHeader::getLocked(tidrElemhead)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* IF THE ELEMENT IS LOCKED WE MUST UPDATE THE ELEMENT INFO IN THE OPERATION */
+ /* RECORD OWNING THE LOCK. WE DO THIS BY READING THE OPERATION RECORD POINTER */
+ /* FROM THE ELEMENT HEADER. */
+ /* --------------------------------------------------------------------------------- */
+ idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead);
+ ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // During local checkpoints we must ensure that we restore the element header in
+ // unlocked state and with the hash value part there with tuple status zeroed.
+ // Otherwise a later insert over the same element will write an UNDO log that will
+ // ensure that the now removed element is restored together with its locked element
+ // header and without the hash value part.
+ /* --------------------------------------------------------------------------------- */
+ const Uint32 hv = idrOperationRecPtr.p->hashvaluePart;
+ const Uint32 eh = ElementHeader::setUnlocked(hv, 0);
+ excPageptr.p->word32[tshrElementptr] = eh;
+ }//if
+ }//if
+ tshrTmp = tshrElementptr + cexcForward;
+ guard21 = fragrecptr.p->localkeylen - 1;
+ for (tshrIndex = 0; tshrIndex <= guard21; tshrIndex++) {
+ arrGuard(tshrIndex, 2);
+ arrGuard(tshrTmp, 2048);
+ clocalkey[tshrIndex] = excPageptr.p->word32[tshrTmp];
+ tshrTmp = tshrTmp + cexcForward;
+ }//for
+ tidrPageindex = fragrecptr.p->expReceiveIndex;
+ idrPageptr.i = fragrecptr.p->expReceivePageptr;
+ ptrCheckGuard(idrPageptr, cpagesize, page8);
+ tidrForward = fragrecptr.p->expReceiveForward;
+ insertElement(signal);
+ /* --------------------------------------------------------------------------------- */
+ /* TAKE CARE OF RESULT FROM INSERT_ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ fragrecptr.p->expReceiveIndex = tidrPageindex;
+ fragrecptr.p->expReceivePageptr = idrPageptr.i;
+ fragrecptr.p->expReceiveForward = tidrForward;
+ if (tshrRemLen < tshrInc) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ tshrRemLen = tshrRemLen - tshrInc;
+ if (tshrRemLen != 0) {
+ jam();
+ tshrElementptr = tshrTmp;
+ goto SHR_LOOP;
+ }//if
+}//Dbacc::shrinkcontainer()
+
+/* --------------------------------------------------------------------------------- */
+/* NEXTCONTAINERINFO_EXP */
+ /* DESCRIPTION: THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */
+/* ABOUT NEXT CONTAINER IN THE BUCKET. */
+/* INPUT: CEXC_CONTAINERHEAD */
+/* CEXC_CONTAINERPTR */
+/* EXC_PAGEPTR */
+/* OUTPUT: */
+ /* CEXC_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
+/* EXC_PAGEPTR (PAGE REFERENCE OF NEXT CONTAINER) */
+/* CEXC_FORWARD */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::nextcontainerinfoExp(Signal* signal)
+{
+ tnciNextSamePage = (cexcContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
+ /* THE NEXT CONTAINER IS IN THE SAME PAGE */
+ cexcPageindex = cexcContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((cexcContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ cexcForward = ZTRUE;
+ } else if (((cexcContainerhead >> 7) & 3) == ZRIGHT) {
+ jam();
+ cexcForward = cminusOne;
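+ /* CMINUS_ONE IS THE UINT32 REPRESENTATION OF -1, I.E. THE ELEMENTS OF A */
+ /* RIGHT-END CONTAINER ARE TRAVERSED TOWARDS LOWER ADDRESSES. */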
+ } else {
+ jam();
+ sendSystemerror(signal);
+ cexcForward = 0; /* DUMMY FOR COMPILER */
+ }//if
+ if (tnciNextSamePage == ZFALSE) {
+ jam();
+ /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ arrGuard(cexcContainerptr + 1, 2048);
+ tnciTmp = excPageptr.p->word32[cexcContainerptr + 1];
+ nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tnciTmp >> 8), 256);
+ nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
+ ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
+ excPageptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
+ ptrCheckGuard(excPageptr, cpagesize, page8);
+ }//if
+}//Dbacc::nextcontainerinfoExp()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF EXPAND/SHRINK MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* LOCAL CHECKPOINT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* LCP_FRAGIDREQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER LCP_FRAGIDREQ WITH */
+/* TUSERPTR LQH CONNECTION PTR */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
+/* (E.G. 1,2 OR 3) */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TFID ROOT FRAGMENT ID */
+/* CACTIVE_UNDO_FILE_VERSION UNDO FILE VERSION 0,1,2 OR 3. */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+ /* LCP_FRAGIDREQ START A LOCAL CHECKPOINT OF A FRAGMENT */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execLCP_FRAGIDREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
+ tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
+ /* (E.G. 1,2 OR 3) */
+ tabptr.i = signal->theData[3]; /* TABLE ID = TABLE RECORD POINTER */
+ ptrCheck(tabptr, ctablesize, tabrec);
+ tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
+ cactiveUndoFileVersion = signal->theData[5]; /* UNDO FILE VERSION 0,1,2 OR 3. */
+ tresult = 0;
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
+ ndbrequire(rootfragrecptr.p->rootState == ACTIVEROOT);
+ seizeLcpConnectRec(signal);
+ initLcpConnRec(signal);
+ lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
+ rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
+ lcpConnectptr.p->localCheckPid = tcheckpointid;
+ lcpConnectptr.p->lcpstate = LCP_ACTIVE;
+ rootfragrecptr.p->rootState = LCP_CREATION;
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ /* D6 AT FSOPENREQ =#010003FF. */
+ tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
+ tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
+ /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = tabptr.i; /* TABLE IDENTITY */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0]; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::execLCP_FRAGIDREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* FSOPENCONF OPENFILE CONF */
+/* SENDER: FS, LEVEL B */
+/* ENTER FSOPENCONF WITH */
+/* FS_CONNECTPTR, FS_CONNECTION PTR */
+/* TUSERPOINTER, FILE POINTER */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::lcpFsOpenConfLab(Signal* signal)
+{
+ fsConnectptr.p->fsPtr = tuserptr;
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ fragrecptr.p->activeDataFilePage = 1; /* ZERO IS KEPT FOR PAGE_ZERO */
+ fragrecptr.p->fsConnPtr = fsConnectptr.i;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (rootfragrecptr.p->fragmentptr[0] == fragrecptr.i) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheck(fragrecptr, cfragmentsize, fragmentrec);
+ /* ----------- FILENAME (FILESYSTEM)/D3/DBACC/"T"TABID/"F"FRAGID/"S"VERSIONID.DATA ------------ */
+ /* D6 AT FSOPENREQ =#010003FF. */
+ tlfrTmp1 = 0x010003ff; /* FILE TYPE = .DATA ,VERSION OF FILENAME = 1 */
+ tlfrTmp2 = 0x301; /* D7 CREATE, WRITE ONLY, TRUNCATE TO ZERO */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_WRITE;
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[1]; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentptr[1] == fragrecptr.i);
+ }//if
+ /*---- BOTH DATA FILES ARE OPEN------*/
+ /* ----IF THE UNDO FILE IS CLOSED , OPEN IT.----- */
+ if (cactiveOpenUndoFsPtr != RNIL) {
+ jam();
+ sendLcpFragidconfLab(signal);
+ return;
+ }//if
+ cactiveUndoFilePage = 0;
+ cprevUndoaddress = cminusOne;
+ cundoposition = 0;
+ clastUndoPageIdWritten = 0;
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+ fsConnectptr.p->fsState = WAIT_OPEN_UNDO_LCP;
+ fsConnectptr.p->fsPart = 0; /* FILE INDEX, SECOND FILE IN THE DIRECTORY */
+ cactiveOpenUndoFsPtr = fsConnectptr.i;
+ cactiveRootfrag = rootfragrecptr.i;
+ tlfrTmp1 = 1; /* FILE VERSION */
+ tlfrTmp1 = (tlfrTmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
+ tlfrTmp1 = (tlfrTmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
+ tlfrTmp1 = (tlfrTmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
+ tlfrTmp2 = 0x302; /* D7 CREATE , READ / WRITE , TRUNCATE TO ZERO */
+ /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = cminusOne; /* #FFFFFFFF */
+ signal->theData[3] = cminusOne; /* #FFFFFFFF */
+ signal->theData[4] = cactiveUndoFileVersion;
+ /* A GROUP OF UNDO FILES WHICH ARE UPDATED */
+ signal->theData[5] = tlfrTmp1;
+ signal->theData[6] = tlfrTmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::lcpFsOpenConfLab()
+
+void Dbacc::lcpOpenUndofileConfLab(Signal* signal)
+{
+ ptrGuard(fsConnectptr);
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+ rootfragrecptr.i = cactiveRootfrag;
+ ptrCheck(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ fsConnectptr.p->fsPtr = tuserptr;
+ sendLcpFragidconfLab(signal);
+ return;
+}//Dbacc::lcpOpenUndofileConfLab()
+
+void Dbacc::sendLcpFragidconfLab(Signal* signal)
+{
+ ptrGuard(rootfragrecptr);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ /* ************************ */
+ /* LCP_FRAGIDCONF */
+ /* ************************ */
+ signal->theData[0] = lcpConnectptr.p->lcpUserptr;
+ signal->theData[1] = lcpConnectptr.i;
+ signal->theData[2] = 2;
+ /* NO OF LOCAL FRAGMENTS */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0];
+ signal->theData[4] = rootfragrecptr.p->fragmentid[1];
+ signal->theData[5] = RNIL;
+ signal->theData[6] = RNIL;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_FRAGIDCONF, signal, 7, JBB);
+ return;
+}//Dbacc::sendLcpFragidconfLab()
+
+/* ******************--------------------------------------------------------------- */
+ /* LCP_HOLDOPERATION REQUEST FOR LIST OF STOPPED OPERATIONS */
+/* SENDER: LQH, LEVEL B */
+/* ENTER LCP_HOLDOPREQ WITH */
+/* LCP_CONNECTPTR CONNECTION POINTER */
+/* TFID, LOCAL FRAGMENT ID */
+/* THOLD_PREV_SENT_OP NR OF SENT OPERATIONS AT */
+/* PREVIOUS SIGNALS */
+/* TLQH_POINTER LQH USER POINTER */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+ /* LCP_HOLDOPERATION REQUEST FOR LIST OF STOPPED OPERATIONS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execLCP_HOLDOPREQ(Signal* signal)
+{
+ Uint32 tholdPrevSentOp;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0]; /* CONNECTION POINTER */
+ tfid = signal->theData[1]; /* LOCAL FRAGMENT ID */
+ tholdPrevSentOp = signal->theData[2]; /* NR OF SENT OPERATIONS AT */
+ /* PREVIOUS SIGNALS */
+ tlqhPointer = signal->theData[3]; /* LQH USER POINTER */
+
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tfid) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tfid);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->lcpLqhPtr = tlqhPointer;
+ if (tholdPrevSentOp != 0) {
+ ndbrequire(fragrecptr.p->fragState == SEND_QUE_OP);
+ } else if (tholdPrevSentOp == 0) {
+ jam();
+ fragrecptr.p->fragState = SEND_QUE_OP;
+ fragrecptr.p->stopQueOp = ZTRUE;
+ fragrecptr.p->sentWaitInQueOp = fragrecptr.p->firstWaitInQueOp;
+ }//if
+ tholdSentOp = 0; /* NR OF OPERATION WHICH ARE SENT THIS TIME */
+ operationRecPtr.i = fragrecptr.p->sentWaitInQueOp;
+
+ /* --------------------------------------------- */
+ /* GO THROUGH ALL OPERATIONS IN THE WAIT */
+ /* LIST AND SEND THE LQH CONNECTION PTR OF THE */
+ /* OPERATIONS TO THE LQH BLOCK. MAX 23 OPERATIONS */
+ /* PER SIGNAL */
+ /* --------------------------------------------- */
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ ckeys[tholdSentOp] = operationRecPtr.p->userptr;
+ operationRecPtr.i = operationRecPtr.p->nextQueOp;
+ tholdSentOp++;
+ if ((tholdSentOp >= 23) &&
+ (operationRecPtr.i != RNIL)) {
+ jam();
+ /* ----------------------------------------------- */
+ /* THERE ARE MORE THAN 23 WAITING OPERATIONS. WE */
+ /* HAVE TO SEND THESE 23 AND WAIT FOR THE NEXT SIGNAL */
+ /* ----------------------------------------------- */
+ tholdMore = ZTRUE; /* SECOND DATA WORD AT THE CONF SIGNAL, = MORE */
+ fragrecptr.p->sentWaitInQueOp = operationRecPtr.i;
+ sendholdconfsignalLab(signal);
+ return;
+ }//if
+ }//while
+ /* ----------------------------------------------- */
+ /* OPERATION_REC_PTR = RNIL */
+ /* THERE ARE NO MORE WAITING OPERATIONS, THE STATE OF */
+ /* THE FRAGMENT RECORD IS CHANGED AND THE RETURN */
+ /* SIGNAL IS SENT */
+ /* ----------------------------------------------- */
+ fragrecptr.p->sentWaitInQueOp = RNIL;
+ tholdMore = ZFALSE; /* SECOND DATA AT THE CONF SIGNAL , = NOT MORE */
+ fragrecptr.p->fragState = WAIT_ACC_LCPREQ;
+ sendholdconfsignalLab(signal);
+ return;
+}//Dbacc::execLCP_HOLDOPREQ()
+
+void Dbacc::sendholdconfsignalLab(Signal* signal)
+{
+ tholdMore = (tholdMore << 16) + tholdSentOp;
+ /* SECOND SIGNAL DATA WORD: MORE-FLAG IN THE UPPER 16 BITS, NUMBER OF SENT OPERATIONS IN THE LOWER 16 BITS */
+ /* ************************ */
+ /* LCP_HOLDOPCONF */
+ /* ************************ */
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ signal->theData[1] = tholdMore;
+ signal->theData[2] = ckeys[0];
+ signal->theData[3] = ckeys[1];
+ signal->theData[4] = ckeys[2];
+ signal->theData[5] = ckeys[3];
+ signal->theData[6] = ckeys[4];
+ signal->theData[7] = ckeys[5];
+ signal->theData[8] = ckeys[6];
+ signal->theData[9] = ckeys[7];
+ signal->theData[10] = ckeys[8];
+ signal->theData[11] = ckeys[9];
+ signal->theData[12] = ckeys[10];
+ signal->theData[13] = ckeys[11];
+ signal->theData[14] = ckeys[12];
+ signal->theData[15] = ckeys[13];
+ signal->theData[16] = ckeys[14];
+ signal->theData[17] = ckeys[15];
+ signal->theData[18] = ckeys[16];
+ signal->theData[19] = ckeys[17];
+ signal->theData[20] = ckeys[18];
+ signal->theData[21] = ckeys[19];
+ signal->theData[22] = ckeys[20];
+ signal->theData[23] = ckeys[21];
+ signal->theData[24] = ckeys[22];
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_LCP_HOLDOPCONF, signal, 25, JBA);
+ return;
+}//Dbacc::sendholdconfsignalLab()
+
+/**
+ * execACC_LCPREQ
+ * Perform local checkpoint of a fragment
+ *
+ * SENDER: LQH, LEVEL B
+ * ENTER ACC_LCPREQ WITH
+ * LCP_CONNECTPTR, OPERATION RECORD PTR
+ * TLCP_LQH_CHECK_V, LQH'S LOCAL FRAG CHECK VALUE
+ * TLCP_LOCAL_FRAG_ID, LOCAL FRAG ID
+ *
+ */
+void Dbacc::execACC_LCPREQ(Signal* signal)
+{
+ Uint32 tlcpLocalFragId;
+ Uint32 tlcpLqhCheckV;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0]; // CONNECTION PTR
+ tlcpLqhCheckV = signal->theData[1]; // LQH'S LOCAL FRAG CHECK VALUE
+ tlcpLocalFragId = signal->theData[2]; // LOCAL FRAG ID
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tlcpLocalFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tlcpLocalFragId);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire(fragrecptr.p->fragState == WAIT_ACC_LCPREQ);
+ fragrecptr.p->lcpLqhPtr = tlcpLqhCheckV;
+
+ Page8Ptr zeroPagePtr;
+ seizeLcpPage(zeroPagePtr);
+ fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
+ fragrecptr.p->prevUndoposition = cminusOne;
+ initRootFragPageZero(rootfragrecptr, zeroPagePtr);
+ initFragPageZero(fragrecptr, zeroPagePtr);
+ /*-----------------------------------------------------------------*/
+ /* SEIZE ZERO PAGE FIRST AND THEN SEIZE DATA PAGES IN */
+ /* BACKWARDS ORDER. THIS IS TO ENSURE THAT WE GET THE PAGES */
+ /* IN ORDER. ON WINDOWS NT THIS WILL BE A BENEFIT SINCE WE */
+ /* CAN THEN DO 1 WRITE_FILE INSTEAD OF 8. */
+ /* WHEN WE RELEASE THE PAGES WE RELEASE THEM IN THE OPPOSITE */
+ /* ORDER. */
+ /*-----------------------------------------------------------------*/
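+ /*-----------------------------------------------------------------*/
+ /* THE LOOP CONDITION (Uint32)~taspTmp IS FALSE ONLY WHEN taspTmp */
+ /* HAS WRAPPED AROUND TO 0xFFFFFFFF, SO THE LOOP RUNS FROM */
+ /* ZWRITEPAGESIZE - 1 DOWN TO AND INCLUDING ZERO. */
+ /*-----------------------------------------------------------------*/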
+ for (Uint32 taspTmp = ZWRITEPAGESIZE - 1; (Uint32)~taspTmp; taspTmp--) {
+ Page8Ptr dataPagePtr;
+ jam();
+ ndbrequire(fragrecptr.p->datapages[taspTmp] == RNIL);
+ seizeLcpPage(dataPagePtr);
+ fragrecptr.p->datapages[taspTmp] = dataPagePtr.i;
+ }//for
+ fragrecptr.p->lcpMaxDirIndex = fragrecptr.p->dirsize;
+ fragrecptr.p->lcpMaxOverDirIndex = fragrecptr.p->lastOverIndex;
+ fragrecptr.p->createLcp = ZTRUE;
+ operationRecPtr.i = fragrecptr.p->lockOwnersList;
+ lcp_write_op_to_undolog(signal);
+}
+
+void
+Dbacc::lcp_write_op_to_undolog(Signal* signal)
+{
+ bool delay_continueb= false;
+ Uint32 i, j;
+ for (i= 0; i < 16; i++) {
+ jam();
+ if (remainingUndoPages() <= ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ delay_continueb= true;
+ break;
+ }
+ for (j= 0; j < 32; j++) {
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+
+ if ((operationRecPtr.p->operation == ZINSERT) ||
+ (operationRecPtr.p->elementIsDisappeared == ZTRUE)){
+ /*******************************************************************
+ * Only log inserts and elements that are marked as disappeared.
+ * All other operations update the element header and that is handled
+ * when pages are written to disk
+ ********************************************************************/
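+ // cundoposition is a packed log position: the upper bits select the
+ // undo page (wrapped to the in-memory undo page pool) and the lowest
+ // ZUNDOPAGEINDEXBITS bits give the word index within that page.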
+ undopageptr.i = (cundoposition>>ZUNDOPAGEINDEXBITS) & (cundopagesize-1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+
+ writeUndoOpInfo(signal); /* THE INFORMATION ABOUT THE ELEMENT HEADER, STORED */
+ /* IN THE OP REC, IS WRITTEN TO THE UNDO PAGES */
+ cundoElemIndex = 0; /* DEFAULT VALUE USED BY THE WRITE_UNDO_HEADER SUBROUTINE */
+ writeUndoHeader(signal, RNIL, UndoHeader::ZOP_INFO); /* WRITE THE HEAD OF THE UNDO ELEMENT */
+ checkUndoPages(signal); /* SEND UNDO PAGES TO DISK WHEN A GROUP OF */
+ /* UNDO PAGES, CURRENTLY 8, IS FILLED */
+ }
+ operationRecPtr.i = operationRecPtr.p->nextLockOwnerOp;
+ }
+ if (operationRecPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ }
+ if (operationRecPtr.i != RNIL) {
+ jam();
+ signal->theData[0]= ZLCP_OP_WRITE_RT_BREAK;
+ signal->theData[1]= operationRecPtr.i;
+ signal->theData[2]= fragrecptr.i;
+ signal->theData[3]= lcpConnectptr.i;
+ if (delay_continueb) {
+ jam();
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 10, 4);
+ } else {
+ jam();
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB);
+ }
+ return;
+ }
+
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPSTARTED,
+ signal, 1, JBA);
+
+ fragrecptr.p->activeDataPage = 0;
+ fragrecptr.p->lcpDirIndex = 0;
+ fragrecptr.p->fragState = LCP_SEND_PAGES;
+
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+}
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_SAVE_PAGES A GROUP OF PAGES IS ALLOCATED. THE PAGES AND OVERFLOW */
+ /* PAGES OF THE FRAGMENT ARE COPIED INTO THEM AND SENT TO */
+/* THE DATA FILE OF THE CHECK POINT. */
+/* SENDER: ACC, LEVEL B */
+/* ENTER ACC_SAVE_PAGES WITH */
+/* LCP_CONNECTPTR, CONNECTION RECORD PTR */
+/* FRAGRECPTR FRAGMENT RECORD PTR */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SAVE_PAGES REQUEST TO SEND THE PAGE TO DISK */
+/* ******************------------------------------+ UNDO PAGES */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execACC_SAVE_PAGES(Signal* signal)
+{
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ /* CONNECTION RECORD PTR */
+ fragrecptr.i = signal->theData[1];
+ /* FRAGMENT RECORD PTR */
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (lcpConnectptr.p->lcpstate != LCP_ACTIVE) {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ if (ERROR_INSERTED(3000)) {
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->mytabptr == c_errorInsert3000_TableId){
+ ndbout << "Delay writing of datapages" << endl;
+ // Delay writing of pages
+ jam();
+ sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 1000, 2);
+ return;
+ }
+ }
+ if (clblPageCounter == 0) {
+ jam();
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignalWithDelay(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 100, 2);
+ return;
+ } else {
+ jam();
+ clblPageCounter = clblPageCounter - 1;
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ jam();
+ savepagesLab(signal);
+ return;
+ } else {
+ if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
+ jam();
+ saveOverPagesLab(signal);
+ return;
+ } else {
+ ndbrequire(fragrecptr.p->fragState == LCP_SEND_ZERO_PAGE);
+ jam();
+ saveZeroPageLab(signal);
+ return;
+ }//if
+ }//if
+}//Dbacc::execACC_SAVE_PAGES()
+
+void Dbacc::savepagesLab(Signal* signal)
+{
+ DirRangePtr spDirRangePtr;
+ DirectoryarrayPtr spDirptr;
+ Page8Ptr aspPageptr;
+ Page8Ptr aspCopyPageptr;
+ Uint32 taspDirindex;
+ Uint32 taspDirIndex;
+ Uint32 taspIndex;
+
+ if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->dirsize) ||
+ (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxDirIndex)) {
+ jam();
+ endsavepageLab(signal);
+ return;
+ }//if
+ /* SOME EXPAND PROCESSES HAVE BEEN PERFORMED. */
+ /* THE ADDED PAGES ARE NOT SENT TO DISK */
+ arrGuard(fragrecptr.p->activeDataPage, 8);
+ aspCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
+ ptrCheckGuard(aspCopyPageptr, cpagesize, page8);
+ taspDirindex = fragrecptr.p->lcpDirIndex; /* DIRECTORY OF ACTIVE PAGE */
+ spDirRangePtr.i = fragrecptr.p->directory;
+ taspDirIndex = taspDirindex >> 8;
+ taspIndex = taspDirindex & 0xff;
+ ptrCheckGuard(spDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(taspDirIndex, 256);
+ spDirptr.i = spDirRangePtr.p->dirArray[taspDirIndex];
+ ptrCheckGuard(spDirptr, cdirarraysize, directoryarray);
+ aspPageptr.i = spDirptr.p->pagep[taspIndex];
+ ptrCheckGuard(aspPageptr, cpagesize, page8);
+ ndbrequire(aspPageptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->lcpDirIndex);
+ lcnPageptr = aspPageptr;
+ lcnCopyPageptr = aspCopyPageptr;
+ lcpCopyPage(signal);
+ fragrecptr.p->lcpDirIndex++;
+ fragrecptr.p->activeDataPage++;
+ if (fragrecptr.p->activeDataPage < ZWRITEPAGESIZE) {
+ jam();
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+ return;
+ }//if
+ senddatapagesLab(signal);
+ return;
+}//Dbacc::savepagesLab()
+
+/* FRAGRECPTR:ACTIVE_DATA_PAGE = ZWRITEPAGESIZE */
+/* SEND A GROUP OF PAGES TO DISK */
+void Dbacc::senddatapagesLab(Signal* signal)
+{
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
+ ndbrequire(fragrecptr.p->activeDataPage <= 8);
+ for (Uint32 i = 0; i < fragrecptr.p->activeDataPage; i++) {
+ signal->theData[i + 6] = fragrecptr.p->datapages[i];
+ }//for
+ signal->theData[fragrecptr.p->activeDataPage + 6] = fragrecptr.p->activeDataFilePage;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x2;
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->activeDataPage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+ return;
+}//Dbacc::senddatapagesLab()
+
+void Dbacc::endsavepageLab(Signal* signal)
+{
+ Page8Ptr espPageidptr;
+
+ espPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(espPageidptr, cpagesize, page8);
+ dbgWord32(espPageidptr, ZPAGEZERO_NO_PAGES, fragrecptr.p->lcpDirIndex);
+ espPageidptr.p->word32[ZPAGEZERO_NO_PAGES] = fragrecptr.p->lcpDirIndex;
+ fragrecptr.p->fragState = LCP_SEND_OVER_PAGES;
+ fragrecptr.p->noOfStoredOverPages = 0;
+ fragrecptr.p->lcpDirIndex = 0;
+ saveOverPagesLab(signal);
+ return;
+}//Dbacc::endsavepageLab()
+
+/* ******************--------------------------------------------------------------- */
+ /* ACC_SAVE_OVER_PAGES CONTINUE SAVING THE REMAINING OVERFLOW PAGES. */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::saveOverPagesLab(Signal* signal)
+{
+ DirRangePtr sopDirRangePtr;
+ DirectoryarrayPtr sopOverflowDirptr;
+ Page8Ptr sopPageptr;
+ Page8Ptr sopCopyPageptr;
+ Uint32 tsopDirindex;
+ Uint32 tsopDirInd;
+ Uint32 tsopIndex;
+
+ if ((fragrecptr.p->lcpDirIndex >= fragrecptr.p->lastOverIndex) ||
+ (fragrecptr.p->lcpDirIndex >= fragrecptr.p->lcpMaxOverDirIndex)) {
+ jam();
+ endsaveoverpageLab(signal);
+ return;
+ }//if
+ arrGuard(fragrecptr.p->activeDataPage, 8);
+ sopCopyPageptr.i = fragrecptr.p->datapages[fragrecptr.p->activeDataPage];
+ ptrCheckGuard(sopCopyPageptr, cpagesize, page8);
+ tsopDirindex = fragrecptr.p->lcpDirIndex;
+ sopDirRangePtr.i = fragrecptr.p->overflowdir;
+ tsopDirInd = tsopDirindex >> 8;
+ tsopIndex = tsopDirindex & 0xff;
+ ptrCheckGuard(sopDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tsopDirInd, 256);
+ sopOverflowDirptr.i = sopDirRangePtr.p->dirArray[tsopDirInd];
+ ptrCheckGuard(sopOverflowDirptr, cdirarraysize, directoryarray);
+ sopPageptr.i = sopOverflowDirptr.p->pagep[tsopIndex];
+ fragrecptr.p->lcpDirIndex++;
+ if (sopPageptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(sopPageptr, cpagesize, page8);
+ ndbrequire(sopPageptr.p->word32[ZPOS_PAGE_ID] == tsopDirindex);
+ ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != ZNORMAL_PAGE_TYPE);
+ lcnPageptr = sopPageptr;
+ lcnCopyPageptr = sopCopyPageptr;
+ lcpCopyPage(signal);
+ fragrecptr.p->noOfStoredOverPages++;
+ fragrecptr.p->activeDataPage++;
+ if ((sopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0)) {
+ //ndbrequire(((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZOVERFLOW_PAGE_TYPE);
+ if (((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) ==
+ ZOVERFLOW_PAGE_TYPE) {
+ /*--------------------------------------------------------------------------------*/
+ /* THE PAGE IS EMPTY AND WAITING TO BE RELEASED. IT COULD NOT BE RELEASED */
+ /* EARLIER SINCE IT WAS PART OF A LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------------------------------*/
+ jam();
+ ropPageptr = sopPageptr;
+ releaseOverpage(signal);
+ } else {
+ jam();
+ sendSystemerror(signal);
+ }
+ }//if
+ }
+ if (fragrecptr.p->activeDataPage == ZWRITEPAGESIZE) {
+ jam();
+ senddatapagesLab(signal);
+ return;
+ }//if
+ signal->theData[0] = lcpConnectptr.i;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_SAVE_PAGES, signal, 2, JBB);
+ return;
+}//Dbacc::saveOverPagesLab()
+
+void Dbacc::endsaveoverpageLab(Signal* signal)
+{
+ Page8Ptr esoPageidptr;
+
+ esoPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(esoPageidptr, cpagesize, page8);
+ dbgWord32(esoPageidptr, ZPAGEZERO_NO_OVER_PAGE, fragrecptr.p->noOfStoredOverPages);
+ esoPageidptr.p->word32[ZPAGEZERO_NO_OVER_PAGE] = fragrecptr.p->noOfStoredOverPages;
+ fragrecptr.p->fragState = LCP_SEND_ZERO_PAGE;
+ if (fragrecptr.p->activeDataPage != 0) {
+ jam();
+ senddatapagesLab(signal); /* SEND REMAINING PAGES TO DISK */
+ return;
+ }//if
+ saveZeroPageLab(signal);
+ return;
+}//Dbacc::endsaveoverpageLab()
+
+/* ******************--------------------------------------------------------------- */
+ /* ACC_SAVE_ZERO_PAGE PAGE ZERO IS SENT TO DISK. IT IS THE LAST STAGE OF */
+ /* LCP CREATION. ACC_LCPCONF IS RETURNED. */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::saveZeroPageLab(Signal* signal)
+{
+ Page8Ptr szpPageidptr;
+ Uint32 Tchs;
+ Uint32 Ti;
+
+ fragrecptr.p->createLcp = ZFALSE;
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ szpPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(szpPageidptr, cpagesize, page8);
+ dbgWord32(szpPageidptr, ZPAGEZERO_PREV_UNDOP, fragrecptr.p->prevUndoposition);
+ szpPageidptr.p->word32[ZPAGEZERO_PREV_UNDOP] = fragrecptr.p->prevUndoposition;
+ dbgWord32(szpPageidptr, ZPAGEZERO_NEXT_UNDO_FILE, cactiveUndoFileVersion);
+ szpPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE] = cactiveUndoFileVersion;
+ fragrecptr.p->fragState = WAIT_ZERO_PAGE_STORED;
+
+ /* --------------------------------------------------------------------------------- */
+ // Calculate the checksum and store it for the zero page of the fragment.
+ /* --------------------------------------------------------------------------------- */
+ szpPageidptr.p->word32[ZPOS_CHECKSUM] = 0;
+ Tchs = 0;
+ for (Ti = 0; Ti < 2048; Ti++) {
+ Tchs = Tchs ^ szpPageidptr.p->word32[Ti];
+ }//for
+ szpPageidptr.p->word32[ZPOS_CHECKSUM] = Tchs;
+ dbgWord32(szpPageidptr, ZPOS_CHECKSUM, Tchs);
+
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_DATA;
+ if (clblPageCounter > 0) {
+ jam();
+ clblPageCounter = clblPageCounter - 1;
+ } else {
+ jam();
+ clblPageOver = clblPageOver + 1;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x10;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ /* SYNC FILE AFTER WRITING */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = 1;
+ /* NO OF PAGES */
+ signal->theData[6] = fragrecptr.p->zeroPagePtr;
+ /* ZERO PAGE */
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+ /* ZERO PAGE AT DATA FILE */
+ return;
+}//Dbacc::saveZeroPageLab()
+
+/* ******************--------------------------------------------------------------- */
+/* FSWRITECONF OPENFILE CONF */
+/* ENTER FSWRITECONF WITH SENDER: FS, LEVEL B */
+/* FS_OPPTR FS_CONNECTION PTR */
+/* ******************--------------------------------------------------------------- */
+void Dbacc::lcpCloseDataFileLab(Signal* signal)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ fsConnectptr.p->fsState = LCP_CLOSE_DATA;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ /* CLOSE DATA FILE */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = 0, DO NOT DELETE FILE */
+ return;
+}//Dbacc::lcpCloseDataFileLab()
+
+void Dbacc::checkSyncUndoPagesLab(Signal* signal)
+{
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ releaseFsConnRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ switch (lcpConnectptr.p->syncUndopageState) {
+ case WAIT_NOTHING:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
+ break;
+ case WAIT_ONE_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_TWO_CONF;
+ break;
+ default:
+ jam();
+ sendSystemerror(signal);
+ return;
+ break;
+ }//switch
+
+ /* ACTIVE UNDO PAGE ID */
+ Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
+ tmp1 = tundoPageId - (tundoPageId & (ZWRITE_UNDOPAGESIZE - 1));
+ /* START PAGE OF THE LAST UNDO PAGES GROUP */
+ tmp2 = (tundoPageId - tmp1) + 1; /* NO OF LEFT UNDO PAGES */
+ tmp1 = tmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW IN MEMORY */
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO;
+ fsOpptr.p->fsOpMemPage = tundoPageId; /* RECORD MEMORY PAGE WRITTEN */
+ if (clblPageCounter >= (4 * tmp2)) {
+ jam();
+ clblPageCounter = clblPageCounter - (4 * tmp2);
+ } else {
+ jam();
+ clblPageOver = clblPageOver + ((4 * tmp2) - clblPageCounter);
+ clblPageCounter = 0;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ /* FLAG = START MEM PAGES, START FILE PAGES */
+ /* SYNC FILE AFTER WRITING */
+ signal->theData[3] = 0x11;
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ /* NO OF UNDO PAGES */
+ signal->theData[5] = tmp2;
+ /* FIRST MEMORY PAGE */
+ signal->theData[6] = tmp1;
+ /* ACTIVE PAGE AT UNDO FILE */
+ signal->theData[7] = cactiveUndoFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ return;
+}//Dbacc::checkSyncUndoPagesLab()
+
+void Dbacc::checkSendLcpConfLab(Signal* signal)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ switch (lcpConnectptr.p->syncUndopageState) {
+ case WAIT_ONE_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
+ break;
+ case WAIT_TWO_CONF:
+ jam();
+ lcpConnectptr.p->syncUndopageState = WAIT_ONE_CONF;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ lcpConnectptr.p->noOfLcpConf++;
+ ndbrequire(lcpConnectptr.p->noOfLcpConf <= 2);
+ fragrecptr.p->fragState = ACTIVEFRAG;
+ rlpPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->zeroPagePtr = RNIL;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ jam();
+ if (fragrecptr.p->datapages[i] != RNIL) {
+ jam();
+ rlpPageptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->datapages[i] = RNIL;
+ }//if
+ }//for
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPCONF, signal, 1, JBB);
+ if (lcpConnectptr.p->noOfLcpConf == 2) {
+ jam();
+ releaseLcpConnectRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ }//if
+}//Dbacc::checkSendLcpConfLab()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_CONTOPREQ */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_CONTOPREQ WITH */
+/* LCP_CONNECTPTR */
+/* TMP1 LOCAL FRAG ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CONTOPREQ COMMIT TRANSACTION */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_CONTOPREQ(Signal* signal)
+{
+ Uint32 tcorLocalFrag;
+
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ /* CONNECTION PTR */
+ tcorLocalFrag = signal->theData[1];
+ /* LOCAL FRAG ID */
+ tresult = 0;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == tcorLocalFrag) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == tcorLocalFrag);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ }//if
+ operationRecPtr.i = fragrecptr.p->firstWaitInQueOp;
+ fragrecptr.p->sentWaitInQueOp = RNIL;
+ fragrecptr.p->stopQueOp = ZFALSE;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ if (operationRecPtr.p->opState == WAIT_EXE_OP) {
+ jam();
+ //------------------------------------------------------------
+ // Indicate that we are now a normal waiter in the queue. We
+ // will remove the operation from the queue as part of starting
+ // operation again.
+ //------------------------------------------------------------
+ operationRecPtr.p->opState = WAIT_IN_QUEUE;
+ executeNextOperation(signal);
+ }//if
+ operationRecPtr.i = operationRecPtr.p->nextQueOp;
+ }//while
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_CONTOPCONF, signal, 1, JBA);
+ return; /* ALL QUEUED OPERATIONS ARE RESTARTED IF NEEDED. */
+}//Dbacc::execACC_CONTOPREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* END_LCPREQ END OF LOCAL CHECK POINT */
+/* ENTER END_LCPREQ WITH SENDER: LQH, LEVEL B */
+/* CLQH_PTR, LQH PTR */
+/* CLQH_BLOCK_REF LQH BLOCK REF */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+ /* END_LCPREQ END OF LOCAL CHECK POINT */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execEND_LCPREQ(Signal* signal)
+{
+ jamEntry();
+ clqhPtr = signal->theData[0];
+ /* LQH PTR */
+ clqhBlockRef = signal->theData[1];
+ /* LQH BLOCK REF */
+ tresult = 0;
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = WAIT_CLOSE_UNDO; /* CLOSE FILE AFTER WRITING */
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = ZFALSE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ /* FLAG = 0, DO NOT DELETE FILE */
+ cactiveUndoFileVersion = RNIL;
+ cactiveOpenUndoFsPtr = RNIL;
+ /* ************************ */
+ /* END_LCPCONF */
+ /* ************************ */
+ signal->theData[0] = clqhPtr;
+ sendSignal(clqhBlockRef, GSN_END_LCPCONF, signal, 1, JBB);
+ return;
+}//Dbacc::execEND_LCPREQ()
+
+/*-----------------------------------------------------------------*/
+ /* WHEN WE COPY THE PAGE WE ALSO WRITE THE ELEMENT HEADERS AS */
+/* UNLOCKED IF THEY ARE CURRENTLY LOCKED. */
+/*-----------------------------------------------------------------*/
+void Dbacc::lcpCopyPage(Signal* signal)
+{
+ Uint32 tlcnNextContainer;
+ Uint32 tlcnTmp;
+ Uint32 tlcnConIndex;
+ Uint32 tlcnIndex;
+ Uint32 Tmp1;
+ Uint32 Tmp2;
+ Uint32 Tmp3;
+ Uint32 Tmp4;
+ Uint32 Ti;
+ Uint32 Tchs;
+ Uint32 Tlimit;
+
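+ /*-----------------------------------------------------------------*/
+ /* COPY ALL 2048 WORDS OF THE PAGE IN FOUR INTERLEAVED 16-WORD */
+ /* STRIPES WHILE ACCUMULATING THE XOR CHECKSUM OF THE COPIED WORDS. */
+ /* THE CHECKSUM WORD ITSELF IS CLEARED FIRST SO THAT IT DOES NOT */
+ /* CONTRIBUTE TO THE NEW CHECKSUM. */
+ /*-----------------------------------------------------------------*/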
+ Tchs = 0;
+ lupPageptr.p = lcnCopyPageptr.p;
+ lcnPageptr.p->word32[ZPOS_CHECKSUM] = Tchs;
+ for (Ti = 0; Ti < 32 ; Ti++) {
+ Tlimit = 16 + (Ti << 6);
+ for (tlcnTmp = (Ti << 6); tlcnTmp < Tlimit; tlcnTmp ++) {
+ Tmp1 = lcnPageptr.p->word32[tlcnTmp];
+ Tmp2 = lcnPageptr.p->word32[tlcnTmp + 16];
+ Tmp3 = lcnPageptr.p->word32[tlcnTmp + 32];
+ Tmp4 = lcnPageptr.p->word32[tlcnTmp + 48];
+
+ lcnCopyPageptr.p->word32[tlcnTmp] = Tmp1;
+ lcnCopyPageptr.p->word32[tlcnTmp + 16] = Tmp2;
+ lcnCopyPageptr.p->word32[tlcnTmp + 32] = Tmp3;
+ lcnCopyPageptr.p->word32[tlcnTmp + 48] = Tmp4;
+
+ Tchs = Tchs ^ Tmp1;
+ Tchs = Tchs ^ Tmp2;
+ Tchs = Tchs ^ Tmp3;
+ Tchs = Tchs ^ Tmp4;
+ }//for
+ }//for
+ tlcnChecksum = Tchs;
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
+ /*-----------------------------------------------------------------*/
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ lcpUpdatePage(signal);
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ lcnCopyPageptr.p->word32[ZPOS_CHECKSUM] = tlcnChecksum;
+}//Dbacc::lcpCopyPage()
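+
+/*-----------------------------------------------------------------*/
+/* NOTE (illustration only, not executed): the container offset */
+/* computed above as (n << ZSHIFT_PLUS) - (n << ZSHIFT_MINUS) is */
+/* simply n * ZBUF_SIZE, assuming the constants satisfy */
+/* ZBUF_SIZE = (1 << ZSHIFT_PLUS) - (1 << ZSHIFT_MINUS). E.g. with */
+/* ZSHIFT_PLUS = 5 and ZSHIFT_MINUS = 2 a buffer is 32 - 4 = 28 */
+/* words, so left-side container n starts at ZHEAD_SIZE + n * 28. */
+/*-----------------------------------------------------------------*/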
+
+/* --------------------------------------------------------------------------------- */
+/* THIS SUBROUTINE GOES THROUGH ONE CONTAINER TO CHECK FOR LOCKED ELEMENTS AND */
+/* UPDATES THEM TO ENSURE ALL ELEMENTS ARE UNLOCKED ON DISK. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::lcpUpdatePage(Signal* signal)
+{
+ OperationrecPtr lupOperationRecPtr;
+ Uint32 tlupElemHead;
+ Uint32 tlupElemLen;
+ Uint32 tlupElemStep;
+ Uint32 tlupConLen;
+
+ tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
+ tlupElemLen = fragrecptr.p->elementLength;
+ tlupElemStep = tlupForward * tlupElemLen;
+ while (tlupConLen > ZCON_HEAD_SIZE) {
+ jam();
+ tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
+ if (ElementHeader::getLocked(tlupElemHead)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* WHEN CHANGING THE ELEMENT HEADER WE ALSO HAVE TO UPDATE THE CHECKSUM. IN */
+ /* DOING THIS WE USE THE FORMULA (A XOR B) XOR B = A WHICH MEANS THAT IF WE */
+ /* XOR SOMETHING TWICE WITH THE SAME OPERAND THEN WE RETURN TO THE ORIGINAL */
+ /* VALUE. THEN WE ALSO HAVE TO USE THE NEW ELEMENT HEADER IN THE CHECKSUM */
+ /* CALCULATION. */
+ /* --------------------------------------------------------------------------------- */
+ tlcnChecksum = tlcnChecksum ^ tlupElemHead;
+ lupOperationRecPtr.i = ElementHeader::getOpPtrI(tlupElemHead);
+ ptrCheckGuard(lupOperationRecPtr, coprecsize, operationrec);
+ const Uint32 hv = lupOperationRecPtr.p->hashvaluePart;
+ tlupElemHead = ElementHeader::setUnlocked(hv , 0);
+ arrGuard(tlupElemIndex, 2048);
+ lupPageptr.p->word32[tlupElemIndex] = tlupElemHead;
+ tlcnChecksum = tlcnChecksum ^ tlupElemHead;
+ }//if
+ tlupConLen = tlupConLen - tlupElemLen;
+ tlupElemIndex = tlupElemIndex + tlupElemStep;
+ }//while
+ if (tlupConLen < ZCON_HEAD_SIZE) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+}//Dbacc::lcpUpdatePage()
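+
+/* --------------------------------------------------------------------------------- */
+/* WORKED EXAMPLE (illustration only, not executed): if the page checksum is C and */
+/* a word W_OLD is replaced by W_NEW, the new checksum is C ^ W_OLD ^ W_NEW, since */
+/* (A ^ B) ^ B = A. E.g. C = 0x5A, W_OLD = 0x0F, W_NEW = 0x03 gives */
+/* 0x5A ^ 0x0F ^ 0x03 = 0x56. This is exactly how TLCN_CHECKSUM is maintained above */
+/* when a locked element header is rewritten as unlocked. */
+/* --------------------------------------------------------------------------------- */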
+
+/*-----------------------------------------------------------------*/
+// At a system restart we check that the page does not contain any
+// locks that hinder the system restart procedure.
+/*-----------------------------------------------------------------*/
+void Dbacc::srCheckPage(Signal* signal)
+{
+ Uint32 tlcnNextContainer;
+ Uint32 tlcnConIndex;
+ Uint32 tlcnIndex;
+
+ lupPageptr.p = lcnCopyPageptr.p;
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
+ /*-----------------------------------------------------------------*/
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
+ jam();
+ return;
+ }//if
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
+ }//if
+}//Dbacc::srCheckPage()
+
+/* --------------------------------------------------------------------------------- */
+/* THIS SUBROUTINE GOES THROUGH ONE CONTAINER TO CHECK FOR LOCKED ELEMENTS. */
+/* ANY ELEMENT THAT IS STILL LOCKED AT SYSTEM RESTART IS REPORTED AS AN ERROR. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::srCheckContainer(Signal* signal)
+{
+ Uint32 tlupElemLen;
+ Uint32 tlupElemStep;
+ Uint32 tlupConLen;
+
+ tlupConLen = lupPageptr.p->word32[tlupIndex] >> 26;
+ tlupElemLen = fragrecptr.p->elementLength;
+ tlupElemStep = tlupForward * tlupElemLen;
+ while (tlupConLen > ZCON_HEAD_SIZE) {
+ jam();
+ const Uint32 tlupElemHead = lupPageptr.p->word32[tlupElemIndex];
+ if (ElementHeader::getLocked(tlupElemHead)){
+ jam();
+ //-------------------------------------------------------
+ // This is absolutely undesirable. We have a lock remaining
+ // after the system restart. We send a crash signal that will
+ // enter the trace file.
+ //-------------------------------------------------------
+ tresult = 2;
+ return;
+ }//if
+ tlupConLen = tlupConLen - tlupElemLen;
+ tlupElemIndex = tlupElemIndex + tlupElemStep;
+ }//while
+ if (tlupConLen < ZCON_HEAD_SIZE) {
+ jam();
+ tresult = 3;
+ }//if
+ return;
+}//Dbacc::srCheckContainer()
+
+/* ------------------------------------------------------------------------- */
+/* CHECK_UNDO_PAGES */
+/* DESCRIPTION: CHECKS WHEN A PAGE OR A GROUP OF UNDO PAGES IS FILLED. WHEN */
+/* A PAGE IS FILLED, CUNDOPOSITION WILL BE UPDATED; THE NEW */
+/* POSITION IS THE BEGINNING OF THE NEXT UNDO PAGE. */
+/* IN CASE THAT A GROUP IS FILLED THE PAGES ARE SENT TO DISK, */
+/* AND A NEW GROUP IS CHOSEN. */
+/* ------------------------------------------------------------------------- */
+void Dbacc::checkUndoPages(Signal* signal)
+{
+
+ fragrecptr.p->prevUndoposition = cundoposition;
+ cprevUndoaddress = cundoposition;
+
+ // Calculate active undo page id
+ Uint32 tundoPageId = cundoposition >> ZUNDOPAGEINDEXBITS;
+
+ /**
+ * WE WILL WRITE UNTIL WE HAVE ABOUT 8 KBYTE REMAINING ON THE 32 KBYTE
+ * PAGE. THIS IS TO ENSURE THAT WE DO NOT HAVE ANY UNDO LOG RECORDS THAT PASS
+ * A PAGE BOUNDARY. THIS SIMPLIFIES THE CODE AT THE COST OF SOME INEFFICIENCY.
+ */
+ static const Uint32 ZMAXUNDOPAGEINDEX = 7100;
+ if (tundoindex < ZMAXUNDOPAGEINDEX) {
+ jam();
+ cundoposition = (tundoPageId << ZUNDOPAGEINDEXBITS) + tundoindex;
+ return;
+ }//if
+
+ /**
+ * WE CHECK IF MORE THAN 1 MBYTE OF WRITES ARE OUTSTANDING TO THE UNDO FILE.
+ * IF SO WE HAVE TO CRASH SINCE WE HAVE NO MORE SPACE TO WRITE UNDO LOG
+ * RECORDS.
+ */
+ Uint16 nextUndoPageId = tundoPageId + 1;
+ updateUndoPositionPage(signal, nextUndoPageId << ZUNDOPAGEINDEXBITS);
+
+ if ((tundoPageId & (ZWRITE_UNDOPAGESIZE - 1)) == (ZWRITE_UNDOPAGESIZE - 1)) {
+ jam();
+ /* ---------- SEND A GROUP OF UNDO PAGES TO DISK --------- */
+ fsConnectptr.i = cactiveOpenUndoFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ Uint32 tcupTmp1 = (tundoPageId - ZWRITE_UNDOPAGESIZE) + 1;
+ tcupTmp1 = tcupTmp1 & (cundopagesize - 1); /* 1 MBYTE PAGE WINDOW */
+ seizeFsOpRec(signal);
+ initFsOpRec(signal);
+ fsOpptr.p->fsOpstate = WAIT_WRITE_UNDO_EXIT;
+ fsOpptr.p->fsOpMemPage = tundoPageId;
+ fragrecptr.p->nrWaitWriteUndoExit++;
+ if (clblPageCounter >= 8) {
+ jam();
+ clblPageCounter = clblPageCounter - 8;
+ } else {
+ jam();
+ clblPageOver = clblPageOver + (8 - clblPageCounter);
+ clblPageCounter = 0;
+ }//if
+ /* ************************ */
+ /* FSWRITEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsOpptr.i;
+ signal->theData[3] = 0x1;
+ /* FLAG = START MEM PAGES, START FILE PAGES */
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ signal->theData[5] = ZWRITE_UNDOPAGESIZE;
+ signal->theData[6] = tcupTmp1;
+ signal->theData[7] = cactiveUndoFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+ cactiveUndoFilePage = cactiveUndoFilePage + ZWRITE_UNDOPAGESIZE;
+ }//if
+}//Dbacc::checkUndoPages()
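+
+/* ------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): CUNDOPOSITION packs the undo page */
+/* id in the upper bits and the word index in the lower ZUNDOPAGEINDEXBITS */
+/* bits, i.e. pageId = cundoposition >> ZUNDOPAGEINDEXBITS and */
+/* index = cundoposition & ZUNDOPAGEINDEX_MASK. Assuming */
+/* ZUNDOPAGEINDEXBITS = 13 (an 8192-word, 32 KBYTE page), the position */
+/* 0x0000A123 would mean page 5, word index 0x123. A group of */
+/* ZWRITE_UNDOPAGESIZE pages is flushed to disk once the last page of the */
+/* group has been filled, as done in the branch above. */
+/* ------------------------------------------------------------------------- */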
+
+/* --------------------------------------------------------------------------------- */
+/* UNDO_WRITING_PROCESS */
+/* INPUT: FRAGRECPTR, CUNDO_ELEM_INDEX, DATAPAGEPTR, CUNDOINFOLENGTH */
+/* DESCRIPTION: CALLED WHEN THE PROCESS OF CREATING A LOCAL CHECK POINT HAS */
+/* STARTED. IF THE ACTIVE PAGE HAS NOT ALREADY BEEN SENT TO DISK, THE */
+/* OLD VALUE OF THE ITEM THAT IS GOING TO BE CHANGED IS STORED ON */
+/* THE ACTIVE UNDO PAGE. INFORMATION ABOUT THE UNDO PROCESS IN THE */
+/* BLOCK AND IN THE FRAGMENT IS UPDATED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::undoWritingProcess(Signal* signal)
+{
+ const Uint32 tactivePageDir = datapageptr.p->word32[ZPOS_PAGE_ID];
+ const Uint32 tpageType = (datapageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3;
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ if (tpageType == ZNORMAL_PAGE_TYPE) {
+ /* --------------------------------------------------------------------------- */
+ /* HANDLING OF LOG OF NORMAL PAGES DURING WRITE OF NORMAL PAGES. */
+ /* --------------------------------------------------------------------------- */
+ if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
+ /* ------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ if (tactivePageDir >= fragrecptr.p->lcpMaxDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED SINCE THE START OF THE LOCAL CHECKPOINT.*/
+ /* WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* IN ALL OTHER CASES WE HAVE TO WRITE TO THE UNDO LOG. */
+ /* --------------------------------------------------------------------------- */
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZPAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ }//if
+ } else if (tpageType == ZOVERFLOW_PAGE_TYPE) {
+ /* --------------------------------------------------------------------------------- */
+ /* OVERFLOW PAGE HANDLING DURING WRITE OF NORMAL PAGES. */
+ /* --------------------------------------------------------------------------------- */
+ if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
+ /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
+ /* NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------- */
+ /* ONLY PAGE INFO AND OVERFLOW PAGE INFO CAN BE LOGGED BY THIS ROUTINE. A */
+ /* SERIOUS ERROR. */
+ /* --------------------------------------------------------------------------- */
+ sendSystemerror(signal);
+ }
+ } else {
+ if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* DURING WRITE OF OVERFLOW PAGES WE NEED NOT WORRY ANYMORE ABOUT NORMAL PAGES.*/
+ /* --------------------------------------------------------------------------------- */
+ if (tpageType == ZOVERFLOW_PAGE_TYPE) {
+ if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
+ /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
+ /* NOT EXIST AT START OF LCP. */
+ /* --------------------------------------------------------------------------------- */
+ /*empty*/;
+ } else {
+ jam();
+ undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
+ ptrAss(undopageptr, undopage);
+ theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
+ tundoindex = theadundoindex + ZUNDOHEADSIZE;
+ writeUndoHeader(signal, tactivePageDir, UndoHeader::ZOVER_PAGE_INFO);
+ tundoElemIndex = cundoElemIndex;
+ writeUndoDataInfo(signal);
+ checkUndoPages(signal);
+ }//if
+ }//if
+ }
+ }//if
+ }//if
+}//Dbacc::undoWritingProcess()
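+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): the decision above can be summarised as: */
+/* an undo record is written only when the page both existed at the start of the */
+/* LCP (its directory index is below the lcpMaxDirIndex / lcpMaxOverDirIndex */
+/* captured at LCP start) and has not yet been copied to disk by the checkpoint */
+/* (its index is at or above lcpDirIndex); otherwise the change needs no undo log. */
+/* --------------------------------------------------------------------------------- */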
+
+/* --------------------------------------------------------------------------------- */
+/* OTHER STATES MEAN THAT WE HAVE ALREADY WRITTEN ALL PAGES BUT NOT YET RESET */
+/* THE CREATE_LCP FLAG. */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_DATA_INFO */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::writeUndoDataInfo(Signal* signal)
+{
+ Uint32 twudiIndex;
+ Uint32 guard22;
+
+ guard22 = cundoinfolength;
+ arrGuard((tundoindex + guard22 - 1), 8192);
+ arrGuard((tundoElemIndex + guard22 - 1), 2048);
+ for (twudiIndex = 1; twudiIndex <= guard22; twudiIndex++) {
+ undopageptr.p->undoword[tundoindex] = datapageptr.p->word32[tundoElemIndex];
+ tundoindex++;
+ tundoElemIndex++;
+ }//for
+}//Dbacc::writeUndoDataInfo()
+
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_HEADER */
+/* THE HEAD OF UNDO ELEMENT IS 24 BYTES AND CONTAINS THE FOLLOWING INFORMATION: */
+/* TABLE IDENTITY 32 BITS */
+/* ROOT FRAGMENT IDENTITY 32 BITS */
+/* LOCAL FRAGMENT IDENTITY 32 BITS */
+/* LENGTH OF ELEMENT INFO (BIT 31 - 18) 14 BITS */
+/* INFO TYPE (BIT 17 - 14) 4 BITS */
+/* PAGE INDEX OF THE FIRST FIELD IN THE FRAGMENT (BIT 13 - 0) 14 BITS */
+/* DIRECTORY INDEX OF THE PAGE IN THE FRAGMENT 32 BITS */
+/* ADDRESS OF THE PREVIOUS ELEMENT OF THE FRAGMENT 64 BITS */
+/* ADDRESS OF THE PREVIOUS ELEMENT IN THE UNDO PAGES 64 BITS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::writeUndoHeader(Signal* signal,
+ Uint32 logicalPageId,
+ UndoHeader::UndoHeaderType pageType)
+{
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ arrGuard(theadundoindex + 6, 8192);
+
+ // Set the struct pointer to the right address within the undo page.
+ UndoHeader * const & undoHeaderPtr =
+ (UndoHeader *) &undopageptr.p->undoword[theadundoindex];
+
+ undoHeaderPtr->tableId = rootfragrecptr.p->mytabptr;
+ undoHeaderPtr->rootFragId = rootfragrecptr.p->fragmentid[0] >> 1;
+ undoHeaderPtr->localFragId = fragrecptr.p->myfid;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
+ Uint32 Ttmp = cundoinfolength;
+ Ttmp = (Ttmp << 4) + pageType;
+ Ttmp = Ttmp << 14;
+ undoHeaderPtr->variousInfo = Ttmp + cundoElemIndex;
+ undoHeaderPtr->logicalPageId = logicalPageId;
+ undoHeaderPtr->prevUndoAddressForThisFrag = fragrecptr.p->prevUndoposition;
+ undoHeaderPtr->prevUndoAddress = cprevUndoaddress;
+}//Dbacc::writeUndoHeader()
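+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): the two shifts above pack VARIOUS_INFO as */
+/* (CUNDOINFOLENGTH << 18) | (PAGE_TYPE << 14) | CUNDO_ELEM_INDEX, matching the */
+/* layout in the header comment: length in bits 31-18, info type in bits 17-14 and */
+/* page index in bits 13-0. For example, a length of 5, a type value of 1 and a */
+/* page index of 100 would give (5 << 18) | (1 << 14) | 100. */
+/* --------------------------------------------------------------------------------- */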
+
+/* --------------------------------------------------------------------------------- */
+/* WRITE_UNDO_OP_INFO */
+/* FOR A LOCKED ELEMENT, OPERATION TYPE, UNDO OF ELEMENT HEADER AND THE LENGTH OF*/
+/* THE TUPLE KEY HAVE TO BE SAVED IN UNDO PAGES. IN THIS CASE AN UNDO ELEMENT */
+/* INCLUDES THE FOLLOWING ITEMS. */
+/* OPERATION TYPE 32 BITS */
+/* HASH VALUE 32 BITS */
+/* LENGTH OF THE TUPLE = N 32 BITS */
+/* TUPLE KEYS N * 32 BITS */
+/* */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::writeUndoOpInfo(Signal* signal)
+{
+ Page8Ptr locPageptr;
+
+ arrGuard((tundoindex + 3), 8192);
+ undopageptr.p->undoword[tundoindex] = operationRecPtr.p->operation;
+ undopageptr.p->undoword[tundoindex + 1] = operationRecPtr.p->hashValue;
+ undopageptr.p->undoword[tundoindex + 2] = operationRecPtr.p->tupkeylen;
+ tundoindex = tundoindex + 3;
+ // log localkey1
+ locPageptr.i = operationRecPtr.p->elementPage;
+ ptrCheckGuard(locPageptr, cpagesize, page8);
+ Uint32 Tforward = operationRecPtr.p->elementIsforward;
+ Uint32 TelemPtr = operationRecPtr.p->elementPointer;
+ TelemPtr += Tforward; // ZELEM_HEAD_SIZE
+ arrGuard(tundoindex+1, 8192);
+ undopageptr.p->undoword[tundoindex] = locPageptr.p->word32[TelemPtr];
+ tundoindex++;
+ cundoinfolength = ZOP_HEAD_INFO_LN + 1;
+}//Dbacc::writeUndoOpInfo()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF LOCAL CHECKPOINT MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SR_FRAGIDREQ REQUEST FOR RESTART OF A FRAGMENT */
+/* SENDER: LQH, LEVEL B */
+/* ENTER SR_FRAGIDREQ WITH */
+/* TUSERPTR, LQH CONNECTION PTR */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TCHECKPOINTID, THE CHECKPOINT NUMBER TO USE */
+/* (E.G. 1,2 OR 3) */
+/* TABPTR, TABLE ID = TABLE RECORD POINTER */
+/* TFID, ROOT FRAGMENT ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* SR_FRAGIDREQ REQUEST FOR LIST OF STOPPED OPERATIONS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execSR_FRAGIDREQ(Signal* signal)
+{
+ jamEntry();
+ tuserptr = signal->theData[0]; /* LQH CONNECTION PTR */
+ tuserblockref = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tcheckpointid = signal->theData[2]; /* THE CHECKPOINT NUMBER TO USE */
+ /* (E.G. 1,2 OR 3) */
+ tabptr.i = signal->theData[3];
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ /* TABLE ID = TABLE RECORD POINTER */
+ tfid = signal->theData[4]; /* ROOT FRAGMENT ID */
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ seizeLcpConnectRec(signal);
+ initLcpConnRec(signal);
+
+ ndbrequire(getrootfragmentrec(signal, rootfragrecptr, tfid));
+ rootfragrecptr.p->lcpPtr = lcpConnectptr.i;
+ lcpConnectptr.p->rootrecptr = rootfragrecptr.i;
+ lcpConnectptr.p->localCheckPid = tcheckpointid;
+ for (Uint32 i = 0; i < 2; i++) {
+ Page8Ptr zeroPagePtr;
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[i];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ seizeLcpPage(zeroPagePtr);
+ fragrecptr.p->zeroPagePtr = zeroPagePtr.i;
+ }//for
+
+ /* ---------------------------OPEN THE DATA FILE WHICH BELONGS TO TFID AND TCHECKPOINTID ---- */
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ tfid = rootfragrecptr.p->fragmentid[0];
+ tmp = 0;
+ srOpenDataFileLoopLab(signal);
+
+ return;
+}//Dbacc::execSR_FRAGIDREQ()
+
+void Dbacc::srOpenDataFileLoopLab(Signal* signal)
+{
+ /* D6 AT FSOPENREQ. FILE TYPE = .DATA */
+ tmp1 = 0x010003ff; /* VERSION OF FILENAME = 1 */
+ tmp2 = 0x0; /* D7 DON'T CREATE, READ ONLY */
+ ndbrequire(cfsFirstfreeconnect != RNIL);
+ seizeFsConnectRec(signal);
+
+ fragrecptr.p->fsConnPtr = fsConnectptr.i;
+ fsConnectptr.p->fragrecPtr = fragrecptr.i;
+ fsConnectptr.p->fsState = WAIT_OPEN_DATA_FILE_FOR_READ;
+ fsConnectptr.p->activeFragId = tmp; /* LOCAL FRAG INDEX */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = rootfragrecptr.p->mytabptr; /* TABLE IDENTITY */
+ signal->theData[3] = tfid; /* FRAGMENT IDENTITY */
+ signal->theData[4] = lcpConnectptr.p->localCheckPid; /* CHECKPOINT ID */
+ signal->theData[5] = tmp1;
+ signal->theData[6] = tmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbacc::srOpenDataFileLoopLab()
+
+void Dbacc::srFsOpenConfLab(Signal* signal)
+{
+ fsConnectptr.p->fsState = WAIT_READ_PAGE_ZERO;
+ /* ------------------------ READ ZERO PAGE ---------- */
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0x0;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = 1; /* NO OF PAGES */
+ signal->theData[6] = fragrecptr.p->zeroPagePtr; /* ZERO PAGE */
+ signal->theData[7] = 0; /* PAGE ZERO OF THE DATA FILE */
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+ return;
+}//Dbacc::srFsOpenConfLab()
+
+void Dbacc::srReadPageZeroLab(Signal* signal)
+{
+ Page8Ptr srzPageptr;
+
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ fragrecptr.p->activeDataFilePage = 1;
+ srzPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(srzPageptr, cpagesize, page8);
+ /* --------------------------------------------------------------------------------- */
+ // Check that the checksum of the zero page is ok.
+ /* --------------------------------------------------------------------------------- */
+ ccoPageptr.p = srzPageptr.p;
+ checksumControl(signal, (Uint32)0);
+ if (tresult > 0) {
+ jam();
+ return; // We will crash through a DEBUG_SIG
+ }//if
+
+ ndbrequire(srzPageptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ if (fsConnectptr.p->activeFragId == 0) {
+ jam();
+ rootfragrecptr.p->fragmentid[1] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
+ /* ---------------------------OPEN THE DATA FILE FOR THE NEXT LOCAL FRAGMENT --------------- */
+ tfid = rootfragrecptr.p->fragmentid[1];
+ tmp = 1; /* LOCAL FRAG INDEX */
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ srOpenDataFileLoopLab(signal);
+ return;
+ } else {
+ jam();
+ lcpConnectptr.p->lcpstate = LCP_ACTIVE;
+ signal->theData[0] = lcpConnectptr.p->lcpUserptr;
+ signal->theData[1] = lcpConnectptr.i;
+ signal->theData[2] = 2; /* NO OF LOCAL FRAGMENTS */
+ signal->theData[3] = srzPageptr.p->word32[ZPAGEZERO_FRAGID0];
+ /* ROOTFRAGRECPTR:FRAGMENTID(0) */
+ signal->theData[4] = srzPageptr.p->word32[ZPAGEZERO_FRAGID1];
+ /* ROOTFRAGRECPTR:FRAGMENTID(1) */
+ signal->theData[5] = RNIL;
+ signal->theData[6] = RNIL;
+ signal->theData[7] = rootfragrecptr.p->fragmentptr[0];
+ signal->theData[8] = rootfragrecptr.p->fragmentptr[1];
+ signal->theData[9] = srzPageptr.p->word32[ZPAGEZERO_HASH_CHECK];
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_SR_FRAGIDCONF, signal, 10, JBB);
+ }//if
+ return;
+}//Dbacc::srReadPageZeroLab()
+
+void Dbacc::initFragAdd(Signal* signal,
+ Uint32 rootFragIndex,
+ Uint32 rootIndex,
+ FragmentrecPtr regFragPtr)
+{
+ const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
+ Uint32 lhFragBits = req->lhFragBits + 1;
+ Uint32 minLoadFactor = (req->minLoadFactor * ZBUF_SIZE) / 100;
+ Uint32 maxLoadFactor = (req->maxLoadFactor * ZBUF_SIZE) / 100;
+ if (minLoadFactor >= maxLoadFactor) {
+ jam();
+ minLoadFactor = maxLoadFactor - 1;
+ }//if
+ regFragPtr.p->fragState = ACTIVEFRAG;
+ // NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ
+ regFragPtr.p->myfid = (req->fragId << 1) | rootFragIndex;
+ regFragPtr.p->myroot = rootIndex;
+ regFragPtr.p->myTableId = req->tableId;
+ ndbrequire(req->kValue == 6);
+ regFragPtr.p->k = req->kValue; /* TK_SIZE = 6 IN THIS VERSION */
+ regFragPtr.p->expandCounter = 0;
+
+ /**
+ * Only allow shrink during SR
+ * - to make sure we don't run out of pages during REDO log execution
+ *
+ * Is later restored to 0 by LQH at end of REDO log execution
+ */
+ regFragPtr.p->expandFlag = (getNodeState().getSystemRestartInProgress()?1:0);
+ regFragPtr.p->p = 0;
+ regFragPtr.p->maxp = (1 << req->kValue) - 1;
+ regFragPtr.p->minloadfactor = minLoadFactor;
+ regFragPtr.p->maxloadfactor = maxLoadFactor;
+ regFragPtr.p->slack = (regFragPtr.p->maxp + 1) * maxLoadFactor;
+ regFragPtr.p->lhfragbits = lhFragBits;
+ regFragPtr.p->lhdirbits = 0;
+ regFragPtr.p->hashcheckbit = 0; //lhFragBits;
+ regFragPtr.p->localkeylen = req->localKeyLen;
+ regFragPtr.p->nodetype = (req->reqInfo >> 4) & 0x3;
+ regFragPtr.p->lastOverIndex = 0;
+ regFragPtr.p->dirsize = 1;
+ regFragPtr.p->loadingFlag = ZFALSE;
+ regFragPtr.p->keyLength = req->keyLength;
+ ndbrequire(req->keyLength != 0);
+ regFragPtr.p->elementLength = ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen;
+ Uint32 Tmp1 = (regFragPtr.p->maxp + 1) + regFragPtr.p->p;
+ Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor;
+ Tmp2 = Tmp1 * Tmp2;
+ regFragPtr.p->slackCheck = Tmp2;
+}//Dbacc::initFragAdd()
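+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): the load factors are rescaled from */
+/* percentages to elements per buffer, i.e. (percent * ZBUF_SIZE) / 100. Assuming */
+/* ZBUF_SIZE = 28, a maxLoadFactor of 80 becomes 22 elements per buffer, and with */
+/* kValue = 6 (maxp = 63) the initial slack is (63 + 1) * 22 = 1408 elements. */
+/* --------------------------------------------------------------------------------- */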
+
+void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
+{
+ regFragPtr.p->directory = RNIL;
+ regFragPtr.p->overflowdir = RNIL;
+ regFragPtr.p->fsConnPtr = RNIL;
+ regFragPtr.p->firstOverflowRec = RNIL;
+ regFragPtr.p->lastOverflowRec = RNIL;
+ regFragPtr.p->firstWaitInQueOp = RNIL;
+ regFragPtr.p->lastWaitInQueOp = RNIL;
+ regFragPtr.p->sentWaitInQueOp = RNIL;
+ regFragPtr.p->lockOwnersList = RNIL;
+ regFragPtr.p->firstFreeDirindexRec = RNIL;
+ regFragPtr.p->zeroPagePtr = RNIL;
+
+ regFragPtr.p->activeDataPage = 0;
+ regFragPtr.p->createLcp = ZFALSE;
+ regFragPtr.p->stopQueOp = ZFALSE;
+ regFragPtr.p->hasCharAttr = ZFALSE;
+ regFragPtr.p->nextAllocPage = 0;
+ regFragPtr.p->nrWaitWriteUndoExit = 0;
+ regFragPtr.p->lastUndoIsStored = ZFALSE;
+ regFragPtr.p->loadingFlag = ZFALSE;
+ regFragPtr.p->fragState = FREEFRAG;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ regFragPtr.p->datapages[i] = RNIL;
+ }//for
+ for (Uint32 j = 0; j < 4; j++) {
+ regFragPtr.p->longKeyPageArray[j] = RNIL;
+ }//for
+}//Dbacc::initFragGeneral()
+
+void Dbacc::initFragSr(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
+{
+ regFragPtr.p->prevUndoposition = regPagePtr.p->word32[ZPAGEZERO_PREV_UNDOP];
+ regFragPtr.p->noOfStoredOverPages = regPagePtr.p->word32[ZPAGEZERO_NO_OVER_PAGE];
+ regFragPtr.p->noStoredPages = regPagePtr.p->word32[ZPAGEZERO_NO_PAGES];
+ regFragPtr.p->dirsize = regPagePtr.p->word32[ZPAGEZERO_DIRSIZE];
+ regFragPtr.p->expandCounter = regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER];
+ regFragPtr.p->slack = regPagePtr.p->word32[ZPAGEZERO_SLACK];
+ regFragPtr.p->hashcheckbit = regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT];
+ regFragPtr.p->k = regPagePtr.p->word32[ZPAGEZERO_K];
+ regFragPtr.p->lhfragbits = regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS];
+ regFragPtr.p->lhdirbits = regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS];
+ regFragPtr.p->localkeylen = regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN];
+ regFragPtr.p->maxp = regPagePtr.p->word32[ZPAGEZERO_MAXP];
+ regFragPtr.p->maxloadfactor = regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR];
+ regFragPtr.p->minloadfactor = regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR];
+ regFragPtr.p->myfid = regPagePtr.p->word32[ZPAGEZERO_MYFID];
+ regFragPtr.p->lastOverIndex = regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX];
+ regFragPtr.p->nodetype = regPagePtr.p->word32[ZPAGEZERO_NODETYPE];
+ regFragPtr.p->p = regPagePtr.p->word32[ZPAGEZERO_P];
+ regFragPtr.p->elementLength = regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH];
+ regFragPtr.p->keyLength = regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH];
+ regFragPtr.p->slackCheck = regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK];
+
+ regFragPtr.p->loadingFlag = ZTRUE;
+
+}//Dbacc::initFragSr()
+
+void Dbacc::initFragPageZero(FragmentrecPtr regFragPtr, Page8Ptr regPagePtr)
+{
+ //------------------------------------------------------------------
+ // PREV_UNDOP, NEXT_UNDO_FILE, NO_OVER_PAGE, NO_PAGES
+ // is set at end of copy phase
+ //------------------------------------------------------------------
+ regPagePtr.p->word32[ZPAGEZERO_DIRSIZE] = regFragPtr.p->dirsize;
+ regPagePtr.p->word32[ZPAGEZERO_EXPCOUNTER] = regFragPtr.p->expandCounter;
+ regPagePtr.p->word32[ZPAGEZERO_SLACK] = regFragPtr.p->slack;
+ regPagePtr.p->word32[ZPAGEZERO_HASHCHECKBIT] = regFragPtr.p->hashcheckbit;
+ regPagePtr.p->word32[ZPAGEZERO_K] = regFragPtr.p->k;
+ regPagePtr.p->word32[ZPAGEZERO_LHFRAGBITS] = regFragPtr.p->lhfragbits;
+ regPagePtr.p->word32[ZPAGEZERO_LHDIRBITS] = regFragPtr.p->lhdirbits;
+ regPagePtr.p->word32[ZPAGEZERO_LOCALKEYLEN] = regFragPtr.p->localkeylen;
+ regPagePtr.p->word32[ZPAGEZERO_MAXP] = regFragPtr.p->maxp;
+ regPagePtr.p->word32[ZPAGEZERO_MAXLOADFACTOR] = regFragPtr.p->maxloadfactor;
+ regPagePtr.p->word32[ZPAGEZERO_MINLOADFACTOR] = regFragPtr.p->minloadfactor;
+ regPagePtr.p->word32[ZPAGEZERO_MYFID] = regFragPtr.p->myfid;
+ regPagePtr.p->word32[ZPAGEZERO_LAST_OVER_INDEX] = regFragPtr.p->lastOverIndex;
+ regPagePtr.p->word32[ZPAGEZERO_NODETYPE] = regFragPtr.p->nodetype;
+ regPagePtr.p->word32[ZPAGEZERO_P] = regFragPtr.p->p;
+ regPagePtr.p->word32[ZPAGEZERO_ELEMENT_LENGTH] = regFragPtr.p->elementLength;
+ regPagePtr.p->word32[ZPAGEZERO_KEY_LENGTH] = regFragPtr.p->keyLength;
+ regPagePtr.p->word32[ZPAGEZERO_SLACK_CHECK] = regFragPtr.p->slackCheck;
+}//Dbacc::initFragPageZero()
+
+void Dbacc::initRootFragPageZero(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
+{
+ regPagePtr.p->word32[ZPAGEZERO_TABID] = rootPtr.p->mytabptr;
+ regPagePtr.p->word32[ZPAGEZERO_FRAGID0] = rootPtr.p->fragmentid[0];
+ regPagePtr.p->word32[ZPAGEZERO_FRAGID1] = rootPtr.p->fragmentid[1];
+ regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK] = rootPtr.p->roothashcheck;
+ regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS] = rootPtr.p->noOfElements;
+}//Dbacc::initRootFragPageZero()
+
+void Dbacc::initRootFragSr(RootfragmentrecPtr rootPtr, Page8Ptr regPagePtr)
+{
+ rootPtr.p->roothashcheck = regPagePtr.p->word32[ZPAGEZERO_HASH_CHECK];
+ rootPtr.p->noOfElements = regPagePtr.p->word32[ZPAGEZERO_NO_OF_ELEMENTS];
+}//Dbacc::initRootFragSr()
+
+/* ******************--------------------------------------------------------------- */
+/* ACC_SRREQ SYSTEM RESTART OF A LOCAL CHECK POINT */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_SRREQ WITH */
+/* LCP_CONNECTPTR, OPERATION RECORD PTR */
+/* TMP2, LQH'S LOCAL FRAG CHECK VALUE */
+/* TFID, LOCAL FRAG ID */
+/* TMP1, LOCAL CHECKPOINT ID */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SRREQ PERFORM A LOCAL CHECK POINT */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_SRREQ(Signal* signal)
+{
+ Page8Ptr asrPageidptr;
+ jamEntry();
+ lcpConnectptr.i = signal->theData[0];
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ Uint32 lqhPtr = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ Uint32 lcpId = signal->theData[3];
+ tresult = 0;
+ ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE);
+ rootfragrecptr.i = lcpConnectptr.p->rootrecptr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->fragmentid[0] == fragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ } else {
+ ndbrequire(rootfragrecptr.p->fragmentid[1] == fragId);
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ }//if
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ fragrecptr.p->lcpLqhPtr = lqhPtr;
+ fragrecptr.p->localCheckpId = lcpId;
+ asrPageidptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(asrPageidptr, cpagesize, page8);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_TABID] == rootfragrecptr.p->mytabptr);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID0] == rootfragrecptr.p->fragmentid[0]);
+ ndbrequire(asrPageidptr.p->word32[ZPAGEZERO_FRAGID1] == rootfragrecptr.p->fragmentid[1]);
+ initRootFragSr(rootfragrecptr, asrPageidptr);
+ initFragSr(fragrecptr, asrPageidptr);
+ for (Uint32 i = 0; i < ZMAX_UNDO_VERSION; i++) {
+ jam();
+ if (csrVersList[i] != RNIL) {
+ jam();
+ srVersionPtr.i = csrVersList[i];
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ if (fragrecptr.p->localCheckpId == srVersionPtr.p->checkPointId) {
+ jam();
+ ndbrequire(srVersionPtr.p->checkPointId == asrPageidptr.p->word32[ZPAGEZERO_NEXT_UNDO_FILE]);
+ /*--------------------------------------------------------------------------------*/
+ /* SINCE -1 IS THE END-OF-LOG CODE WE MUST TREAT IT WITH CARE. IN */
+ /* COMPARISONS IT IS LARGER THAN EVERYTHING ELSE, BUT IT SHOULD BE TREATED */
+ /* AS THE SMALLEST POSSIBLE VALUE, MEANING EMPTY. */
+ /*--------------------------------------------------------------------------------*/
+ if (fragrecptr.p->prevUndoposition != cminusOne) {
+ if (srVersionPtr.p->prevAddress < fragrecptr.p->prevUndoposition) {
+ jam();
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ } else if (srVersionPtr.p->prevAddress == cminusOne) {
+ jam();
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ }//if
+ }//if
+ srAllocPage0011Lab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ seizeSrVerRec(signal);
+ srVersionPtr.p->checkPointId = fragrecptr.p->localCheckpId;
+ srVersionPtr.p->prevAddress = fragrecptr.p->prevUndoposition;
+ csrVersList[i] = srVersionPtr.i;
+ srAllocPage0011Lab(signal);
+ return;
+ }//if
+ }//for
+ ndbrequire(false);
+}//Dbacc::execACC_SRREQ()
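+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): csrVersList keeps one SR-version record */
+/* per distinct local checkpoint id encountered during restart. For each such */
+/* checkpoint the record remembers the highest prevUndoposition seen across the */
+/* fragments (with cminusOne, the end-of-log marker, treated as the smallest */
+/* value), so that undo execution can later start from the newest undo record of */
+/* that checkpoint. */
+/* --------------------------------------------------------------------------------- */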
+
+void
+Dbacc::releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId){
+ Ptr<struct DirRange> dirRangePtr;
+ dirRangePtr.i = fragP->directory;
+ ptrCheckGuard(dirRangePtr, cdirrangesize, dirRange);
+
+ const Uint32 lp1 = logicalPageId >> 8;
+ const Uint32 lp2 = logicalPageId & 0xFF;
+ ndbrequire(lp1 < 256);
+
+ Ptr<struct Directoryarray> dirArrPtr;
+ dirArrPtr.i = dirRangePtr.p->dirArray[lp1];
+ ptrCheckGuard(dirArrPtr, cdirarraysize, directoryarray);
+
+ const Uint32 physicalPageId = dirArrPtr.p->pagep[lp2];
+
+ rpPageptr.i = physicalPageId;
+ ptrCheckGuard(rpPageptr, cpagesize, page8);
+ releasePage(0);
+
+ dirArrPtr.p->pagep[lp2] = RNIL;
+}
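+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): a logical page id is resolved in two */
+/* steps above: lp1 = logicalPageId >> 8 selects one of the 256 Directoryarray */
+/* entries in the fragment's DirRange, and lp2 = logicalPageId & 0xFF selects the */
+/* physical page within that Directoryarray. E.g. logical page 0x1234 is found at */
+/* dirArray[0x12]->pagep[0x34]. */
+/* --------------------------------------------------------------------------------- */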
+
+void Dbacc::srAllocPage0011Lab(Signal* signal)
+{
+ releaseLogicalPage(fragrecptr.p, 0);
+
+#if JONAS
+ ndbrequire(cfirstfreeDirrange != RNIL);
+ seizeDirrange(signal);
+ fragrecptr.p->directory = newDirRangePtr.i;
+ ndbrequire(cfirstfreeDirrange != RNIL);
+ seizeDirrange(signal);
+ fragrecptr.p->overflowdir = newDirRangePtr.i;
+ seizeDirectory(signal);
+ ndbrequire(tresult < ZLIMIT_OF_ERROR);
+ newDirRangePtr.p->dirArray[0] = sdDirptr.i;
+#endif
+
+ fragrecptr.p->nextAllocPage = 0;
+ fragrecptr.p->fragState = SR_READ_PAGES;
+ srReadPagesLab(signal);
+ return;
+}//Dbacc::srAllocPage0011Lab()
+
+void Dbacc::srReadPagesLab(Signal* signal)
+{
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noStoredPages) {
+ /*--------------------------------------------------------------------------------*/
+ /* WE HAVE NOW READ ALL NORMAL PAGES FROM THE FILE. */
+ /*--------------------------------------------------------------------------------*/
+ if (fragrecptr.p->nextAllocPage == fragrecptr.p->dirsize) {
+ jam();
+ /*--------------------------------------------------------------------------------*/
+ /* WE HAVE NOW READ ALL NORMAL PAGES AND ALLOCATED ALL THE NEEDED PAGES. */
+ /*--------------------------------------------------------------------------------*/
+ fragrecptr.p->nextAllocPage = 0; /* THE NEXT OVERFLOW PAGE WHICH WILL BE READ */
+ fragrecptr.p->fragState = SR_READ_OVER_PAGES;
+ srReadOverPagesLab(signal);
+ } else {
+ ndbrequire(fragrecptr.p->nextAllocPage < fragrecptr.p->dirsize);
+ jam();
+ /*--------------------------------------------------------------------------------*/
+ /* WE NEEDED TO ALLOCATE PAGES THAT WERE DEALLOCATED DURING THE LOCAL */
+ /* CHECKPOINT. */
+ /* ALLOCATE THE PAGE AND INITIALISE IT. THEN WE INSERT A REAL-TIME BREAK. */
+ /*--------------------------------------------------------------------------------*/
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ tipPageId = fragrecptr.p->nextAllocPage;
+ inpPageptr.i = spPageptr.i;
+ ptrCheckGuard(inpPageptr, cpagesize, page8);
+ initPage(signal);
+ fragrecptr.p->noOfExpectedPages = 1;
+ fragrecptr.p->datapages[0] = spPageptr.i;
+ signal->theData[0] = ZSR_READ_PAGES_ALLOC;
+ signal->theData[1] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ return;
+ }//if
+ Uint32 limitLoop;
+ if ((fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
+ jam();
+ limitLoop = fragrecptr.p->noStoredPages - fragrecptr.p->nextAllocPage;
+ } else {
+ jam();
+ limitLoop = ZWRITEPAGESIZE;
+ }//if
+ ndbrequire(limitLoop <= 8);
+ for (Uint32 i = 0; i < limitLoop; i++) {
+ jam();
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ fragrecptr.p->datapages[i] = spPageptr.i;
+ signal->theData[i + 6] = spPageptr.i;
+ }//for
+ signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
+ fragrecptr.p->noOfExpectedPages = limitLoop;
+ /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = WAIT_READ_DATA;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 2;
+ /* FLAG = LIST MEM PAGES, RANGE OF FILE PAGES */
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->noOfExpectedPages;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+ return;
+}//Dbacc::srReadPagesLab()
+
+void Dbacc::storeDataPageInDirectoryLab(Signal* signal)
+{
+ fragrecptr.p->activeDataFilePage += fragrecptr.p->noOfExpectedPages;
+ srReadPagesAllocLab(signal);
+ return;
+}//Dbacc::storeDataPageInDirectoryLab()
+
+void Dbacc::srReadPagesAllocLab(Signal* signal)
+{
+ DirRangePtr srpDirRangePtr;
+ DirectoryarrayPtr srpDirptr;
+ DirectoryarrayPtr srpOverflowDirptr;
+ Page8Ptr srpPageidptr;
+
+ if (fragrecptr.p->fragState == SR_READ_PAGES) {
+ jam();
+ for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
+ jam();
+ tmpP = fragrecptr.p->nextAllocPage;
+ srpDirRangePtr.i = fragrecptr.p->directory;
+ tmpP2 = tmpP >> 8;
+ tmp = tmpP & 0xff;
+ ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ srpDirptr.i = sdDirptr.i;
+ srpDirRangePtr.p->dirArray[tmpP2] = srpDirptr.i;
+ } else {
+ jam();
+ srpDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
+ }//if
+ ptrCheckGuard(srpDirptr, cdirarraysize, directoryarray);
+ arrGuard(i, 8);
+ srpDirptr.p->pagep[tmp] = fragrecptr.p->datapages[i];
+ srpPageidptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(srpPageidptr, cpagesize, page8);
+ ndbrequire(srpPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == 0);
+ ccoPageptr.p = srpPageidptr.p;
+ checksumControl(signal, (Uint32)1);
+ if (tresult > 0) {
+ jam();
+ return; // We will crash through a DEBUG_SIG
+ }//if
+ dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ fragrecptr.p->datapages[i] = RNIL;
+ fragrecptr.p->nextAllocPage++;
+ }//for
+ srReadPagesLab(signal);
+ return;
+ } else {
+ ndbrequire(fragrecptr.p->fragState == SR_READ_OVER_PAGES);
+ for (Uint32 i = 0; i < fragrecptr.p->noOfExpectedPages; i++) {
+ jam();
+ arrGuard(i, 8);
+ srpPageidptr.i = fragrecptr.p->datapages[i];
+ ptrCheckGuard(srpPageidptr, cpagesize, page8);
+ tmpP = srpPageidptr.p->word32[ZPOS_PAGE_ID]; /* DIR INDEX OF THE OVERFLOW PAGE */
+ /*--------------------------------------------------------------------------------*/
+ /* IT IS POSSIBLE THAT WE HAVE LOGICAL PAGES WHICH ARE NOT PART OF THE LOCAL*/
+ /* CHECKPOINT. THUS WE USE THE LOGICAL PAGE ID FROM THE PAGE HERE. */
+ /*--------------------------------------------------------------------------------*/
+ srpDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(srpDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ if (srpDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ jam();
+ seizeDirectory(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ srpDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
+ }//if
+ srpOverflowDirptr.i = srpDirRangePtr.p->dirArray[tmpP2];
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 0);
+ ndbrequire(((srpPageidptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != 3);
+ ptrCheckGuard(srpOverflowDirptr, cdirarraysize, directoryarray);
+ ndbrequire(srpOverflowDirptr.p->pagep[tmpP] == RNIL);
+ srpOverflowDirptr.p->pagep[tmpP] = srpPageidptr.i;
+ ccoPageptr.p = srpPageidptr.p;
+ checksumControl(signal, (Uint32)1);
+ ndbrequire(tresult == 0);
+ dbgWord32(srpPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ srpPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ fragrecptr.p->nextAllocPage++;
+ }//for
+ srReadOverPagesLab(signal);
+ return;
+ }//if
+}//Dbacc::srReadPagesAllocLab()
+
+void Dbacc::srReadOverPagesLab(Signal* signal)
+{
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->noOfStoredOverPages) {
+ fragrecptr.p->nextAllocPage = 0;
+ if (fragrecptr.p->prevUndoposition == cminusOne) {
+ jam();
+ /* ************************ */
+ /* ACC_OVER_REC */
+ /* ************************ */
+ /*--------------------------------------------------------------------------------*/
+ /* UPDATE FREE LIST OF OVERFLOW PAGES AS PART OF SYSTEM RESTART AFTER */
+ /* READING PAGES AND EXECUTING THE UNDO LOG. */
+ /*--------------------------------------------------------------------------------*/
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+ } else {
+ jam();
+ srCloseDataFileLab(signal);
+ }//if
+ return;
+ }//if
+ Uint32 limitLoop;
+ if ((fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage) < ZWRITEPAGESIZE) {
+ jam();
+ limitLoop = fragrecptr.p->noOfStoredOverPages - fragrecptr.p->nextAllocPage;
+ } else {
+ jam();
+ limitLoop = ZWRITEPAGESIZE;
+ }//if
+ ndbrequire(limitLoop <= 8);
+ for (Uint32 i = 0; i < limitLoop; i++) {
+ jam();
+ seizePage(signal);
+ ndbrequire(tresult <= ZLIMIT_OF_ERROR);
+ fragrecptr.p->datapages[i] = spPageptr.i;
+ signal->theData[i + 6] = spPageptr.i;
+ }//for
+ fragrecptr.p->noOfExpectedPages = limitLoop;
+ signal->theData[limitLoop + 6] = fragrecptr.p->activeDataFilePage;
+ /* -----------------SEND READ PAGES SIGNAL TO THE FILE MANAGER --------- */
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = WAIT_READ_DATA;
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 2;
+ signal->theData[4] = ZPAGE8_BASE_ADD;
+ signal->theData[5] = fragrecptr.p->noOfExpectedPages;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+ return;
+}//Dbacc::srReadOverPagesLab()
+
+void Dbacc::srCloseDataFileLab(Signal* signal)
+{
+ fsConnectptr.i = fragrecptr.p->fsConnPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ fsConnectptr.p->fsState = SR_CLOSE_DATA;
+ /* ************************ */
+ /* FSCLOSEREQ */
+ /* ************************ */
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ return;
+}//Dbacc::srCloseDataFileLab()
+
+/* ************************ */
+/* ACC_SRCONF */
+/* ************************ */
+void Dbacc::sendaccSrconfLab(Signal* signal)
+{
+ fragrecptr.i = fsConnectptr.p->fragrecPtr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ releaseFsConnRec(signal);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ lcpConnectptr.i = rootfragrecptr.p->lcpPtr;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ fragrecptr.p->fragState = ACTIVEFRAG;
+ fragrecptr.p->fsConnPtr = RNIL;
+ for (Uint32 i = 0; i < ZWRITEPAGESIZE; i++) {
+ fragrecptr.p->datapages[i] = RNIL;
+ }//for
+ rlpPageptr.i = fragrecptr.p->zeroPagePtr;
+ ptrCheckGuard(rlpPageptr, cpagesize, page8);
+ releaseLcpPage(signal);
+ fragrecptr.p->zeroPagePtr = RNIL;
+ signal->theData[0] = fragrecptr.p->lcpLqhPtr;
+ sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_SRCONF, signal, 1, JBB);
+ lcpConnectptr.p->noOfLcpConf++;
+ if (lcpConnectptr.p->noOfLcpConf == 2) {
+ jam();
+ releaseLcpConnectRec(signal);
+ rootfragrecptr.p->lcpPtr = RNIL;
+ rootfragrecptr.p->rootState = ACTIVEROOT;
+ }//if
+ return;
+}//Dbacc::sendaccSrconfLab()
+
+/* --------------------------------------------------------------------------------- */
+/* CHECKSUM_CONTROL */
+/* INPUT: CCO_PAGEPTR */
+/* OUTPUT: TRESULT */
+/* */
+/* CHECK THAT CHECKSUM IN PAGE IS CORRECT TO ENSURE THAT NO ONE HAS CORRUPTED */
+/* THE PAGE INFORMATION. WHEN CALCULATING THE CHECKSUM WE REMOVE THE CHECKSUM */
+/* ITSELF FROM THE CHECKSUM BY XOR'ING THE CHECKSUM TWICE. WHEN CALCULATING */
+/* THE CHECKSUM THE CHECKSUM WORD IS ZERO WHICH MEANS NO CHANGE FROM XOR'ING. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::checksumControl(Signal* signal, Uint32 checkPage)
+{
+ Uint32 Tchs;
+ Uint32 tccoIndex;
+ Uint32 Ti;
+ Uint32 Tmp1;
+ Uint32 Tmp2;
+ Uint32 Tmp3;
+ Uint32 Tmp4;
+ Uint32 Tlimit;
+
+ Tchs = 0;
+ for (Ti = 0; Ti < 32 ; Ti++) {
+ Tlimit = 16 + (Ti << 6);
+ for (tccoIndex = (Ti << 6); tccoIndex < Tlimit; tccoIndex ++) {
+ Tmp1 = ccoPageptr.p->word32[tccoIndex];
+ Tmp2 = ccoPageptr.p->word32[tccoIndex + 16];
+ Tmp3 = ccoPageptr.p->word32[tccoIndex + 32];
+ Tmp4 = ccoPageptr.p->word32[tccoIndex + 48];
+
+ Tchs = Tchs ^ Tmp1;
+ Tchs = Tchs ^ Tmp2;
+ Tchs = Tchs ^ Tmp3;
+ Tchs = Tchs ^ Tmp4;
+ }//for
+ }//for
+ if (Tchs == 0) {
+ tresult = 0;
+ if (checkPage != 0) {
+ jam();
+ lcnCopyPageptr.p = ccoPageptr.p;
+ srCheckPage(signal);
+ }//if
+ } else {
+ tresult = 1;
+ }//if
+ if (tresult != 0) {
+ jam();
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ signal->theData[0] = RNIL;
+ signal->theData[1] = rootfragrecptr.p->mytabptr;
+ signal->theData[2] = fragrecptr.p->myfid;
+ signal->theData[3] = ccoPageptr.p->word32[ZPOS_PAGE_ID];
+ signal->theData[4] = tlupElemIndex;
+ signal->theData[5] = ccoPageptr.p->word32[ZPOS_PAGE_TYPE];
+ signal->theData[6] = tresult;
+ sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 7, JBA);
+ }//if
+}//Dbacc::checksumControl()
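+
+/* --------------------------------------------------------------------------------- */
+/* NOTE (illustration only, not executed): the loop above XORs all 2048 words of */
+/* the page (32 chunks of 64 words, read with a stride of 16 to unroll by four). */
+/* Because the stored checksum word itself is included, an uncorrupted page XORs */
+/* to zero, which is the TCHS == 0 test used to set TRESULT. */
+/* --------------------------------------------------------------------------------- */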
+
+/* ******************--------------------------------------------------------------- */
+/* START_RECREQ REQUEST TO START UNDO PROCESS */
+/* SENDER: LQH, LEVEL B */
+/* ENTER START_RECREQ WITH */
+/* CLQH_PTR, LQH CONNECTION PTR */
+/* CLQH_BLOCK_REF, LQH BLOCK REFERENCE */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* START_RECREQ REQUEST TO START UNDO PROCESS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execSTART_RECREQ(Signal* signal)
+{
+ jamEntry();
+ clqhPtr = signal->theData[0]; /* LQH CONNECTION PTR */
+ clqhBlockRef = signal->theData[1]; /* LQH BLOCK REFERENCE */
+ tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */
+ for (int i = 0; i < UndoHeader::ZNO_UNDORECORD_TYPES; i++)
+ cSrUndoRecords[i] = 0;
+ startUndoLab(signal);
+ return;
+}//Dbacc::execSTART_RECREQ()
+
+void Dbacc::startUndoLab(Signal* signal)
+{
+ cundoLogActive = ZTRUE;
+ /* ----- OPEN UNDO FILES --------- */
+ for (tmp = 0; tmp <= ZMAX_UNDO_VERSION - 1; tmp++) {
+ jam();
+ if (csrVersList[tmp] != RNIL) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* SELECT THE NEXT SYSTEM RESTART RECORD WHICH CONTAINS AN UNDO LOG */
+ /* THAT NEEDS TO BE EXECUTED AND SET UP THE DATA TO EXECUTE IT. */
+ /*---------------------------------------------------------------------------*/
+ srVersionPtr.i = csrVersList[tmp];
+ csrVersList[tmp] = RNIL;
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ cactiveUndoFilePage = srVersionPtr.p->prevAddress >> 13;
+ cprevUndoaddress = srVersionPtr.p->prevAddress;
+ cactiveCheckpId = srVersionPtr.p->checkPointId;
+
+ releaseSrRec(signal);
+ startActiveUndo(signal);
+ return;
+ }//if
+ }//for
+
+ // Send report of how many undo log records were executed
+ signal->theData[0] = NDB_LE_UNDORecordsExecuted;
+ signal->theData[1] = DBACC; // From block
+ signal->theData[2] = 0; // Total records executed
+ for (int i = 0; i < 10; i++){
+ if (i < UndoHeader::ZNO_UNDORECORD_TYPES){
+ signal->theData[i+3] = cSrUndoRecords[i];
+ signal->theData[2] += cSrUndoRecords[i];
+ }else{
+ signal->theData[i+3] = 0;
+ }
+ }
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 12, JBB);
+
+ /* ******************************< */
+ /* START_RECCONF */
+ /* ******************************< */
+ /*---------------------------------------------------------------------------*/
+ /* REPORT COMPLETION OF UNDO LOG EXECUTION. */
+ /*---------------------------------------------------------------------------*/
+ cundoLogActive = ZFALSE;
+ signal->theData[0] = clqhPtr;
+ sendSignal(clqhBlockRef, GSN_START_RECCONF, signal, 1, JBB);
+ /* LQH CONNECTION PTR */
+ return;
+}//Dbacc::startUndoLab()
+
+/*---------------------------------------------------------------------------*/
+/* START THE UNDO OF AN UNDO LOG FILE BY OPENING THE UNDO LOG FILE. */
+/*---------------------------------------------------------------------------*/
+void Dbacc::startActiveUndo(Signal* signal)
+{
+ if (cprevUndoaddress == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* THERE WAS NO UNDO LOG INFORMATION IN THIS LOG FILE. WE GET THE NEXT ONE */
+ /* OR REPORT COMPLETION. */
+ /*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZSTART_UNDO;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* OPEN THE LOG FILE PERTAINING TO THIS UNDO LOG. */
+ /*---------------------------------------------------------------------------*/
+ if (cfsFirstfreeconnect == RNIL) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ seizeFsConnectRec(signal);
+ cactiveSrFsPtr = fsConnectptr.i;
+ fsConnectptr.p->fsState = OPEN_UNDO_FILE_SR;
+ fsConnectptr.p->fsPart = 0;
+ tmp1 = 1; /* FILE VERSION ? */
+ tmp1 = (tmp1 << 8) + ZLOCALLOGFILE; /* .LOCLOG = 2 */
+ tmp1 = (tmp1 << 8) + 4; /* ROOT DIRECTORY = D4 */
+ tmp1 = (tmp1 << 8) + fsConnectptr.p->fsPart; /* P2 */
+ tmp2 = 0x0; /* D7 DON'T CREATE, READ ONLY */
+ /* DON'T TRUNCATE TO ZERO */
+ /* ---FILE NAME "D4"/"DBACC"/LCP_CONNECTPTR:LOCAL_CHECK_PID/FS_CONNECTPTR:FS_PART".LOCLOG-- */
+ /* ************************ */
+ /* FSOPENREQ */
+ /* ************************ */
+ signal->theData[0] = cownBlockref;
+ signal->theData[1] = fsConnectptr.i;
+ signal->theData[2] = cminusOne; /* #FFFFFFFF */
+ signal->theData[3] = cminusOne; /* #FFFFFFFF */
+ signal->theData[4] = cactiveCheckpId; /* CHECKPOINT VERSION */
+ signal->theData[5] = tmp1;
+ signal->theData[6] = tmp2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ }//if
+}//Dbacc::startActiveUndo()
+
+/* ------- READ A GROUP OF UNDO PAGES --------------- */
+void Dbacc::srStartUndoLab(Signal* signal)
+{
+ /*---------------------------------------------------------------------------*/
+ /* ALL LOG FILES HAVE BEEN OPENED. WE CAN NOW READ DATA FROM THE LAST */
+ /* PAGE IN THE LAST LOG FILE AND BACKWARDS UNTIL WE REACH THE VERY */
+ /* FIRST UNDO LOG RECORD. */
+ /*---------------------------------------------------------------------------*/
+ if (cactiveUndoFilePage >= ZWRITE_UNDOPAGESIZE) {
+ jam();
+ tmp1 = ZWRITE_UNDOPAGESIZE; /* NO OF READ UNDO PAGES */
+ cactiveSrUndoPage = ZWRITE_UNDOPAGESIZE - 1; /* LAST PAGE */
+ } else {
+ jam();
+ tmp1 = cactiveUndoFilePage + 1; /* NO OF READ UNDO PAGES */
+ cactiveSrUndoPage = cactiveUndoFilePage;
+ }//if
+ fsConnectptr.i = cactiveSrFsPtr;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ signal->theData[0] = fsConnectptr.p->fsPtr;
+ signal->theData[1] = cownBlockref;
+ signal->theData[2] = fsConnectptr.i;
+ signal->theData[3] = 0;
+ /* FLAG = LIST MEM PAGES, LIST FILE PAGES */
+ signal->theData[4] = ZUNDOPAGE_BASE_ADD;
+ signal->theData[5] = tmp1;
+ signal->theData[6] = 0;
+ signal->theData[7] = (cactiveUndoFilePage - tmp1) + 1;
+ signal->theData[8] = 1;
+ signal->theData[9] = cactiveUndoFilePage;
+
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 10, JBA);
+ if (tmp1 > cactiveUndoFilePage) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* THIS IS THE LAST READ IN THIS LOG FILE. WE SET THE ACTIVE FILE */
+ /* POINTER. IF IT IS THE FIRST WE SHOULD NEVER ATTEMPT ANY MORE READS */
+ /* SINCE WE SHOULD ENCOUNTER A FIRST LOG RECORD WITH PREVIOUS PAGE ID */
+ /* EQUAL TO RNIL. */
+ /*---------------------------------------------------------------------------*/
+ cactiveSrFsPtr = RNIL;
+ fsConnectptr.p->fsState = READ_UNDO_PAGE_AND_CLOSE;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE STILL HAVE MORE INFORMATION IN THIS LOG FILE. WE ONLY MOVE BACK */
+ /* THE FILE PAGE. */
+ /*---------------------------------------------------------------------------*/
+ cactiveUndoFilePage = cactiveUndoFilePage - tmp1;
+ fsConnectptr.p->fsState = READ_UNDO_PAGE;
+ }//if
+ return;
+}//Dbacc::srStartUndoLab()
+
+/* ------- DO UNDO ---------------------------*/
+/* ******************--------------------------------------------------------------- */
+/* NEXTOPERATION ORD FOR EXECUTION OF NEXT OP */
+/* ******************------------------------------+ */
+/* SENDER: ACC, LEVEL B */
+void Dbacc::execNEXTOPERATION(Signal* signal)
+{
+ jamEntry();
+ tresult = 0;
+ srDoUndoLab(signal);
+ return;
+}//Dbacc::execNEXTOPERATION()
+
+void Dbacc::srDoUndoLab(Signal* signal)
+{
+ DirRangePtr souDirRangePtr;
+ DirectoryarrayPtr souDirptr;
+ Page8Ptr souPageidptr;
+ Uint32 tundoPageindex;
+ UndoHeader *undoHeaderPtr;
+ Uint32 tmpindex;
+
+ jam();
+ undopageptr.i = cactiveSrUndoPage;
+ ptrCheckGuard(undopageptr, cundopagesize, undopage);
+ /*---------------------------------------------------------------------------*/
+ /* LAYOUT OF AN UNDO LOG RECORD: */
+ /* ***************************** */
+ /* */
+ /* |----------------------------------------------------| */
+ /* | TABLE ID | */
+ /* |----------------------------------------------------| */
+ /* | ROOT FRAGMENT ID | */
+ /* |----------------------------------------------------| */
+ /* | LOCAL FRAGMENT ID | */
+ /* |----------------------------------------------------| */
+ /* | UNDO INFO LEN 14 b | TYPE 4 b | PAGE INDEX 14 b | */
+ /* |----------------------------------------------------| */
+ /* | INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID) | */
+ /* |----------------------------------------------------| */
+ /* | PREVIOUS UNDO LOG RECORD FOR THE FRAGMENT | */
+ /* |----------------------------------------------------| */
+ /* | PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS | */
+ /* |----------------------------------------------------| */
+ /* | TYPE SPECIFIC PART | */
+ /* |----------------------------------------------------| */
+ /*---------------------------------------------------------------------------*/
+ /*---------------------------------------------------------------------------*/
+ /* SET THE PAGE POINTER. WE ONLY WORK WITH TWO PAGES IN THIS RESTART */
+ /* ACTIVITY. GET THE PAGE POINTER AND THE PAGE INDEX TO READ FROM. */
+ /*---------------------------------------------------------------------------*/
+ tundoindex = cprevUndoaddress & ZUNDOPAGEINDEX_MASK; //0x1fff, 13 bits.
+ undoHeaderPtr = (UndoHeader *) &undopageptr.p->undoword[tundoindex];
+ tundoindex = tundoindex + ZUNDOHEADSIZE;
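+ /*------------------------------------------------------------------------*/
+ /* Reading aid (illustrative): an undo log address such as cprevUndoaddress*/
+ /* splits into */
+ /* filePage = address >> 13 (cf. startUndoLab) */
+ /* wordIndex = address & ZUNDOPAGEINDEX_MASK (low 13 bits, 0..8191) */
+ /* so this record header starts at undoword[wordIndex] on that file page. */
+ /*------------------------------------------------------------------------*/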
+
+ /*------------------------------------------------------------------------*/
+ /* READ TABLE ID AND ROOT FRAGMENT ID AND USE THIS TO GET ROOT RECORD. */
+ /*------------------------------------------------------------------------*/
+ arrGuard((tundoindex + 6), 8192);
+
+ // TABLE ID
+ tabptr.i = undoHeaderPtr->tableId;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+
+ // ROOT FRAGMENT ID
+ tfid = undoHeaderPtr->rootFragId;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
+ if (!getrootfragmentrec(signal, rootfragrecptr, tfid)) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* THE ROOT RECORD WAS NOT FOUND. OBVIOUSLY WE ARE NOT RESTARTING THIS */
+ /* FRAGMENT. WE THUS IGNORE THIS LOG RECORD AND PROCEED WITH THE NEXT. */
+ /*---------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+ undoNext2Lab(signal);
+#ifdef VM_TRACE
+ ndbout_c("ignoring root fid %d", (int)tfid);
+#endif
+ return;
+ }//if
+ /*-----------------------------------------------------------------------*/
+ /* READ THE LOCAL FRAGMENT ID AND VERIFY THAT IT IS CORRECT. */
+ /*-----------------------------------------------------------------------*/
+ if (rootfragrecptr.p->fragmentid[0] == undoHeaderPtr->localFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ if (rootfragrecptr.p->fragmentid[1] == undoHeaderPtr->localFragId) {
+ jam();
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ } else {
+ jam();
+ progError(__LINE__, 0, "Invalid local fragment id in undo log");
+ return;
+ }//if
+ }//if
+ /*------------------------------------------------------------------------*/
+ /* READ UNDO INFO LENGTH, TYPE OF LOG RECORD AND PAGE INDEX WHERE TO */
+ /* APPLY THIS LOG RECORD. ALSO STEP INDEX TO PREPARE READ OF LOGICAL */
+ /* PAGE ID. SET TMPINDEX TO INDEX THE FIRST WORD IN THE TYPE SPECIFIC */
+ /* PART. */
+ /*------------------------------------------------------------------------*/
+ // UNDO INFO LENGTH 14 b | TYPE 4 b | PAGE INDEX 14 b
+ const Uint32 tmp1 = undoHeaderPtr->variousInfo;
+ cundoinfolength = tmp1 >> 18;
+ const Uint32 tpageType = (tmp1 >> 14) & 0xf;
+ tundoPageindex = tmp1 & 0x3fff;
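+ // Worked example (values illustrative only): if variousInfo held 0x304064,
+ // the decode above gives
+ //   cundoinfolength = 0x304064 >> 18          = 12
+ //   tpageType       = (0x304064 >> 14) & 0xf  = 1
+ //   tundoPageindex  = 0x304064 & 0x3fff       = 100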
+
+ // INDEX INTO PAGE DIRECTORY (LOGICAL PAGE ID)
+ tmpP = undoHeaderPtr->logicalPageId ;
+ tmpindex = tundoindex;
+ arrGuard((tmpindex + cundoinfolength - 1), 8192);
+ if (fragrecptr.p->localCheckpId != cactiveCheckpId) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ /* THE FRAGMENT DID EXIST BUT IS NOT AFFECTED BY THIS UNDO LOG */
+ /* EXECUTION. EITHER IT BELONGS TO ANOTHER CHECKPOINT OR IT WAS CREATED */
+ /* LATER AND ONLY NEEDS EXECUTION OF REDO LOG RECORDS FROM LQH. */
+ /*-----------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ // PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+
+ undoNext2Lab(signal);
+ return;
+ }//if
+ /*-----------------------------------------------------------------------*/
+ /* VERIFY CONSISTENCY OF UNDO LOG RECORDS. */
+ /*-----------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->prevUndoposition == cprevUndoaddress);
+ cSrUndoRecords[tpageType]++;
+ switch(tpageType){
+
+ case UndoHeader::ZPAGE_INFO:{
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE TO UNDO UPDATES IN A NORMAL PAGE. GET THE PAGE POINTER BY */
+ /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
+ /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
+ /*----------------------------------------------------------------------*/
+ souDirRangePtr.i = fragrecptr.p->directory;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
+ souPageidptr.i = souDirptr.p->pagep[tmpP];
+ ptrCheckGuard(souPageidptr, cpagesize, page8);
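+ /* Sketch of the directory walk above (example value illustrative): a */
+ /* logical page id L is split as */
+ /* dirIndex = L >> 8 -- selects a Directoryarray in the DirRange */
+ /* pageSlot = L & 0xff -- selects the Page8 slot within that array */
+ /* e.g. L = 0x0153 resolves to dirArray[1]->pagep[0x53]. */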
+ Uint32 loopLimit = tundoPageindex + cundoinfolength;
+ ndbrequire(loopLimit <= 2048);
+ for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
+ dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
+ souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ }//for
+ break;
+ }
+
+ case UndoHeader::ZOVER_PAGE_INFO:{
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE TO UNDO UPDATES IN AN OVERFLOW PAGE. GET THE PAGE POINTER BY*/
+ /* USING THE LOGICAL PAGE ID. THEN RESET THE OLD VALUE IN THE PAGE BY */
+ /* USING THE OLD DATA WHICH IS STORED IN THIS UNDO LOG RECORD. */
+ /*----------------------------------------------------------------------*/
+ souDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
+ souPageidptr.i = souDirptr.p->pagep[tmpP];
+ ptrCheckGuard(souPageidptr, cpagesize, page8);
+ Uint32 loopLimit = tundoPageindex + cundoinfolength;
+ ndbrequire(loopLimit <= 2048);
+ for (Uint32 tmp = tundoPageindex; tmp < loopLimit; tmp++) {
+ dbgWord32(souPageidptr, tmp, undopageptr.p->undoword[tmpindex]);
+ souPageidptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ }//for
+ break;
+ }
+
+ case UndoHeader::ZOP_INFO: {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* AN OPERATION WAS ACTIVE WHEN THE LOCAL CHECKPOINT WAS EXECUTED. WE NEED */
+ /* TO RESET THE LOCKS IT HAS SET. IF THE OPERATION WAS AN INSERT OR */
+ /* THE ELEMENT WAS MARKED AS DISAPPEARED IT WILL ALSO BE REMOVED */
+ /* FROM THE PAGE. */
+ /* */
+ /* BEGIN BY SEARCHING FOR THE ELEMENT. WHEN IT IS FOUND, UNDO THE */
+ /* CHANGES ON THE ELEMENT HEADER. IF IT WAS AN INSERT OPERATION OR */
+ /* MARKED AS DISAPPEARED, PROCEED BY REMOVING THE ELEMENT. */
+ /*---------------------------------------------------------------------*/
+ seizeOpRec(signal);
+ // Initialise the opRec
+ operationRecPtr.p->transId1 = 0;
+ operationRecPtr.p->transId2 = RNIL;
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->lockMode = 0;
+ operationRecPtr.p->dirtyRead = 0;
+ operationRecPtr.p->nodeType = 0;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->elementPage = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL;
+ operationRecPtr.p->insertIsDone = ZFALSE;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->insertDeleteLen = fragrecptr.p->elementLength;
+ operationRecPtr.p->longPagePtr = RNIL;
+ operationRecPtr.p->longKeyPageIndex = RNIL;
+ operationRecPtr.p->scanRecPtr = RNIL;
+ operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZTRUE;
+
+ // Read operation values from undo page
+ operationRecPtr.p->operation = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ operationRecPtr.p->hashValue = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ const Uint32 tkeylen = undopageptr.p->undoword[tmpindex];
+ tmpindex++;
+ operationRecPtr.p->tupkeylen = tkeylen;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
+ operationRecPtr.p->fragptr = fragrecptr.i;
+
+ ndbrequire(fragrecptr.p->keyLength != 0 &&
+ fragrecptr.p->keyLength == tkeylen);
+
+ // Read localkey1 from undo page
+ signal->theData[7 + 0] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
+ arrGuard((tmpindex - 1), 8192);
+ getElement(signal);
+ if (tgeResult != ZTRUE) {
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabptr.i;
+ signal->theData[2] = cactiveCheckpId;
+ signal->theData[3] = cprevUndoaddress;
+ signal->theData[4] = operationRecPtr.p->operation;
+ signal->theData[5] = operationRecPtr.p->hashValue;
+ signal->theData[6] = operationRecPtr.p->tupkeylen;
+ sendSignal(cownBlockref, GSN_DEBUG_SIG, signal, 11, JBA);
+ return;
+ }//if
+
+ operationRecPtr.p->elementPage = gePageptr.i;
+ operationRecPtr.p->elementContainer = tgeContainerptr;
+ operationRecPtr.p->elementPointer = tgeElementptr;
+ operationRecPtr.p->elementIsforward = tgeForward;
+
+ commitdelete(signal, true);
+ releaseOpRec(signal);
+ break;
+ }
+
+ default:
+ jam();
+ progError(__LINE__, 0, "Invalid pagetype in undo log");
+ break;
+
+ }//switch(tpageType)
+
+ /*----------------------------------------------------------------------*/
+ /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
+ /* FOR THIS FRAGMENT. */
+ /*----------------------------------------------------------------------*/
+ fragrecptr.p->prevUndoposition = undoHeaderPtr->prevUndoAddressForThisFrag;
+ /*----------------------------------------------------------------------*/
+ /* READ THE PAGE ID AND THE PAGE INDEX OF THE PREVIOUS UNDO LOG RECORD */
+ /* FOR THIS UNDO LOG. */
+ /*----------------------------------------------------------------------*/
+ creadyUndoaddress = cprevUndoaddress;
+ cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
+
+ if (fragrecptr.p->prevUndoposition == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* WE HAVE NOW EXECUTED ALL UNDO LOG RECORDS FOR THIS FRAGMENT. WE */
+ /* NOW NEED TO UPDATE THE FREE LIST OF OVERFLOW PAGES. */
+ /*---------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->nextAllocPage == 0);
+
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+ return;
+ }//if
+ undoNext2Lab(signal);
+ return;
+}//Dbacc::srDoUndoLab()
+
+void Dbacc::undoNext2Lab(Signal* signal)
+{
+ /*---------------------------------------------------------------------------*/
+ /* EXECUTE NEXT UNDO LOG RECORD. */
+ /*---------------------------------------------------------------------------*/
+ if (cprevUndoaddress == cminusOne) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE EXECUTED THIS UNDO LOG TO COMPLETION. IT IS NOW TIME TO TAKE */
+ /* CARE OF THE NEXT UNDO LOG OR REPORT COMPLETION OF UNDO LOG EXECUTION. */
+ /*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZSTART_UNDO;
+ sendSignal(cownBlockref, GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ }//if
+ if ((creadyUndoaddress >> 13) != (cprevUndoaddress >> 13)) {
+ /*---------------------------------------------------------------------------*/
+ /* WE ARE CHANGING PAGE. */
+ /*---------------------------------------------------------------------------*/
+ if (cactiveSrUndoPage == 0) {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE READ AND EXECUTED ALL UNDO LOG INFORMATION IN THE CURRENTLY */
+ /* READ PAGES. WE STILL HAVE MORE INFORMATION TO READ FROM FILE SINCE */
+ /* WE HAVEN'T FOUND THE FIRST LOG RECORD IN THE LOG FILE YET. */
+ /*---------------------------------------------------------------------------*/
+ srStartUndoLab(signal);
+ return;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------------*/
+ /* WE HAVE ANOTHER PAGE READ THAT WE NEED TO EXECUTE. */
+ /*---------------------------------------------------------------------------*/
+ cactiveSrUndoPage = cactiveSrUndoPage - 1;
+ }//if
+ }//if
+ /*---------------------------------------------------------------------------*/
+ /* REAL-TIME BREAK */
+ /*---------------------------------------------------------------------------*/
+ /* ******************************< */
+ /* NEXTOPERATION */
+ /* ******************************< */
+ sendSignal(cownBlockref, GSN_NEXTOPERATION, signal, 1, JBB);
+ return;
+}//Dbacc::undoNext2Lab()
+
+/*-----------------------------------------------------------------------------------*/
+/* AFTER COMPLETING THE READING OF DATA PAGES FROM DISK AND EXECUTING THE UNDO */
+/* LOG WE ARE READY TO UPDATE THE FREE LIST OF OVERFLOW PAGES. THIS LIST MUST */
+/* BE BUILT AGAIN SINCE IT IS NOT CHECKPOINTED. WHEN THE PAGES ARE ALLOCATED */
+/* THEY ARE NOT PART OF ANY LIST. PAGES CAN EITHER BE PUT IN FREE LIST, NOT */
+/* IN FREE LIST OR BE PUT INTO LIST OF LONG KEY PAGES. */
+/*-----------------------------------------------------------------------------------*/
+void Dbacc::execACC_OVER_REC(Signal* signal)
+{
+ DirRangePtr pnoDirRangePtr;
+ DirectoryarrayPtr pnoOverflowDirptr;
+ Page8Ptr pnoPageidptr;
+ Uint32 tpnoPageType;
+ Uint32 toverPageCheck;
+
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ toverPageCheck = 0;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ ndbrequire((fragrecptr.p->nextAllocPage != 0) ||
+ (fragrecptr.p->firstOverflowRec == RNIL));
+ /*-----------------------------------------------------------------------------------*/
+ /* WHO HAS PUT SOMETHING INTO THE LIST BEFORE WE EVEN STARTED PUTTING THINGS */
+ /* THERE. */
+ /*-----------------------------------------------------------------------------------*/
+ ndbrequire(fragrecptr.p->loadingFlag == ZTRUE);
+ /*---------------------------------------------------------------------------*/
+ /* LOADING HAS STOPPED BEFORE WE HAVE LOADED, SYSTEM ERROR. */
+ /*---------------------------------------------------------------------------*/
+ while (toverPageCheck < ZNO_OF_OP_PER_SIGNAL) {
+ jam();
+ if (fragrecptr.p->nextAllocPage >= fragrecptr.p->lastOverIndex) {
+ jam();
+ fragrecptr.p->loadingFlag = ZFALSE;
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (rootfragrecptr.p->lcpPtr != RNIL) {
+ jam();
+ srCloseDataFileLab(signal);
+ } else {
+ jam();
+ undoNext2Lab(signal);
+ }//if
+ return;
+ }//if
+ tmpP = fragrecptr.p->nextAllocPage;
+ pnoDirRangePtr.i = fragrecptr.p->overflowdir;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ arrGuard(tmpP2, 256);
+ ptrCheckGuard(pnoDirRangePtr, cdirrangesize, dirRange);
+ if (pnoDirRangePtr.p->dirArray[tmpP2] == RNIL) {
+ jam();
+ pnoPageidptr.i = RNIL;
+ } else {
+ pnoOverflowDirptr.i = pnoDirRangePtr.p->dirArray[tmpP2];
+ if (pnoOverflowDirptr.i == RNIL) {
+ jam();
+ pnoPageidptr.i = RNIL;
+ } else {
+ jam();
+ ptrCheckGuard(pnoOverflowDirptr, cdirarraysize, directoryarray);
+ pnoPageidptr.i = pnoOverflowDirptr.p->pagep[tmpP];
+ }//if
+ }//if
+ if (pnoPageidptr.i == RNIL) {
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = fragrecptr.p->nextAllocPage;
+ sorOverflowRecPtr.p->overpage = RNIL;
+ priOverflowRecPtr = sorOverflowRecPtr;
+ putRecInFreeOverdir(signal);
+ } else {
+ ptrCheckGuard(pnoPageidptr, cpagesize, page8);
+ tpnoPageType = pnoPageidptr.p->word32[ZPOS_PAGE_TYPE];
+ tpnoPageType = (tpnoPageType >> ZPOS_PAGE_TYPE_BIT) & 3;
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
+ jam();
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ ndbrequire(pnoPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
+ } else {
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = pnoPageidptr.p->word32[ZPOS_PAGE_ID];
+ ndbrequire(sorOverflowRecPtr.p->dirindex == fragrecptr.p->nextAllocPage);
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
+ sorOverflowRecPtr.p->overpage = pnoPageidptr.i;
+ porOverflowRecPtr = sorOverflowRecPtr;
+ putOverflowRecInFrag(signal);
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0) {
+ jam();
+ ropPageptr = pnoPageidptr;
+ releaseOverpage(signal);
+ }//if
+ }//if
+ }//if
+ fragrecptr.p->nextAllocPage++;
+ toverPageCheck++;
+ }//while
+ signal->theData[0] = fragrecptr.i;
+ sendSignal(cownBlockref, GSN_ACC_OVER_REC, signal, 1, JBB);
+}//Dbacc::execACC_OVER_REC()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SYSTEM RESTART MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* SCAN MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SCANREQ START OF A SCAN PROCESS */
+/* SENDER: LQH, LEVEL B */
+/* ENTER ACC_SCANREQ WITH */
+/* TUSERPTR, LQH SCAN_CONNECT POINTER */
+/* TUSERBLOCKREF, LQH BLOCK REFERENCE */
+/* TABPTR, TABLE IDENTITY AND PTR */
+/* TFID ROOT FRAGMENT IDENTITY */
+/* TSCAN_FLAG , = ZCOPY, ZSCAN, ZSCAN_LOCK_ALL */
+/* ZREADLOCK, ZWRITELOCK */
+/* TSCAN_TRID1 , TRANSACTION ID PART 1 */
+/* TSCAN_TRID2 TRANSACTION ID PART 2 */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_SCANREQ START OF A SCAN PROCESS */
+/* ******************------------------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ tuserptr = req->senderData;
+ tuserblockref = req->senderRef;
+ tabptr.i = req->tableId;
+ tfid = req->fragmentNo;
+ tscanFlag = req->requestInfo;
+ tscanTrid1 = req->transId1;
+ tscanTrid2 = req->transId2;
+
+ tresult = 0;
+ ptrCheckGuard(tabptr, ctablesize, tabrec);
+ ndbrequire(getrootfragmentrec(signal,rootfragrecptr, tfid));
+
+ Uint32 i;
+ for (i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
+ jam();
+ if (rootfragrecptr.p->scan[i] == RNIL) {
+ jam();
+ break;
+ }
+ }
+ ndbrequire(i != MAX_PARALLEL_SCANS_PER_FRAG);
+ ndbrequire(cfirstFreeScanRec != RNIL);
+ seizeScanRec(signal);
+
+ rootfragrecptr.p->scan[i] = scanPtr.i;
+ scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
+ scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag);
+ scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag);
+
+ /* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */
+ /* CHECK BITS. THE MASK NOTES WHICH BIT IS */
+ /* ALLOCATED FOR THE ACTIVE SCAN */
+ scanPtr.p->scanMask = 1 << i;
+ scanPtr.p->scanUserptr = tuserptr;
+ scanPtr.p->scanUserblockref = tuserblockref;
+ scanPtr.p->scanTrid1 = tscanTrid1;
+ scanPtr.p->scanTrid2 = tscanTrid2;
+ scanPtr.p->rootPtr = rootfragrecptr.i;
+ scanPtr.p->scanLockHeld = 0;
+ scanPtr.p->scanOpsAllocated = 0;
+ scanPtr.p->scanFirstActiveOp = RNIL;
+ scanPtr.p->scanFirstQueuedOp = RNIL;
+ scanPtr.p->scanLastQueuedOp = RNIL;
+ scanPtr.p->scanFirstLockedOp = RNIL;
+ scanPtr.p->scanLastLockedOp = RNIL;
+ scanPtr.p->scanState = ScanRec::WAIT_NEXT;
+ fragrecptr.i = rootfragrecptr.p->fragmentptr[0];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ initScanFragmentPart(signal);
+
+ /*------------------------------------------------------*/
+ /* We start the timeout loop for the scan process here. */
+ /*------------------------------------------------------*/
+ ndbrequire(scanPtr.p->scanTimer == 0);
+ if (scanPtr.p->scanContinuebCounter == 0) {
+ jam();
+ scanPtr.p->scanContinuebCounter = 1;
+ signal->theData[0] = ZSEND_SCAN_HBREP;
+ signal->theData[1] = scanPtr.i;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
+ }//if
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ /* ************************ */
+ /* ACC_SCANCONF */
+ /* ************************ */
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = scanPtr.i;
+ signal->theData[2] = 2;
+ /* NR OF LOCAL FRAGMENT */
+ signal->theData[3] = rootfragrecptr.p->fragmentid[0];
+ signal->theData[4] = rootfragrecptr.p->fragmentid[1];
+ signal->theData[7] = AccScanConf::ZNOT_EMPTY_FRAGMENT;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_ACC_SCANCONF, signal, 8, JBB);
+ /* NOT EMPTY FRAGMENT */
+ return;
+}//Dbacc::execACC_SCANREQ()
+
+/* ******************--------------------------------------------------------------- */
+/* NEXT_SCANREQ REQUEST FOR NEXT ELEMENT OF */
+/* ******************------------------------------+ A FRAGMENT. */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execNEXT_SCANREQ(Signal* signal)
+{
+ Uint32 tscanNextFlag;
+ jamEntry();
+ scanPtr.i = signal->theData[0];
+ operationRecPtr.i = signal->theData[1];
+ tscanNextFlag = signal->theData[2];
+ /* ------------------------------------------ */
+ /* 1 = ZCOPY_NEXT GET NEXT ELEMENT */
+ /* 2 = ZCOPY_NEXT_COMMIT COMMIT THE */
+ /* ACTIVE ELEMENT AND GET THE NEXT ONE */
+ /* 3 = ZCOPY_COMMIT COMMIT THE ACTIVE ELEMENT */
+ /* 4 = ZCOPY_REPEAT GET THE ACTIVE ELEMENT */
+ /* 5 = ZCOPY_ABORT RELOCK THE ACTIVE ELEMENT */
+ /* 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY */
+ /* ------------------------------------------ */
+ tresult = 0;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ ndbrequire(scanPtr.p->scanState == ScanRec::WAIT_NEXT);
+
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ switch (tscanNextFlag) {
+ case ZCOPY_NEXT:
+ jam();
+ /*empty*/;
+ break;
+ case ZCOPY_NEXT_COMMIT:
+ case ZCOPY_COMMIT:
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* COMMIT ACTIVE OPERATION. SEND NEXT SCAN ELEMENT IF IT IS ZCOPY_NEXT_COMMIT. */
+ /* --------------------------------------------------------------------------------- */
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to safely commit an
+ // operation. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
+ return;
+ }//if
+ }//if
+ commitOperation(signal);
+ }//if
+ takeOutActiveScanOp(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ if (tscanNextFlag == ZCOPY_COMMIT) {
+ jam();
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 1);
+ return;
+ }//if
+ break;
+ case ZCOPY_CLOSE:
+ jam();
+ fragrecptr.i = scanPtr.p->activeLocalFrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to commit a set of
+ // operations. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_NEXT_SCANREQ, signal, 10, 3);
+ return;
+ }//if
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE SCAN PROCESS IS FINISHED. RELOCK ALL LOCKED ELEMENTS. RELEASE ALL INVOLVED RECORDS. */
+ /* --------------------------------------------------------------------------------- */
+ releaseScanLab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+}//Dbacc::execNEXT_SCANREQ()
+
+void Dbacc::checkNextBucketLab(Signal* signal)
+{
+ DirRangePtr cscDirRangePtr;
+ DirectoryarrayPtr cscDirptr;
+ DirectoryarrayPtr tnsDirptr;
+ Page8Ptr nsPageptr;
+ Page8Ptr cscPageidptr;
+ Page8Ptr gnsPageidptr;
+ Page8Ptr tnsPageidptr;
+ Uint32 tnsElementptr;
+ Uint32 tnsContainerptr;
+ Uint32 tnsIsLocked;
+ Uint32 tnsTmp1;
+ Uint32 tnsTmp2;
+ Uint32 tnsCopyIndex1;
+ Uint32 tnsCopyIndex2;
+ Uint32 tnsCopyDir;
+
+ tnsCopyDir = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
+ tnsCopyIndex1 = tnsCopyDir >> 8;
+ tnsCopyIndex2 = tnsCopyDir & 0xff;
+ arrGuard(tnsCopyIndex1, 256);
+ tnsDirptr.i = gnsDirRangePtr.p->dirArray[tnsCopyIndex1];
+ ptrCheckGuard(tnsDirptr, cdirarraysize, directoryarray);
+ tnsPageidptr.i = tnsDirptr.p->pagep[tnsCopyIndex2];
+ ptrCheckGuard(tnsPageidptr, cpagesize, page8);
+ gnsPageidptr.i = tnsPageidptr.i;
+ gnsPageidptr.p = tnsPageidptr.p;
+ tnsTmp1 = (1 << fragrecptr.p->k) - 1;
+ tgsePageindex = scanPtr.p->nextBucketIndex & tnsTmp1;
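+ /* Sketch of the bucket addressing above (k = fragrecptr.p->k; example */
+ /* values illustrative): for a bucket index B, */
+ /* logical page = B >> k (then split 8/8 into directory/page slot) */
+ /* page index = B & ((1 << k) - 1) (container index within the page) */
+ /* e.g. with k = 6 and B = 200, logical page = 3 and page index = 8. */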
+ gsePageidptr.i = gnsPageidptr.i;
+ gsePageidptr.p = gnsPageidptr.p;
+ if (!getScanElement(signal)) {
+ scanPtr.p->nextBucketIndex++;
+ if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) {
+ if (scanPtr.p->nextBucketIndex > scanPtr.p->maxBucketIndexToRescan) {
+ /* --------------------------------------------------------------------------------- */
+ // We have finished the rescan phase. We are ready to proceed with the next fragment part.
+ /* --------------------------------------------------------------------------------- */
+ jam();
+ checkNextFragmentLab(signal);
+ return;
+ }//if
+ } else if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) {
+ if ((fragrecptr.p->p + fragrecptr.p->maxp) < scanPtr.p->nextBucketIndex) {
+ /* --------------------------------------------------------------------------------- */
+ // All buckets have been scanned a first time.
+ /* --------------------------------------------------------------------------------- */
+ if (scanPtr.p->minBucketIndexToRescan == 0xFFFFFFFF) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // We have not had any merges behind the scan. Thus it is not necessary to
+ // rescan any buckets and we can proceed immediately with the next fragment part.
+ /* --------------------------------------------------------------------------------- */
+ checkNextFragmentLab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // Some buckets are in need of rescanning due to merges that have moved records
+ // from in front of the scan to behind the scan. During the merges we kept track of
+ // which buckets need a rescan. We start with the minimum and end with the maximum.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->nextBucketIndex = scanPtr.p->minBucketIndexToRescan;
+ scanPtr.p->scanBucketState = ScanRec::SECOND_LAP;
+ if (scanPtr.p->maxBucketIndexToRescan > (fragrecptr.p->p + fragrecptr.p->maxp)) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // If we have had so many merges that the maximum is bigger than the number of buckets,
+ // then we simply satisfy ourselves with scanning to the end. This can only happen
+ // after the total number of buckets has been brought down to less than half, and the
+ // minimum should then be 0; otherwise there is some problem.
+ /* --------------------------------------------------------------------------------- */
+ if (scanPtr.p->minBucketIndexToRescan != 0) {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ scanPtr.p->maxBucketIndexToRescan = fragrecptr.p->p + fragrecptr.p->maxp;
+ }//if
+ }//if
+ }//if
+ }//if
+ if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) &&
+ (scanPtr.p->nextBucketIndex <= scanPtr.p->startNoOfBuckets)) {
+ /* --------------------------------------------------------------------------------- */
+ // We will only reset the scan indicator on the buckets that existed at the start of the
+ // scan. The others will be handled by the split and merge code.
+ /* --------------------------------------------------------------------------------- */
+ tnsTmp2 = (1 << fragrecptr.p->k) - 1;
+ trsbPageindex = scanPtr.p->nextBucketIndex & tnsTmp2;
+ if (trsbPageindex != 0) {
+ jam();
+ rsbPageidptr.i = gnsPageidptr.i;
+ rsbPageidptr.p = gnsPageidptr.p;
+ } else {
+ jam();
+ cscDirRangePtr.i = fragrecptr.p->directory;
+ tmpP = scanPtr.p->nextBucketIndex >> fragrecptr.p->k;
+ tmpP2 = tmpP >> 8;
+ tmpP = tmpP & 0xff;
+ ptrCheckGuard(cscDirRangePtr, cdirrangesize, dirRange);
+ arrGuard(tmpP2, 256);
+ cscDirptr.i = cscDirRangePtr.p->dirArray[tmpP2];
+ ptrCheckGuard(cscDirptr, cdirarraysize, directoryarray);
+ cscPageidptr.i = cscDirptr.p->pagep[tmpP];
+ ptrCheckGuard(cscPageidptr, cpagesize, page8);
+ tmp1 = (1 << fragrecptr.p->k) - 1;
+ trsbPageindex = scanPtr.p->nextBucketIndex & tmp1;
+ rsbPageidptr.i = cscPageidptr.i;
+ rsbPageidptr.p = cscPageidptr.p;
+ }//if
+ releaseScanBucket(signal);
+ }//if
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* AN ELEMENT WHICH HAS NOT BEEN SCANNED WAS FOUND. WE WILL PREPARE IT */
+ /* TO BE SENT TO THE LQH BLOCK FOR FURTHER PROCESSING. */
+ /* WE ASSUME THERE ARE OPERATION RECORDS AVAILABLE SINCE LQH SHOULD HAVE*/
+ /* GUARANTEED THAT THROUGH EARLY BOOKING. */
+ /* ----------------------------------------------------------------------- */
+ tnsIsLocked = tgseIsLocked;
+ tnsElementptr = tgseElementptr;
+ tnsContainerptr = tgseContainerptr;
+ nsPageptr.i = gsePageidptr.i;
+ nsPageptr.p = gsePageidptr.p;
+ seizeOpRec(signal);
+ tisoIsforward = tgseIsforward;
+ tisoContainerptr = tnsContainerptr;
+ tisoElementptr = tnsElementptr;
+ isoPageptr.i = nsPageptr.i;
+ isoPageptr.p = nsPageptr.p;
+ initScanOpRec(signal);
+
+ if (!tnsIsLocked){
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ slPageidptr = nsPageptr;
+ tslElementptr = tnsElementptr;
+ setlock(signal);
+ insertLockOwnersList(signal, operationRecPtr);
+ }//if
+ } else {
+ arrGuard(tnsElementptr, 2048);
+ queOperPtr.i =
+ ElementHeader::getOpPtrI(nsPageptr.p->word32[tnsElementptr]);
+ ptrCheckGuard(queOperPtr, coprecsize, operationrec);
+ if (queOperPtr.p->elementIsDisappeared == ZTRUE) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // If the lock owner indicates that the element has disappeared, then we will not
+ // report this tuple. We will continue with the next tuple.
+ /* --------------------------------------------------------------------------------- */
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ Uint32 return_result;
+ if (scanPtr.p->scanLockMode == ZREADLOCK) {
+ jam();
+ priPageptr = nsPageptr;
+ tpriElementptr = tnsElementptr;
+ return_result = placeReadInLockQueue(signal);
+ } else {
+ jam();
+ pwiPageptr = nsPageptr;
+ tpwiElementptr = tnsElementptr;
+ return_result = placeWriteInLockQueue(signal);
+ }//if
+ if (return_result == ZSERIAL_QUEUE) {
+ /* --------------------------------------------------------------------------------- */
+ /* WE PLACED THE OPERATION INTO A SERIAL QUEUE AND THUS WE HAVE TO WAIT FOR */
+ /* THE LOCK TO BE RELEASED. WE CONTINUE WITH THE NEXT ELEMENT. */
+ /* --------------------------------------------------------------------------------- */
+ putOpScanLockQue(); /* PUT THE OP IN A QUE IN THE SCAN REC */
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ } else if (return_result == ZWRITE_ERROR) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // The tuple is either not committed yet or a delete in the same transaction (not
+ // possible here since we are a scan). Thus we simply continue with the next tuple.
+ /* --------------------------------------------------------------------------------- */
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ ndbassert(return_result == ZPARALLEL_QUEUE);
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // Committed reads proceed immediately down here without caring for locks, except when
+ // the tuple was deleted permanently and no new operation has inserted it again.
+ /* --------------------------------------------------------------------------------- */
+ putActiveScanOp(signal);
+ sendNextScanConf(signal);
+ return;
+}//Dbacc::checkNextBucketLab()
+
+
+void Dbacc::checkNextFragmentLab(Signal* signal)
+{
+ RootfragmentrecPtr cnfRootfragrecptr;
+
+ cnfRootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(cnfRootfragrecptr, crootfragmentsize, rootfragmentrec);
+ if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[0]) {
+ jam();
+ fragrecptr.i = cnfRootfragrecptr.p->fragmentptr[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ initScanFragmentPart(signal);
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ } else {
+ if (scanPtr.p->activeLocalFrag == cnfRootfragrecptr.p->fragmentptr[1]) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ // Both fragments have completed their scan part and we can indicate that the scan is
+ // now completed.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->scanBucketState = ScanRec::SCAN_COMPLETED;
+ /*empty*/;
+ } else {
+ jam();
+ /* ALL ELEMENTS ARE SENT */
+ sendSystemerror(signal);
+ }//if
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ // The scan is completed. ACC_CHECK_SCAN will perform all the necessary checks to see
+ // what the next step is.
+ /* --------------------------------------------------------------------------------- */
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+}//Dbacc::checkNextFragmentLab()
+
+void Dbacc::initScanFragmentPart(Signal* signal)
+{
+ DirRangePtr cnfDirRangePtr;
+ DirectoryarrayPtr cnfDirptr;
+ Page8Ptr cnfPageidptr;
+ /* --------------------------------------------------------------------------------- */
+ // Set the active fragment part.
+ // Set the current bucket scanned to the first.
+ // Start with the first lap.
+ // Remember the number of buckets at start of the scan.
+ // Set the minimum and maximum to values that will always be smaller and larger than any real bucket index.
+ // Reset the scan indicator on the first bucket.
+ /* --------------------------------------------------------------------------------- */
+ scanPtr.p->activeLocalFrag = fragrecptr.i;
+ scanPtr.p->nextBucketIndex = 0; /* INDEX OF SCAN BUCKET */
+ scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
+ scanPtr.p->startNoOfBuckets = fragrecptr.p->p + fragrecptr.p->maxp;
+ scanPtr.p->minBucketIndexToRescan = 0xFFFFFFFF;
+ scanPtr.p->maxBucketIndexToRescan = 0;
+ cnfDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(cnfDirRangePtr, cdirrangesize, dirRange);
+ cnfDirptr.i = cnfDirRangePtr.p->dirArray[0];
+ ptrCheckGuard(cnfDirptr, cdirarraysize, directoryarray);
+ cnfPageidptr.i = cnfDirptr.p->pagep[0];
+ ptrCheckGuard(cnfPageidptr, cpagesize, page8);
+ trsbPageindex = scanPtr.p->nextBucketIndex & ((1 << fragrecptr.p->k) - 1);
+ rsbPageidptr.i = cnfPageidptr.i;
+ rsbPageidptr.p = cnfPageidptr.p;
+ releaseScanBucket(signal);
+}//Dbacc::initScanFragmentPart()
+
+/* --------------------------------------------------------------------------------- */
+/* FLAG = 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY OR ABORTED. ALL OPERATION IN THE */
+/* ACTIVE OR WAIT QUEUE ARE RELEASED, SCAN FLAG OF ROOT FRAG IS RESET AND THE SCAN */
+/* RECORD IS RELEASED. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanLab(Signal* signal)
+{
+ releaseAndCommitActiveOps(signal);
+ releaseAndCommitQueuedOps(signal);
+ releaseAndAbortLockedOps(signal);
+
+ rootfragrecptr.i = scanPtr.p->rootPtr;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ for (tmp = 0; tmp < MAX_PARALLEL_SCANS_PER_FRAG; tmp++) {
+ jam();
+ if (rootfragrecptr.p->scan[tmp] == scanPtr.i) {
+ jam();
+ rootfragrecptr.p->scan[tmp] = RNIL;
+ }//if
+ }//for
+ // Stops the heartbeat.
+ scanPtr.p->scanTimer = 0;
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ releaseScanRec(signal);
+ return;
+}//Dbacc::releaseScanLab()
+
+
+void Dbacc::releaseAndCommitActiveOps(Signal* signal)
+{
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstActiveOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ commitOperation(signal);
+ }//if
+ takeOutActiveScanOp(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+ }//while
+}//Dbacc::releaseAndCommitActiveOps()
+
+
+void Dbacc::releaseAndCommitQueuedOps(Signal* signal)
+{
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ commitOperation(signal);
+ }//if
+ takeOutReadyScanQueue(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+ }//while
+}//Dbacc::releaseAndCommitQueuedOps()
+
+void Dbacc::releaseAndAbortLockedOps(Signal* signal) {
+
+ OperationrecPtr trsoOperPtr;
+ operationRecPtr.i = scanPtr.p->scanFirstLockedOp;
+ while (operationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ trsoOperPtr.i = operationRecPtr.p->nextOp;
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (!scanPtr.p->scanReadCommittedFlag) {
+ jam();
+ abortOperation(signal);
+ }//if
+ takeOutScanLockQueue(scanPtr.i);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ operationRecPtr.i = trsoOperPtr.i;
+ }//while
+}//Dbacc::releaseAndAbortLockedOps()
+
+/* 3.18.3 ACC_CHECK_SCAN */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CHECK_SCAN */
+/* ENTER ACC_CHECK_SCAN WITH */
+/* SCAN_PTR */
+/* ******************--------------------------------------------------------------- */
+/* ******************--------------------------------------------------------------- */
+/* ACC_CHECK_SCAN */
+/* ******************------------------------------+ */
+void Dbacc::execACC_CHECK_SCAN(Signal* signal)
+{
+ Uint32 TcheckLcpStop;
+ jamEntry();
+ scanPtr.i = signal->theData[0];
+ TcheckLcpStop = signal->theData[1];
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ while (scanPtr.p->scanFirstQueuedOp != RNIL) {
+ jam();
+ //----------------------------------------------------------------------------
+ // An operation has been released from the lock queue. We are in the parallel
+ // queue of this tuple. We are ready to report the tuple now.
+ //----------------------------------------------------------------------------
+ operationRecPtr.i = scanPtr.p->scanFirstQueuedOp;
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ takeOutReadyScanQueue(signal);
+ if (operationRecPtr.p->elementIsDisappeared == ZTRUE) {
+ jam();
+ fragrecptr.i = operationRecPtr.p->fragptr;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ if (fragrecptr.p->createLcp == ZTRUE) {
+ if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_COMMIT) {
+ jam();
+ /*--------------------------------------------------------------*/
+ // We did not have enough undo log buffers to safely abort an
+ // operation. Try again in 10 milliseconds.
+ /*--------------------------------------------------------------*/
+ sendSignalWithDelay(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 10, 2);
+ return;
+ }//if
+ }//if
+ abortOperation(signal);
+ releaseOpRec(signal);
+ scanPtr.p->scanOpsAllocated--;
+ continue;
+ }//if
+ putActiveScanOp(signal);
+ sendNextScanConf(signal);
+ return;
+ }//while
+
+
+ if ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
+ (scanPtr.p->scanLockHeld == 0)) {
+ jam();
+ //----------------------------------------------------------------------------
+ // The scan is now completed and there are no more locks outstanding. Thus we
+ // will report the scan as completed to LQH.
+ //----------------------------------------------------------------------------
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ return;
+ }//if
+ if (TcheckLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
+ //---------------------------------------------------------------------------
+ // To ensure that the block on the fragment taken at the start of a local
+ // checkpoint is not held for too long, we insert a release and re-acquisition
+ // of that block here. This is performed in LQH. If we are blocked or if we
+ // have requested a sleep, then we will receive RNIL in the returned signal word.
+ //---------------------------------------------------------------------------
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] =
+ ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
+ (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED));
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ if (signal->theData[0] == RNIL) {
+ jam();
+ return;
+ }//if
+ }//if
+ /**
+ * If we have more than max locks held OR
+ * scan is completed AND at least one lock held
+ * - Inform LQH about this condition
+ */
+ if ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) ||
+ (cfreeopRec == RNIL) ||
+ ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) &&
+ (scanPtr.p->scanLockHeld > 0))) {
+ jam();
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = RNIL; // No operation is returned
+ signal->theData[2] = 512; // MASV
+ sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB);
+ return;
+ }
+ if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) {
+ jam();
+ signal->theData[0] = scanPtr.i;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ execACC_CHECK_SCAN(signal);
+ return;
+ }//if
+
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+
+ fragrecptr.i = scanPtr.p->activeLocalFrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ gnsDirRangePtr.i = fragrecptr.p->directory;
+ ptrCheckGuard(gnsDirRangePtr, cdirrangesize, dirRange);
+ checkNextBucketLab(signal);
+ return;
+}//Dbacc::execACC_CHECK_SCAN()
+
+/* ******************---------------------------------------------------- */
+/* ACC_TO_REQ PERFORM A TAKE OVER */
+/* ******************-------------------+ */
+/* SENDER: LQH, LEVEL B */
+void Dbacc::execACC_TO_REQ(Signal* signal)
+{
+ OperationrecPtr tatrOpPtr;
+
+ jamEntry();
+ tatrOpPtr.i = signal->theData[1]; /* OPER PTR OF ACC */
+ ptrCheckGuard(tatrOpPtr, coprecsize, operationrec);
+ if (tatrOpPtr.p->operation == ZSCAN_OP) {
+ tatrOpPtr.p->transId1 = signal->theData[2];
+ tatrOpPtr.p->transId2 = signal->theData[3];
+ } else {
+ jam();
+ signal->theData[0] = cminusOne;
+ signal->theData[1] = ZTO_OP_STATE_ERROR;
+ }//if
+ return;
+}//Dbacc::execACC_TO_REQ()
+
+/* --------------------------------------------------------------------------------- */
+/* CONTAINERINFO */
+/* INPUT: */
+/* CI_PAGEIDPTR (PAGE POINTER WHERE CONTAINER RESIDES) */
+/* TCI_PAGEINDEX (INDEX OF CONTAINER, USED TO CALCULATE PAGE INDEX) */
+/* TCI_ISFORWARD (DIRECTION OF CONTAINER FORWARD OR BACKWARD) */
+/* */
+/* OUTPUT: */
+/* TCI_CONTAINERPTR (A POINTER TO THE HEAD OF THE CONTAINER) */
+/* TCI_CONTAINERLEN (LENGTH OF THE CONTAINER) */
+/* TCI_CONTAINERHEAD (THE HEADER OF THE CONTAINER) */
+/* */
+/* DESCRIPTION: THE ADDRESS OF THE CONTAINER WILL BE CALCULATED AND */
+/* ALL INFORMATION ABOUT THE CONTAINER WILL BE READ */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::containerinfo(Signal* signal)
+{
+ tciContainerptr = (tciPageindex << ZSHIFT_PLUS) - (tciPageindex << ZSHIFT_MINUS);
+ if (tciIsforward == ZTRUE) {
+ jam();
+ tciContainerptr = tciContainerptr + ZHEAD_SIZE;
+ } else {
+ jam();
+ tciContainerptr = ((tciContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE;
+ }//if
+ arrGuard(tciContainerptr, 2048);
+ tciContainerhead = ciPageidptr.p->word32[tciContainerptr];
+ tciContainerlen = tciContainerhead >> 26;
+}//Dbacc::containerinfo()
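+/* Reading aid for the arithmetic above (derived from the expressions, the */
+/* numeric constant values are not restated here): the container base is */
+/* base = tciPageindex * ((1 << ZSHIFT_PLUS) - (1 << ZSHIFT_MINUS)) */
+/* a forward container header sits at base + ZHEAD_SIZE, a backward one at */
+/* base + ZHEAD_SIZE + ZBUF_SIZE - ZCON_HEAD_SIZE, and bits 26..31 of the */
+/* header word hold the container length. */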
+
+/* --------------------------------------------------------------------------------- */
+/* GET_SCAN_ELEMENT */
+/* INPUT: GSE_PAGEIDPTR */
+/* TGSE_PAGEINDEX */
+/* OUTPUT: TGSE_IS_LOCKED (IF TRESULT /= ZFALSE) */
+/* GSE_PAGEIDPTR */
+/* TGSE_PAGEINDEX */
+/* --------------------------------------------------------------------------------- */
+bool Dbacc::getScanElement(Signal* signal)
+{
+ tgseIsforward = ZTRUE;
+ NEXTSEARCH_SCAN_LOOP:
+ ciPageidptr.i = gsePageidptr.i;
+ ciPageidptr.p = gsePageidptr.p;
+ tciPageindex = tgsePageindex;
+ tciIsforward = tgseIsforward;
+ containerinfo(signal);
+ sscPageidptr.i = gsePageidptr.i;
+ sscPageidptr.p = gsePageidptr.p;
+ tsscContainerlen = tciContainerlen;
+ tsscContainerptr = tciContainerptr;
+ tsscIsforward = tciIsforward;
+ if (searchScanContainer(signal)) {
+ jam();
+ tgseIsLocked = tsscIsLocked;
+ tgseElementptr = tsscElementptr;
+ tgseContainerptr = tsscContainerptr;
+ return true;
+ }//if
+ if (((tciContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ nciPageidptr.i = gsePageidptr.i;
+ nciPageidptr.p = gsePageidptr.p;
+ tnciContainerhead = tciContainerhead;
+ tnciContainerptr = tciContainerptr;
+ nextcontainerinfo(signal);
+ tgsePageindex = tnciPageindex;
+ gsePageidptr.i = nciPageidptr.i;
+ gsePageidptr.p = nciPageidptr.p;
+ tgseIsforward = tnciIsforward;
+ goto NEXTSEARCH_SCAN_LOOP;
+ }//if
+ return false;
+}//Dbacc::getScanElement()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_SCAN_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initScanOpRec(Signal* signal)
+{
+ Uint32 tisoTmp;
+ Uint32 tisoLocalPtr;
+ Uint32 guard24;
+
+ scanPtr.p->scanOpsAllocated++;
+
+ operationRecPtr.p->scanRecPtr = scanPtr.i;
+ operationRecPtr.p->operation = ZSCAN_OP;
+ operationRecPtr.p->transactionstate = ACTIVE;
+ operationRecPtr.p->commitDeleteCheckFlag = ZFALSE;
+ operationRecPtr.p->lockMode = scanPtr.p->scanLockMode;
+ operationRecPtr.p->fid = fragrecptr.p->myfid;
+ operationRecPtr.p->fragptr = fragrecptr.i;
+ operationRecPtr.p->elementIsDisappeared = ZFALSE;
+ operationRecPtr.p->nextParallelQue = RNIL;
+ operationRecPtr.p->prevParallelQue = RNIL;
+ operationRecPtr.p->nextSerialQue = RNIL;
+ operationRecPtr.p->prevSerialQue = RNIL;
+ operationRecPtr.p->prevQueOp = RNIL;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->keyinfoPage = RNIL; // Safety precaution
+ operationRecPtr.p->transId1 = scanPtr.p->scanTrid1;
+ operationRecPtr.p->transId2 = scanPtr.p->scanTrid2;
+ operationRecPtr.p->lockOwner = ZFALSE;
+ operationRecPtr.p->dirtyRead = 0;
+ operationRecPtr.p->nodeType = 0; // Not a stand-by node
+ operationRecPtr.p->elementIsforward = tisoIsforward;
+ operationRecPtr.p->elementContainer = tisoContainerptr;
+ operationRecPtr.p->elementPointer = tisoElementptr;
+ operationRecPtr.p->elementPage = isoPageptr.i;
+ operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZFALSE;
+ tisoLocalPtr = tisoElementptr + tisoIsforward;
+ guard24 = fragrecptr.p->localkeylen - 1;
+ for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) {
+ arrGuard(tisoTmp, 2);
+ arrGuard(tisoLocalPtr, 2048);
+ operationRecPtr.p->localdata[tisoTmp] = isoPageptr.p->word32[tisoLocalPtr];
+ tisoLocalPtr = tisoLocalPtr + tisoIsforward;
+ }//for
+ arrGuard(tisoLocalPtr, 2048);
+ operationRecPtr.p->keydata[0] = isoPageptr.p->word32[tisoLocalPtr];
+ operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
+}//Dbacc::initScanOpRec()
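+/* Note on the copy loop in initScanOpRec above (reading aid only): the */
+/* element words are walked with tisoLocalPtr += tisoIsforward; a backward */
+/* element uses cminusOne (0xFFFFFFFF), so the unsigned addition wraps */
+/* around and effectively steps the pointer one word backwards. */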
+
+/* --------------------------------------------------------------------------------- */
+/* NEXTCONTAINERINFO */
+/* DESCRIPTION:THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */
+/* ABOUT NEXT CONTAINER IN THE BUCKET. */
+/* INPUT: TNCI_CONTAINERHEAD */
+/* NCI_PAGEIDPTR */
+/* TNCI_CONTAINERPTR */
+/* OUTPUT: */
+/* TNCI_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */
+/* TNCI_ISFORWARD (IS THE NEXT CONTAINER FORWARD (+1) OR BACKWARD (-1) */
+/* NCI_PAGEIDPTR (PAGE REFERENCE OF NEXT CONTAINER) */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::nextcontainerinfo(Signal* signal)
+{
+ tnciNextSamePage = (tnciContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */
+ /* THE NEXT CONTAINER IS IN THE SAME PAGE */
+ tnciPageindex = tnciContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */
+ if (((tnciContainerhead >> 7) & 3) == ZLEFT) {
+ jam();
+ tnciIsforward = ZTRUE;
+ } else {
+ jam();
+ tnciIsforward = cminusOne;
+ }//if
+ if (tnciNextSamePage == ZFALSE) {
+ jam();
+ /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */
+ arrGuard(tnciContainerptr + 1, 2048);
+ tnciTmp = nciPageidptr.p->word32[tnciContainerptr + 1];
+ nciOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard((tnciTmp >> 8), 256);
+ nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8];
+ ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray);
+ nciPageidptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff];
+ ptrCheckGuard(nciPageidptr, cpagesize, page8);
+ }//if
+}//Dbacc::nextcontainerinfo()
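+/* Summary of the container header bits as read here and in containerinfo() */
+/* (collected from the code as a reading aid): */
+/* bits 0..6 page index of the next container */
+/* bits 7..8 next container indicator and side (0 = none, ZLEFT = forward, */
+/* otherwise backward) */
+/* bit 9 next container is on the same page */
+/* bits 26..31 container length (see containerinfo) */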
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_ACTIVE_SCAN_OP */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putActiveScanOp(Signal* signal)
+{
+ OperationrecPtr pasOperationRecPtr;
+ pasOperationRecPtr.i = scanPtr.p->scanFirstActiveOp;
+ if (pasOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pasOperationRecPtr, coprecsize, operationrec);
+ pasOperationRecPtr.p->prevOp = operationRecPtr.i;
+ }//if
+ operationRecPtr.p->nextOp = pasOperationRecPtr.i;
+ operationRecPtr.p->prevOp = RNIL;
+ scanPtr.p->scanFirstActiveOp = operationRecPtr.i;
+}//Dbacc::putActiveScanOp()
+
+/**
+ * putOpScanLockQue
+ *
+ * Description: Put an operation in the doubly linked
+ * lock list on a scan record. The list is used to
+ * keep track of which operations belonging to the
+ * scan have been put in the serial lock list of
+ * another operation.
+ *
+ * @note Use takeOutScanLockQueue to remove an operation
+ * from the list
+ *
+ */
+void Dbacc::putOpScanLockQue()
+{
+
+#ifdef VM_TRACE
+ // DEBUG CODE
+ // Check that there are as many operations in the lockqueue as
+ // scanLockHeld indicates
+ OperationrecPtr tmpOp;
+ int numLockedOpsBefore = 0;
+ tmpOp.i = scanPtr.p->scanFirstLockedOp;
+ while(tmpOp.i != RNIL){
+ numLockedOpsBefore++;
+ ptrCheckGuard(tmpOp, coprecsize, operationrec);
+ if (tmpOp.p->nextOp == RNIL)
+ ndbrequire(tmpOp.i == scanPtr.p->scanLastLockedOp);
+ tmpOp.i = tmpOp.p->nextOp;
+ }
+ ndbrequire(numLockedOpsBefore==scanPtr.p->scanLockHeld);
+#endif
+
+ OperationrecPtr pslOperationRecPtr;
+ ScanRec theScanRec;
+ theScanRec = *scanPtr.p;
+
+ pslOperationRecPtr.i = scanPtr.p->scanLastLockedOp;
+ operationRecPtr.p->prevOp = pslOperationRecPtr.i;
+ operationRecPtr.p->nextOp = RNIL;
+ if (pslOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pslOperationRecPtr, coprecsize, operationrec);
+ pslOperationRecPtr.p->nextOp = operationRecPtr.i;
+ } else {
+ jam();
+ scanPtr.p->scanFirstLockedOp = operationRecPtr.i;
+ }//if
+ scanPtr.p->scanLastLockedOp = operationRecPtr.i;
+ scanPtr.p->scanLockHeld++;
+
+}//Dbacc::putOpScanLockQue()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_READY_SCAN_QUEUE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putReadyScanQueue(Signal* signal, Uint32 scanRecIndex)
+{
+ OperationrecPtr prsOperationRecPtr;
+ ScanRecPtr TscanPtr;
+
+ TscanPtr.i = scanRecIndex;
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+
+ prsOperationRecPtr.i = TscanPtr.p->scanLastQueuedOp;
+ operationRecPtr.p->prevOp = prsOperationRecPtr.i;
+ operationRecPtr.p->nextOp = RNIL;
+ TscanPtr.p->scanLastQueuedOp = operationRecPtr.i;
+ if (prsOperationRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prsOperationRecPtr, coprecsize, operationrec);
+ prsOperationRecPtr.p->nextOp = operationRecPtr.i;
+ } else {
+ jam();
+ TscanPtr.p->scanFirstQueuedOp = operationRecPtr.i;
+ }//if
+}//Dbacc::putReadyScanQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_BUCKET */
+// Input:
+// rsbPageidptr.i Index to page where buckets starts
+// rsbPageidptr.p Pointer to page where bucket starts
+// trsbPageindex Page index of starting container in bucket
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanBucket(Signal* signal)
+{
+ Uint32 trsbIsforward;
+
+ trsbIsforward = ZTRUE;
+ NEXTRELEASESCANLOOP:
+ ciPageidptr.i = rsbPageidptr.i;
+ ciPageidptr.p = rsbPageidptr.p;
+ tciPageindex = trsbPageindex;
+ tciIsforward = trsbIsforward;
+ containerinfo(signal);
+ rscPageidptr.i = rsbPageidptr.i;
+ rscPageidptr.p = rsbPageidptr.p;
+ trscContainerlen = tciContainerlen;
+ trscContainerptr = tciContainerptr;
+ trscIsforward = trsbIsforward;
+ releaseScanContainer(signal);
+ if (((tciContainerhead >> 7) & 0x3) != 0) {
+ jam();
+ nciPageidptr.i = rsbPageidptr.i;
+ nciPageidptr.p = rsbPageidptr.p;
+ tnciContainerhead = tciContainerhead;
+ tnciContainerptr = tciContainerptr;
+ nextcontainerinfo(signal);
+ rsbPageidptr.i = nciPageidptr.i;
+ rsbPageidptr.p = nciPageidptr.p;
+ trsbPageindex = tnciPageindex;
+ trsbIsforward = tnciIsforward;
+ goto NEXTRELEASESCANLOOP;
+ }//if
+}//Dbacc::releaseScanBucket()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_CONTAINER */
+/* INPUT: TRSC_CONTAINERLEN */
+/* RSC_PAGEIDPTR */
+/* TRSC_CONTAINERPTR */
+/* TRSC_ISFORWARD */
+/* SCAN_PTR */
+/* */
+/* DESCRIPTION: SEARCHES THROUGH A CONTAINER AND RESETS THE SCAN BIT OF THE */
+/* ELEMENTS IN THE CONTAINER. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanContainer(Signal* signal)
+{
+ OperationrecPtr rscOperPtr;
+ Uint32 trscElemStep;
+ Uint32 trscElementptr;
+ Uint32 trscElemlens;
+ Uint32 trscElemlen;
+
+ if (trscContainerlen < 4) {
+ if (trscContainerlen != ZCON_HEAD_SIZE) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+ return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
+ }//if
+ trscElemlens = trscContainerlen - ZCON_HEAD_SIZE;
+ trscElemlen = fragrecptr.p->elementLength;
+ if (trscIsforward == 1) {
+ jam();
+ trscElementptr = trscContainerptr + ZCON_HEAD_SIZE;
+ trscElemStep = trscElemlen;
+ } else {
+ jam();
+ trscElementptr = trscContainerptr - 1;
+ trscElemStep = 0 - trscElemlen;
+ }//if
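+ // For a forward container the elements follow the container header and we
+ // step upwards; for a backward container the first element lies just below
+ // the container pointer and we step downwards (negative trscElemStep).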
+ do {
+ arrGuard(trscElementptr, 2048);
+ const Uint32 eh = rscPageidptr.p->word32[trscElementptr];
+ const Uint32 scanMask = scanPtr.p->scanMask;
+ if (ElementHeader::getUnlocked(eh)) {
+ jam();
+ const Uint32 tmp = ElementHeader::clearScanBit(eh, scanMask);
+ dbgWord32(rscPageidptr, trscElementptr, tmp);
+ rscPageidptr.p->word32[trscElementptr] = tmp;
+ } else {
+ jam();
+ rscOperPtr.i = ElementHeader::getOpPtrI(eh);
+ ptrCheckGuard(rscOperPtr, coprecsize, operationrec);
+ rscOperPtr.p->scanBits &= ~scanMask;
+ }//if
+ trscElemlens = trscElemlens - trscElemlen;
+ trscElementptr = trscElementptr + trscElemStep;
+ } while (trscElemlens > 1);
+ if (trscElemlens != 0) {
+ jam();
+ sendSystemerror(signal);
+ }//if
+}//Dbacc::releaseScanContainer()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseScanRec(Signal* signal)
+{
+ // Check that all ops this scan has allocated have been
+ // released
+ ndbrequire(scanPtr.p->scanOpsAllocated==0);
+
+ // Check that all locks this scan might have aquired
+ // have been properly released
+ ndbrequire(scanPtr.p->scanLockHeld == 0);
+ ndbrequire(scanPtr.p->scanFirstLockedOp == RNIL);
+ ndbrequire(scanPtr.p->scanLastLockedOp == RNIL);
+
+ // Check that all active operations have been
+ // properly released
+ ndbrequire(scanPtr.p->scanFirstActiveOp == RNIL);
+
+ // Check that all queued operations have been
+ // properly released
+ ndbrequire(scanPtr.p->scanFirstQueuedOp == RNIL);
+ ndbrequire(scanPtr.p->scanLastQueuedOp == RNIL);
+
+ // Put scan record in free list
+ scanPtr.p->scanNextfreerec = cfirstFreeScanRec;
+ scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT;
+ cfirstFreeScanRec = scanPtr.i;
+
+}//Dbacc::releaseScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEARCH_SCAN_CONTAINER */
+/* INPUT: TSSC_CONTAINERLEN */
+/* TSSC_CONTAINERPTR */
+/* TSSC_ISFORWARD */
+/* SSC_PAGEIDPTR */
+/* SCAN_PTR */
+/* OUTPUT: TSSC_IS_LOCKED */
+/* */
+/* DESCRIPTION: SEARCH IN A CONTAINER TO FIND THE NEXT SCAN ELEMENT. */
+/* TO DO THIS THE SCAN BIT OF THE ELEMENT HEADER IS CHECKED. IF */
+/* THIS BIT IS ZERO, IT IS SET TO ONE AND THE ELEMENT IS RETURNED.*/
+/* --------------------------------------------------------------------------------- */
+bool Dbacc::searchScanContainer(Signal* signal)
+{
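+ /* Returns true when an element in the container that has not yet been */
+ /* scanned by this scan is found; it is then marked as scanned (element */
+ /* header scan bit, or the lock owner's scanBits when the element is */
+ /* locked), tsscElementptr points at it and tsscIsLocked tells whether it */
+ /* was locked. Returns false when the container holds no such element. */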
+ OperationrecPtr sscOperPtr;
+ Uint32 tsscScanBits;
+ Uint32 tsscElemlens;
+ Uint32 tsscElemlen;
+ Uint32 tsscElemStep;
+
+ if (tsscContainerlen < 4) {
+ jam();
+ return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
+ }//if
+ tsscElemlens = tsscContainerlen - ZCON_HEAD_SIZE;
+ tsscElemlen = fragrecptr.p->elementLength;
+ /* LENGTH OF THE ELEMENT */
+ if (tsscIsforward == 1) {
+ jam();
+ tsscElementptr = tsscContainerptr + ZCON_HEAD_SIZE;
+ tsscElemStep = tsscElemlen;
+ } else {
+ jam();
+ tsscElementptr = tsscContainerptr - 1;
+ tsscElemStep = 0 - tsscElemlen;
+ }//if
+ SCANELEMENTLOOP001:
+ arrGuard(tsscElementptr, 2048);
+ const Uint32 eh = sscPageidptr.p->word32[tsscElementptr];
+ tsscIsLocked = ElementHeader::getLocked(eh);
+ if (!tsscIsLocked){
+ jam();
+ tsscScanBits = ElementHeader::getScanBits(eh);
+ if ((scanPtr.p->scanMask & tsscScanBits) == 0) {
+ jam();
+ const Uint32 tmp = ElementHeader::setScanBit(eh, scanPtr.p->scanMask);
+ dbgWord32(sscPageidptr, tsscElementptr, tmp);
+ sscPageidptr.p->word32[tsscElementptr] = tmp;
+ return true;
+ }//if
+ } else {
+ jam();
+ sscOperPtr.i = ElementHeader::getOpPtrI(eh);
+ ptrCheckGuard(sscOperPtr, coprecsize, operationrec);
+ if ((sscOperPtr.p->scanBits & scanPtr.p->scanMask) == 0) {
+ jam();
+ sscOperPtr.p->scanBits |= scanPtr.p->scanMask;
+ return true;
+ }//if
+ }//if
+ /* THE ELEMENT IS ALREADY SENT. */
+ /* SEARCH FOR NEXT ONE */
+ tsscElemlens = tsscElemlens - tsscElemlen;
+ if (tsscElemlens > 1) {
+ jam();
+ tsscElementptr = tsscElementptr + tsscElemStep;
+ goto SCANELEMENTLOOP001;
+ }//if
+ return false;
+}//Dbacc::searchScanContainer()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND THE RESPONSE NEXT_SCANCONF AND POSSIBLE KEYINFO SIGNALS AS WELL. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendNextScanConf(Signal* signal)
+{
+ scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
+ Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
+ jam();
+ /** ---------------------------------------------------------------------
+ * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND
+ * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED.
+ * ---------------------------------------------------------------------- */
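+ // NEXT_SCANCONF as built here: [0] the scan user's (LQH) pointer, [1] ACC
+ // operation pointer, [2] fragment id, [3]-[4] local key, [5] local key length.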
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = operationRecPtr.i;
+ signal->theData[2] = operationRecPtr.p->fid;
+ signal->theData[3] = operationRecPtr.p->localdata[0];
+ signal->theData[4] = operationRecPtr.p->localdata[1];
+ signal->theData[5] = fragrecptr.p->localkeylen;
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
+ return;
+}//Dbacc::sendNextScanConf()
+
+/*---------------------------------------------------------------------------
+ * sendScanHbRep
+ * Description: Uses Dispatcher::execute() to send a heartbeat to DBTC
+ * from DBLQH telling it that the scan is alive. We use sendScanHbRep()
+ * in DBLQH; this needs to be done here in DBACC since it can take
+ * a while before LQH receives an answer the normal way from ACC.
+ *--------------------------------------------------------------------------*/
+void Dbacc::sendScanHbRep(Signal* signal, Uint32 scanPtrIndex)
+{
+ scanPtr.i = scanPtrIndex;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+
+ // If the timer status is on we keep the heartbeat loop going by sending
+ // a new delayed CONTINUEB, else the loop stops and no new CONTINUEB is sent
+ if (scanPtr.p->scanTimer != 0){
+ if (scanPtr.p->scanTimer == scanPtr.p->scanContinuebCounter){
+ jam();
+ ndbrequire(scanPtr.p->scanState != ScanRec::SCAN_DISCONNECT);
+
+ signal->theData[0] = scanPtr.p->scanUserptr;
+ signal->theData[1] = scanPtr.p->scanTrid1;
+ signal->theData[2] = scanPtr.p->scanTrid2;
+ EXECUTE_DIRECT(DBLQH, GSN_SCAN_HBREP, signal, 3);
+ jamEntry();
+ }//if
+ scanPtr.p->scanContinuebCounter++;
+ signal->theData[0] = ZSEND_SCAN_HBREP;
+ signal->theData[1] = scanPtr.i;
+ sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2);
+ } else {
+ jam();
+ scanPtr.p->scanContinuebCounter = 0;
+ }//if
+}//Dbacc::sendScanHbRep()
+
+/* --------------------------------------------------------------------------------- */
+/* SETLOCK */
+/* DESCRIPTION:SETS LOCK ON AN ELEMENT. INFORMATION ABOUT THE ELEMENT IS */
+/* SAVED IN THE ELEMENT HEAD.A COPY OF THIS INFORMATION WILL */
+/* BE PUT IN THE OPERATION RECORD. A FIELD IN THE HEADER OF */
+/* THE ELEMENT POINTS TO THE OPERATION RECORD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::setlock(Signal* signal)
+{
+ Uint32 tselTmp1;
+
+ arrGuard(tslElementptr, 2048);
+ tselTmp1 = slPageidptr.p->word32[tslElementptr];
+ operationRecPtr.p->scanBits = ElementHeader::getScanBits(tselTmp1);
+ operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(tselTmp1);
+
+ tselTmp1 = ElementHeader::setLocked(operationRecPtr.i);
+ dbgWord32(slPageidptr, tslElementptr, tselTmp1);
+ slPageidptr.p->word32[tslElementptr] = tselTmp1;
+}//Dbacc::setlock()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_ACTIVE_SCAN_OP */
+/* DESCRIPTION: AN ACTIVE SCAN OPERATION BELONGS TO THE ACTIVE LIST OF THE */
+/* SCAN RECORD. THIS SUBROUTINE UPDATES THAT LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeOutActiveScanOp(Signal* signal)
+{
+ OperationrecPtr tasOperationRecPtr;
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ tasOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
+ tasOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+ scanPtr.p->scanFirstActiveOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ tasOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec);
+ tasOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ }//if
+}//Dbacc::takeOutActiveScanOp()
+
+/**
+ * takeOutScanLockQueue
+ *
+ * Description: Take out an operation from the doubly linked
+ * lock list on a scan record.
+ *
+ * @note Use putOpScanLockQue to insert an operation into
+ * the list
+ *
+ */
+void Dbacc::takeOutScanLockQueue(Uint32 scanRecIndex)
+{
+ OperationrecPtr tslOperationRecPtr;
+ ScanRecPtr TscanPtr;
+
+ TscanPtr.i = scanRecIndex;
+ ptrCheckGuard(TscanPtr, cscanRecSize, scanRec);
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ tslOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
+ tslOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+ // Check that the first pointer points at the operation to take out
+ ndbrequire(TscanPtr.p->scanFirstLockedOp==operationRecPtr.i);
+ TscanPtr.p->scanFirstLockedOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ tslOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec);
+ tslOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ } else {
+ jam();
+ // Check that the last pointer points at the operation to take out
+ ndbrequire(TscanPtr.p->scanLastLockedOp==operationRecPtr.i);
+ TscanPtr.p->scanLastLockedOp = operationRecPtr.p->prevOp;
+ }//if
+ TscanPtr.p->scanLockHeld--;
+
+#ifdef VM_TRACE
+ // DEBUG CODE
+ // Check that there are as many operations in the lockqueue as
+ // scanLockHeld indicates
+ OperationrecPtr tmpOp;
+ int numLockedOps = 0;
+ tmpOp.i = TscanPtr.p->scanFirstLockedOp;
+ while(tmpOp.i != RNIL){
+ numLockedOps++;
+ ptrCheckGuard(tmpOp, coprecsize, operationrec);
+ if (tmpOp.p->nextOp == RNIL)
+ ndbrequire(tmpOp.i == TscanPtr.p->scanLastLockedOp);
+ tmpOp.i = tmpOp.p->nextOp;
+ }
+ ndbrequire(numLockedOps==TscanPtr.p->scanLockHeld);
+#endif
+}//Dbacc::takeOutScanLockQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_OUT_READY_SCAN_QUEUE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeOutReadyScanQueue(Signal* signal)
+{
+ OperationrecPtr trsOperationRecPtr;
+
+ if (operationRecPtr.p->prevOp != RNIL) {
+ jam();
+ trsOperationRecPtr.i = operationRecPtr.p->prevOp;
+ ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
+ trsOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp;
+ } else {
+ jam();
+ scanPtr.p->scanFirstQueuedOp = operationRecPtr.p->nextOp;
+ }//if
+ if (operationRecPtr.p->nextOp != RNIL) {
+ jam();
+ trsOperationRecPtr.i = operationRecPtr.p->nextOp;
+ ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec);
+ trsOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp;
+ } else {
+ jam();
+ scanPtr.p->scanLastQueuedOp = operationRecPtr.p->prevOp;
+ }//if
+}//Dbacc::takeOutReadyScanQueue()
+
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+/* */
+/* END OF SCAN MODULE */
+/* */
+/* --------------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------------- */
+
+bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
+{
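+ // Search the table's fragment holder array for the given fragment id and,
+ // if found, return the corresponding root fragment record.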
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ jam();
+ if (tabptr.p->fragholder[i] == fid) {
+ jam();
+ rootPtr.i = tabptr.p->fragptrholder[i];
+ ptrCheckGuard(rootPtr, crootfragmentsize, rootfragmentrec);
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbacc::getrootfragmentrec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initFsOpRec(Signal* signal)
+{
+ fsOpptr.p->fsOpfragrecPtr = fragrecptr.i;
+ fsOpptr.p->fsConptr = fsConnectptr.i;
+}//Dbacc::initFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_LCP_CONN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initLcpConnRec(Signal* signal)
+{
+ lcpConnectptr.p->lcpUserblockref = tuserblockref;
+ lcpConnectptr.p->lcpUserptr = tuserptr;
+ lcpConnectptr.p->noOfLcpConf = 0; /* NO OF RETURNED CONF SIGNALS */
+ lcpConnectptr.p->syncUndopageState = WAIT_NOTHING;
+}//Dbacc::initLcpConnRec()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_OVERPAGE */
+/* INPUT. IOP_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */
+/* DESCRIPTION: THE CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL */
+/* VALUES ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF THE NDBACC BLOCK */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initOverpage(Signal* signal)
+{
+ Uint32 tiopTmp;
+ Uint32 tiopPrevFree;
+ Uint32 tiopNextFree;
+
+ for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
+ iopPageptr.p->word32[tiopIndex] = 0;
+ }//for
+ iopPageptr.p->word32[ZPOS_OVERFLOWREC] = iopOverflowRecPtr.i;
+ iopPageptr.p->word32[ZPOS_CHECKSUM] = 0;
+ iopPageptr.p->word32[ZPOS_PAGE_ID] = tiopPageId;
+ iopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
+ tiopTmp = ZEMPTYLIST;
+ tiopTmp = (tiopTmp << 16) + (tiopTmp << 23);
+ iopPageptr.p->word32[ZPOS_EMPTY_LIST] = tiopTmp + (1 << ZPOS_PAGE_TYPE_BIT);
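+ /* Both free list heads are set to ZEMPTYLIST (no used containers yet) and */
+ /* the page type bit is set, which (by contrast with initPage) appears to */
+ /* mark this page as an overflow page. */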
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = ZHEAD_SIZE + 1;
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
+ for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = ZHEAD_SIZE;
+ for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
+ iopPageptr.p->word32[tiopIndex] = tiopNextFree;
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ }//for
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* LEFT_LIST IS UPDATED */
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 1;
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST;
+ for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) {
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ iopPageptr.p->word32[tiopIndex] = tiopPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 2;
+ for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) {
+ iopPageptr.p->word32[tiopIndex] = tiopNextFree;
+ tiopIndex = tiopIndex + ZBUF_SIZE;
+ }//for
+ iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* RIGHT_LIST IS UPDATED */
+}//Dbacc::initOverpage()
+
+/* --------------------------------------------------------------------------------- */
+/* INIT_PAGE */
+/* INPUT. INP_PAGEPTR, POINTER TO A PAGE RECORD */
+/* DESCRIPTION: THE CONTAINERS AND FREE LISTS OF THE PAGE GET THEIR INITIAL */
+/* VALUES ACCORDING TO LH3 AND THE PAGE STRUCTURE DESCRIPTION OF THE NDBACC BLOCK */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::initPage(Signal* signal)
+{
+ Uint32 tinpTmp1;
+ Uint32 tinpIndex;
+ Uint32 tinpTmp;
+ Uint32 tinpPrevFree;
+ Uint32 tinpNextFree;
+
+ for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) {
+ inpPageptr.p->word32[tiopIndex] = 0;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* SET PAGE ID FOR USE OF CHECKPOINTER. */
+ /* PREPARE CONTAINER HEADERS INDICATING EMPTY CONTAINERS WITHOUT NEXT. */
+ /* --------------------------------------------------------------------------------- */
+ inpPageptr.p->word32[ZPOS_PAGE_ID] = tipPageId;
+ tinpTmp1 = ZCON_HEAD_SIZE;
+ tinpTmp1 = tinpTmp1 << 26;
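+ /* The container length appears to be kept in the topmost bits (bit 26 and */
+ /* up) of the container header word, so a header-only (empty) container has */
+ /* length ZCON_HEAD_SIZE. */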
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE ZNO_CONTAINERS PREDEFINED HEADERS ON LEFT SIZE. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = ZHEAD_SIZE;
+ for (tinpTmp = 0; tinpTmp <= ZNO_CONTAINERS - 1; tinpTmp++) {
+ inpPageptr.p->word32[tinpIndex] = tinpTmp1;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ /* WORD32(ZPOS_EMPTY_LIST) DATA STRUCTURE:*/
+ /*--------------------------------------- */
+ /*| PAGE TYPE|LEFT FREE|RIGHT FREE */
+ /*| 1 | LIST | LIST */
+ /*| BIT | 7 BITS | 7 BITS */
+ /*--------------------------------------- */
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE FIRST POINTER TO DOUBLY LINKED LIST OF FREE CONTAINERS. */
+ /* INITIALISE EMPTY LISTS OF USED CONTAINERS. */
+ /* INITIALISE LEFT FREE LIST TO 64 AND RIGHT FREE LIST TO ZERO. */
+ /* ALSO INITIALISE PAGE TYPE TO NOT OVERFLOW PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ tinpTmp = ZEMPTYLIST;
+ tinpTmp = (tinpTmp << 16) + (tinpTmp << 23);
+ tinpTmp = tinpTmp + (ZNO_CONTAINERS << 7);
+ inpPageptr.p->word32[ZPOS_EMPTY_LIST] = tinpTmp;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 1;
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ for (tinpPrevFree = 0; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 2;
+ for (tinpNextFree = 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
+ inpPageptr.p->word32[tinpIndex] = tinpNextFree;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
+ /* PREDEFINED AS OCCUPIED. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = (ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE;
+ for (tinpNextFree = ZNO_CONTAINERS + 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) {
+ inpPageptr.p->word32[tinpIndex] = tinpNextFree;
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ }//for
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */
+ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */
+ /* PREDEFINED AS OCCUPIED. */
+ /* --------------------------------------------------------------------------------- */
+ tinpIndex = ((ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE) + 1;
+ inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST;
+ for (tinpPrevFree = ZNO_CONTAINERS; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) {
+ tinpIndex = tinpIndex + ZBUF_SIZE;
+ inpPageptr.p->word32[tinpIndex] = tinpPrevFree;
+ }//for
+ /* --------------------------------------------------------------------------------- */
+ /* INITIALISE HEADER POSITIONS NOT CURRENTLY USED AND ENSURE USE OF OVERFLOW */
+ /* RECORD POINTER ON THIS PAGE LEADS TO ERROR. */
+ /* --------------------------------------------------------------------------------- */
+ inpPageptr.p->word32[ZPOS_CHECKSUM] = 0;
+ inpPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0;
+ inpPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+}//Dbacc::initPage()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_OP_IN_FRAG_WAIT_QUE */
+/* DESCRIPTION: AN OPERATION WHICH OWNS A LOCK ON AN ELEMENT IS PUT IN A */
+/* LIST ON THE FRAGMENT. THIS LIST IS USED TO STOP AND RESTART */
+/* QUEUED OPERATIONS DURING THE CREATE CHECKPOINT PROCESS. */
+/* */
+/* IF CONTINUEB SIGNALS ARE INTRODUCED AFTER STARTING TO EXECUTE ACCKEYREQ WE */
+/* MUST PUT THE OPERATION IN THIS LIST BEFORE EXITING TO ENSURE THAT WE ARE */
+/* NOT BLOCKED AFTER LQH HAS RECEIVED ALL LCP_HOLDOP'S, IN WHICH CASE THE LCP */
+/* WOULD NEVER PROCEED. WE ALSO PUT IT INTO THIS LIST WHEN WAITING FOR LONG */
+/* KEYS. THIS IS ONLY NEEDED IF SIGNALS CAN ENTER BETWEEN THE KEYDATA */
+/* CARRYING SIGNALS. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putOpInFragWaitQue(Signal* signal)
+{
+ OperationrecPtr tpiwOperRecPtr;
+
+ if (operationRecPtr.p->operation != ZSCAN_OP) {
+ if (fragrecptr.p->firstWaitInQueOp == RNIL) {
+ jam();
+ fragrecptr.p->firstWaitInQueOp = operationRecPtr.i;
+ } else {
+ jam();
+ tpiwOperRecPtr.i = fragrecptr.p->lastWaitInQueOp;
+ ptrCheckGuard(tpiwOperRecPtr, coprecsize, operationrec);
+ tpiwOperRecPtr.p->nextQueOp = operationRecPtr.i;
+ }//if
+ operationRecPtr.p->opState = WAIT_IN_QUEUE;
+ operationRecPtr.p->nextQueOp = RNIL;
+ operationRecPtr.p->prevQueOp = fragrecptr.p->lastWaitInQueOp;
+ fragrecptr.p->lastWaitInQueOp = operationRecPtr.i;
+ }//if
+}//Dbacc::putOpInFragWaitQue()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_OVERFLOW_REC_IN_FRAG */
+/* DESCRIPTION: AN OVERFLOW RECORD, WHICH IS USED TO KEEP INFORMATION ABOUT AN */
+/* OVERFLOW PAGE, WILL BE PUT IN A LIST OF OVERFLOW RECORDS IN */
+/* THE FRAGMENT RECORD. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putOverflowRecInFrag(Signal* signal)
+{
+ OverflowRecordPtr tpifNextOverrecPtr;
+ OverflowRecordPtr tpifPrevOverrecPtr;
+
+ tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec;
+ tpifPrevOverrecPtr.i = RNIL;
+ while (tpifNextOverrecPtr.i != RNIL) {
+ ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord);
+ if (tpifNextOverrecPtr.p->dirindex < porOverflowRecPtr.p->dirindex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAD A LOWER PAGE ID. */
+ /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
+ /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
+ /* PERFORMED. */
+ /* --------------------------------------------------------------------------------- */
+ tpifPrevOverrecPtr = tpifNextOverrecPtr;
+ tpifNextOverrecPtr.i = tpifNextOverrecPtr.p->nextOverRec;
+ } else {
+ jam();
+ ndbrequire(tpifNextOverrecPtr.p->dirindex != porOverflowRecPtr.p->dirindex);
+ /* --------------------------------------------------------------------------------- */
+ /* TRYING TO INSERT THE SAME PAGE TWICE. SYSTEM ERROR. */
+ /* --------------------------------------------------------------------------------- */
+ break;
+ }//if
+ }//while
+ if (tpifNextOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->lastOverflowRec = porOverflowRecPtr.i;
+ } else {
+ jam();
+ tpifNextOverrecPtr.p->prevOverRec = porOverflowRecPtr.i;
+ }//if
+ if (tpifPrevOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->firstOverflowRec = porOverflowRecPtr.i;
+ } else {
+ jam();
+ tpifPrevOverrecPtr.p->nextOverRec = porOverflowRecPtr.i;
+ }//if
+ porOverflowRecPtr.p->prevOverRec = tpifPrevOverrecPtr.i;
+ porOverflowRecPtr.p->nextOverRec = tpifNextOverrecPtr.i;
+}//Dbacc::putOverflowRecInFrag()
+
+/* --------------------------------------------------------------------------------- */
+/* PUT_REC_IN_FREE_OVERDIR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::putRecInFreeOverdir(Signal* signal)
+{
+ OverflowRecordPtr tpfoNextOverrecPtr;
+ OverflowRecordPtr tpfoPrevOverrecPtr;
+
+ tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ tpfoPrevOverrecPtr.i = RNIL;
+ while (tpfoNextOverrecPtr.i != RNIL) {
+ ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord);
+ if (tpfoNextOverrecPtr.p->dirindex < priOverflowRecPtr.p->dirindex) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* PROCEED TO THE NEXT ENTRY IN THE LIST SINCE THIS ENTRY HAD A LOWER PAGE ID. */
+ /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */
+ /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */
+ /* PERFORMED. */
+ /* --------------------------------------------------------------------------------- */
+ tpfoPrevOverrecPtr = tpfoNextOverrecPtr;
+ tpfoNextOverrecPtr.i = tpfoNextOverrecPtr.p->nextOverList;
+ } else {
+ jam();
+ ndbrequire(tpfoNextOverrecPtr.p->dirindex != priOverflowRecPtr.p->dirindex);
+ /* --------------------------------------------------------------------------------- */
+ /* ENSURE WE ARE NOT TRYING TO INSERT THE SAME PAGE TWICE. */
+ /* --------------------------------------------------------------------------------- */
+ break;
+ }//if
+ }//while
+ if (tpfoNextOverrecPtr.i != RNIL) {
+ jam();
+ tpfoNextOverrecPtr.p->prevOverList = priOverflowRecPtr.i;
+ }//if
+ if (tpfoPrevOverrecPtr.i == RNIL) {
+ jam();
+ fragrecptr.p->firstFreeDirindexRec = priOverflowRecPtr.i;
+ } else {
+ jam();
+ tpfoPrevOverrecPtr.p->nextOverList = priOverflowRecPtr.i;
+ }//if
+ priOverflowRecPtr.p->prevOverList = tpfoPrevOverrecPtr.i;
+ priOverflowRecPtr.p->nextOverList = tpfoNextOverrecPtr.i;
+}//Dbacc::putRecInFreeOverdir()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_DIRECTORY */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseDirectory(Signal* signal)
+{
+ ptrCheckGuard(rdDirptr, cdirarraysize, directoryarray);
+ rdDirptr.p->pagep[0] = cfirstfreedir;
+ cfirstfreedir = rdDirptr.i;
+}//Dbacc::releaseDirectory()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_DIRRANGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseDirrange(Signal* signal)
+{
+ ptrCheckGuard(rdDirRangePtr, cdirrangesize, dirRange);
+ rdDirRangePtr.p->dirArray[0] = cfirstfreeDirrange;
+ cfirstfreeDirrange = rdDirRangePtr.i;
+}//Dbacc::releaseDirrange()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_FS_CONN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseFsConnRec(Signal* signal)
+{
+ fsConnectptr.p->fsNext = cfsFirstfreeconnect;
+ cfsFirstfreeconnect = fsConnectptr.i;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+}//Dbacc::releaseFsConnRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseFsOpRec(Signal* signal)
+{
+ fsOpptr.p->fsOpnext = cfsFirstfreeop;
+ cfsFirstfreeop = fsOpptr.i;
+ fsOpptr.p->fsOpstate = WAIT_NOTHING;
+}//Dbacc::releaseFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LCP_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLcpConnectRec(Signal* signal)
+{
+ lcpConnectptr.p->lcpstate = LCP_FREE;
+ lcpConnectptr.p->nextLcpConn = cfirstfreelcpConnect;
+ cfirstfreelcpConnect = lcpConnectptr.i;
+}//Dbacc::releaseLcpConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE OP RECORD */
+/* PUT A FREE OPERATION IN A FREE LIST OF THE OPERATIONS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOpRec(Signal* signal)
+{
+#if 0
+ // DEBUG CODE
+ // Check that the operation to be released isn't
+ // already in the list of free operations
+ // Since this code loops through the entire list of free operations
+ // it's only enabled in VM_TRACE mode
+ OperationrecPtr opRecPtr;
+ bool opInList = false;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ if (opRecPtr.i == operationRecPtr.i){
+ opInList = true;
+ break;
+ }
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+ ndbrequire(opInList == false);
+#endif
+ ndbrequire(operationRecPtr.p->lockOwner == ZFALSE);
+
+ operationRecPtr.p->nextOp = cfreeopRec;
+ cfreeopRec = operationRecPtr.i; /* UPDATE FREE LIST OF OP RECORDS */
+ operationRecPtr.p->prevOp = RNIL;
+ operationRecPtr.p->opState = FREE_OP;
+ operationRecPtr.p->transactionstate = IDLE;
+ operationRecPtr.p->operation = ZUNDEFINED_OP;
+}//Dbacc::releaseOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_OVERFLOW_REC */
+/* PUT A FREE OVERFLOW REC IN A FREE LIST OF THE OVERFLOW RECORDS */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOverflowRec(Signal* signal)
+{
+ rorOverflowRecPtr.p->nextfreeoverrec = cfirstfreeoverrec;
+ cfirstfreeoverrec = rorOverflowRecPtr.i;
+}//Dbacc::releaseOverflowRec()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_OVERPAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseOverpage(Signal* signal)
+{
+ DirRangePtr ropOverflowrangeptr;
+ DirectoryarrayPtr ropOverflowDirptr;
+ OverflowRecordPtr ropOverflowRecPtr;
+ OverflowRecordPtr tuodOverflowRecPtr;
+ Uint32 tropTmp;
+ Uint32 tropTmp1;
+ Uint32 tropTmp2;
+
+ ropOverflowRecPtr.i = ropPageptr.p->word32[ZPOS_OVERFLOWREC];
+ ndbrequire(ropOverflowRecPtr.i != RNIL);
+ /* THE OVERFLOW REC WILL BE TAKEN OUT OF THE */
+ /* FREE LIST OF OVERFLOW PAGES WITH FREE */
+ /* CONTAINERS AND WILL BE PUT IN THE FREE LIST */
+ /* OF FREE DIRECTORY INDEXES. */
+ if ((fragrecptr.p->lastOverflowRec == ropOverflowRecPtr.i) &&
+ (fragrecptr.p->firstOverflowRec == ropOverflowRecPtr.i)) {
+ jam();
+ return; /* THERE IS ONLY ONE OVERFLOW PAGE */
+ }//if
+ if ((fragrecptr.p->createLcp == ZTRUE) &&
+ (fragrecptr.p->lcpMaxOverDirIndex > ropPageptr.p->word32[ZPOS_PAGE_ID])) {
+ /* --------------------------------------------------------------------------------- */
+ /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT. */
+ /* --------------------------------------------------------------------------------- */
+ if (fragrecptr.p->fragState == LCP_SEND_PAGES) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* THE PAGE PARTICIPATES IN THE LOCAL CHECKPOINT AND THE WRITE TO DISK HAS NOT */
+ /* YET BEEN COMPLETED. WE MUST KEEP IT A WHILE LONGER SINCE AN EMPTY PAGE IS */
+ /* NOT EQUIVALENT TO AN INITIALISED PAGE SINCE THE FREE LISTS CAN DIFFER. */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ } else {
+ if ((fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) &&
+ (fragrecptr.p->lcpDirIndex <= ropPageptr.p->word32[ZPOS_PAGE_ID])) {
+ jam();
+ /* --------------------------------------------------------------------------------- */
+ /* SEE COMMENT ABOVE */
+ /* --------------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+#if kalle
+ logicalPage = 0;
+
+ i = fragrecptr.p->directory;
+ p = dirRange.getPtr(i);
+
+ i1 = logicalPage >> 8;
+ i2 = logicalPage & 0xFF;
+
+ ndbrequire(i1 < 256);
+
+ i = p->dirArray[i1];
+ p = directoryarray.getPtr(i);
+
+ physicPageId = p->pagep[i2];
+ physicPageP = page8.getPtr(physicPageId);
+
+ p->pagep[i2] = RNIL;
+ rpPageptr = { physicPageId, physicPageP };
+ releasePage(signal);
+
+#endif
+
+ /* --------------------------------------------------------------------------------- */
+ /* IT WAS OK TO RELEASE THE PAGE. */
+ /* --------------------------------------------------------------------------------- */
+ ptrCheckGuard(ropOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoOverflowRecPtr = ropOverflowRecPtr;
+ takeRecOutOfFreeOverpage(signal);
+ ropOverflowRecPtr.p->overpage = RNIL;
+ priOverflowRecPtr = ropOverflowRecPtr;
+ putRecInFreeOverdir(signal);
+ tropTmp = ropPageptr.p->word32[ZPOS_PAGE_ID];
+ ropOverflowrangeptr.i = fragrecptr.p->overflowdir;
+ tropTmp1 = tropTmp >> 8;
+ tropTmp2 = tropTmp & 0xff;
+ ptrCheckGuard(ropOverflowrangeptr, cdirrangesize, dirRange);
+ arrGuard(tropTmp1, 256);
+ ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
+ ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
+ ropOverflowDirptr.p->pagep[tropTmp2] = RNIL;
+ rpPageptr = ropPageptr;
+ releasePage(signal);
+ if (ropOverflowRecPtr.p->dirindex != (fragrecptr.p->lastOverIndex - 1)) {
+ jam();
+ return;
+ }//if
+ /* --------------------------------------------------------------------------------- */
+ /* THE LAST PAGE IN THE DIRECTORY WAS RELEASED IT IS NOW NECESSARY TO REMOVE */
+ /* ALL RELEASED OVERFLOW DIRECTORIES AT THE END OF THE LIST. */
+ /* --------------------------------------------------------------------------------- */
+ do {
+ fragrecptr.p->lastOverIndex--;
+ if (tropTmp2 == 0) {
+ jam();
+ ndbrequire(tropTmp1 != 0);
+ ropOverflowrangeptr.p->dirArray[tropTmp1] = RNIL;
+ rdDirptr.i = ropOverflowDirptr.i;
+ releaseDirectory(signal);
+ tropTmp1--;
+ tropTmp2 = 255;
+ } else {
+ jam();
+ tropTmp2--;
+ }//if
+ ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1];
+ ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray);
+ } while (ropOverflowDirptr.p->pagep[tropTmp2] == RNIL);
+ /* --------------------------------------------------------------------------------- */
+ /* RELEASE ANY OVERFLOW RECORDS THAT ARE PART OF THE FREE INDEX LIST WHICH */
+ /* DIRECTORY INDEX NOW HAS BEEN RELEASED. */
+ /* --------------------------------------------------------------------------------- */
+ tuodOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
+ jam();
+ while (tuodOverflowRecPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tuodOverflowRecPtr, coverflowrecsize, overflowRecord);
+ if (tuodOverflowRecPtr.p->dirindex >= fragrecptr.p->lastOverIndex) {
+ jam();
+ rorOverflowRecPtr = tuodOverflowRecPtr;
+ troOverflowRecPtr.p = tuodOverflowRecPtr.p;
+ tuodOverflowRecPtr.i = troOverflowRecPtr.p->nextOverList;
+ takeRecOutOfFreeOverdir(signal);
+ releaseOverflowRec(signal);
+ } else {
+ jam();
+ tuodOverflowRecPtr.i = tuodOverflowRecPtr.p->nextOverList;
+ }//if
+ }//while
+}//Dbacc::releaseOverpage()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releasePage(Signal* signal)
+{
+#ifdef VM_TRACE
+ bool inList = false;
+ Uint32 numInList = 0;
+ Page8Ptr tmpPagePtr;
+ tmpPagePtr.i = cfirstfreepage;
+ while (tmpPagePtr.i != RNIL){
+ ptrCheckGuard(tmpPagePtr, cpagesize, page8);
+ if (tmpPagePtr.i == rpPageptr.i){
+ jam(); inList = true;
+ break;
+ }
+ numInList++;
+ tmpPagePtr.i = tmpPagePtr.p->word32[0];
+ }
+ ndbrequire(inList == false);
+ // ndbrequire(numInList == cnoOfAllocatedPages);
+#endif
+ rpPageptr.p->word32[0] = cfirstfreepage;
+ cfirstfreepage = rpPageptr.i;
+ cnoOfAllocatedPages--;
+}//Dbacc::releasePage()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_LCP_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseLcpPage(Signal* signal)
+{
+ rlpPageptr.p->word32[0] = cfirstfreeLcpPage;
+ cfirstfreeLcpPage = rlpPageptr.i;
+}//Dbacc::releaseLcpPage()
+
+/* --------------------------------------------------------------------------------- */
+/* RELEASE_SR_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::releaseSrRec(Signal* signal)
+{
+ srVersionPtr.p->nextFreeSr = cfirstFreeSrVersionRec;
+ cfirstFreeSrVersionRec = srVersionPtr.i;
+}//Dbacc::releaseSrRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_DIRECTORY */
+/* DESCRIPTION: A DIRECTORY BLOCK (ZDIRBLOCKSIZE DIRECTORY RECORDS) */
+/* WILL BE ALLOCATED AND RETURNED. */
+/* A DIRECTORY SIZE ERROR CODE WILL BE RETURNED IF THERE IS NO */
+/* FREE BLOCK. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeDirectory(Signal* signal)
+{
+ Uint32 tsdyIndex;
+
+ if (cfirstfreedir == RNIL) {
+ jam();
+ if (cdirarraysize <= cdirmemory) {
+ jam();
+ tresult = ZDIRSIZE_ERROR;
+ return;
+ } else {
+ jam();
+ sdDirptr.i = cdirmemory;
+ ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
+ cdirmemory = cdirmemory + 1;
+ }//if
+ } else {
+ jam();
+ sdDirptr.i = cfirstfreedir;
+ ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray);
+ cfirstfreedir = sdDirptr.p->pagep[0];
+ sdDirptr.p->pagep[0] = RNIL;
+ }//if
+ for (tsdyIndex = 0; tsdyIndex <= 255; tsdyIndex++) {
+ sdDirptr.p->pagep[tsdyIndex] = RNIL;
+ }//for
+}//Dbacc::seizeDirectory()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_DIRRANGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeDirrange(Signal* signal)
+{
+ Uint32 tsdeIndex;
+
+ newDirRangePtr.i = cfirstfreeDirrange;
+ ptrCheckGuard(newDirRangePtr, cdirrangesize, dirRange);
+ cfirstfreeDirrange = newDirRangePtr.p->dirArray[0];
+ for (tsdeIndex = 0; tsdeIndex <= 255; tsdeIndex++) {
+ newDirRangePtr.p->dirArray[tsdeIndex] = RNIL;
+ }//for
+}//Dbacc::seizeDirrange()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE FRAGREC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFragrec(Signal* signal)
+{
+ fragrecptr.i = cfirstfreefrag;
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ cfirstfreefrag = fragrecptr.p->nextfreefrag;
+ fragrecptr.p->nextfreefrag = RNIL;
+}//Dbacc::seizeFragrec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_FS_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFsConnectRec(Signal* signal)
+{
+ fsConnectptr.i = cfsFirstfreeconnect;
+ ptrCheckGuard(fsConnectptr, cfsConnectsize, fsConnectrec);
+ cfsFirstfreeconnect = fsConnectptr.p->fsNext;
+ fsConnectptr.p->fsNext = RNIL;
+ fsConnectptr.p->fsState = WAIT_NOTHING;
+}//Dbacc::seizeFsConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_FS_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeFsOpRec(Signal* signal)
+{
+ fsOpptr.i = cfsFirstfreeop;
+ ptrCheckGuard(fsOpptr, cfsOpsize, fsOprec);
+ cfsFirstfreeop = fsOpptr.p->fsOpnext;
+ fsOpptr.p->fsOpnext = RNIL;
+}//Dbacc::seizeFsOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LCP_CONNECT_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLcpConnectRec(Signal* signal)
+{
+ lcpConnectptr.i = cfirstfreelcpConnect;
+ ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec);
+ cfirstfreelcpConnect = lcpConnectptr.p->nextLcpConn;
+ lcpConnectptr.p->nextLcpConn = RNIL;
+}//Dbacc::seizeLcpConnectRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_OP_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeOpRec(Signal* signal)
+{
+ operationRecPtr.i = cfreeopRec;
+ ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
+ cfreeopRec = operationRecPtr.p->nextOp; /* UPDATE FREE LIST OF OP RECORDS */
+ /* PUTS THE OPERATION RECORD PTR IN THE LIST */
+ /* OF OPERATIONS IN THE CONNECTION RECORD */
+ operationRecPtr.p->nextOp = RNIL;
+}//Dbacc::seizeOpRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE OVERFLOW RECORD */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeOverRec(Signal* signal) {
+ sorOverflowRecPtr.i = cfirstfreeoverrec;
+ ptrCheckGuard(sorOverflowRecPtr, coverflowrecsize, overflowRecord);
+ cfirstfreeoverrec = sorOverflowRecPtr.p->nextfreeoverrec;
+ sorOverflowRecPtr.p->nextfreeoverrec = RNIL;
+ sorOverflowRecPtr.p->prevOverRec = RNIL;
+ sorOverflowRecPtr.p->nextOverRec = RNIL;
+}//Dbacc::seizeOverRec()
+
+
+/**
+ * A ZPAGESIZE_ERROR has occurred: we are out of index pages.
+ * Print some debug info if compiled with debug support.
+ */
+void Dbacc::zpagesize_error(const char* where){
+ DEBUG(where << endl
+ << " ZPAGESIZE_ERROR" << endl
+ << " cfirstfreepage=" << cfirstfreepage << endl
+ << " cfreepage=" <<cfreepage<<endl
+ << " cpagesize=" <<cpagesize<<endl
+ << " cnoOfAllocatedPages="<<cnoOfAllocatedPages);
+}
+
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizePage(Signal* signal)
+{
+ tresult = 0;
+ if (cfirstfreepage == RNIL) {
+ if (cfreepage < cpagesize) {
+ jam();
+ spPageptr.i = cfreepage;
+ ptrCheckGuard(spPageptr, cpagesize, page8);
+ cfreepage++;
+ cnoOfAllocatedPages++;
+ } else {
+ jam();
+ zpagesize_error("Dbacc::seizePage");
+ tresult = ZPAGESIZE_ERROR;
+ }//if
+ } else {
+ jam();
+ spPageptr.i = cfirstfreepage;
+ ptrCheckGuard(spPageptr, cpagesize, page8);
+ cfirstfreepage = spPageptr.p->word32[0];
+ cnoOfAllocatedPages++;
+ }//if
+}//Dbacc::seizePage()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_LCP_PAGE */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeLcpPage(Page8Ptr& regPagePtr)
+{
+ regPagePtr.i = cfirstfreeLcpPage;
+ ptrCheckGuard(regPagePtr, cpagesize, page8);
+ cfirstfreeLcpPage = regPagePtr.p->word32[0];
+}//Dbacc::seizeLcpPage()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_ROOTFRAGREC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeRootfragrec(Signal* signal)
+{
+ rootfragrecptr.i = cfirstfreerootfrag;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ cfirstfreerootfrag = rootfragrecptr.p->nextroot;
+ rootfragrecptr.p->nextroot = RNIL;
+}//Dbacc::seizeRootfragrec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_SCAN_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeScanRec(Signal* signal)
+{
+ scanPtr.i = cfirstFreeScanRec;
+ ptrCheckGuard(scanPtr, cscanRecSize, scanRec);
+ ndbrequire(scanPtr.p->scanState == ScanRec::SCAN_DISCONNECT);
+ cfirstFreeScanRec = scanPtr.p->scanNextfreerec;
+}//Dbacc::seizeScanRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEIZE_SR_VERSION_REC */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::seizeSrVerRec(Signal* signal)
+{
+ srVersionPtr.i = cfirstFreeSrVersionRec;
+ ptrCheckGuard(srVersionPtr, csrVersionRecSize, srVersionRec);
+ cfirstFreeSrVersionRec = srVersionPtr.p->nextFreeSr;
+}//Dbacc::seizeSrVerRec()
+
+/* --------------------------------------------------------------------------------- */
+/* SEND_SYSTEMERROR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::sendSystemerror(Signal* signal)
+{
+ progError(0, 0);
+}//Dbacc::sendSystemerror()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_REC_OUT_OF_FREE_OVERDIR */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeRecOutOfFreeOverdir(Signal* signal)
+{
+ OverflowRecordPtr tofoOverrecPtr;
+ if (troOverflowRecPtr.p->nextOverList != RNIL) {
+ jam();
+ tofoOverrecPtr.i = troOverflowRecPtr.p->nextOverList;
+ ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
+ tofoOverrecPtr.p->prevOverList = troOverflowRecPtr.p->prevOverList;
+ }//if
+ if (troOverflowRecPtr.p->prevOverList != RNIL) {
+ jam();
+ tofoOverrecPtr.i = troOverflowRecPtr.p->prevOverList;
+ ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord);
+ tofoOverrecPtr.p->nextOverList = troOverflowRecPtr.p->nextOverList;
+ } else {
+ jam();
+ fragrecptr.p->firstFreeDirindexRec = troOverflowRecPtr.p->nextOverList;
+ }//if
+}//Dbacc::takeRecOutOfFreeOverdir()
+
+/* --------------------------------------------------------------------------------- */
+/* TAKE_REC_OUT_OF_FREE_OVERPAGE */
+/* DESCRIPTION: AN OVERFLOW PAGE WHICH IS EMPTY HAS TO BE TAKEN OUT OF THE */
+/* FREE LIST OF OVERFLOW PAGES. THIS SUBROUTINE UPDATES THAT */
+/* LIST. */
+/* --------------------------------------------------------------------------------- */
+void Dbacc::takeRecOutOfFreeOverpage(Signal* signal)
+{
+ OverflowRecordPtr tfoNextOverflowRecPtr;
+ OverflowRecordPtr tfoPrevOverflowRecPtr;
+
+ if (tfoOverflowRecPtr.p->nextOverRec != RNIL) {
+ jam();
+ tfoNextOverflowRecPtr.i = tfoOverflowRecPtr.p->nextOverRec;
+ ptrCheckGuard(tfoNextOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoNextOverflowRecPtr.p->prevOverRec = tfoOverflowRecPtr.p->prevOverRec;
+ } else {
+ ndbrequire(fragrecptr.p->lastOverflowRec == tfoOverflowRecPtr.i);
+ jam();
+ fragrecptr.p->lastOverflowRec = tfoOverflowRecPtr.p->prevOverRec;
+ }//if
+ if (tfoOverflowRecPtr.p->prevOverRec != RNIL) {
+ jam();
+ tfoPrevOverflowRecPtr.i = tfoOverflowRecPtr.p->prevOverRec;
+ ptrCheckGuard(tfoPrevOverflowRecPtr, coverflowrecsize, overflowRecord);
+ tfoPrevOverflowRecPtr.p->nextOverRec = tfoOverflowRecPtr.p->nextOverRec;
+ } else {
+ ndbrequire(fragrecptr.p->firstOverflowRec == tfoOverflowRecPtr.i);
+ jam();
+ fragrecptr.p->firstOverflowRec = tfoOverflowRecPtr.p->nextOverRec;
+ }//if
+}//Dbacc::takeRecOutOfFreeOverpage()
+
+void
+Dbacc::reportMemoryUsage(Signal* signal, int gth){
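+ // Memory usage EVENT_REP as built here: [0] event type (NDB_LE_MemoryUsage),
+ // [1] gth (assumed to indicate the direction of the change), [2] page size
+ // in bytes, [3] pages allocated, [4] total pages, [5] reporting block (DBACC).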
+ signal->theData[0] = NDB_LE_MemoryUsage;
+ signal->theData[1] = gth;
+ signal->theData[2] = sizeof(* rpPageptr.p);
+ signal->theData[3] = cnoOfAllocatedPages;
+ signal->theData[4] = cpagesize;
+ signal->theData[5] = DBACC;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
+}
+
+void
+Dbacc::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+ if (dumpState->args[0] == DumpStateOrd::AccDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanRecSize)
+ return;
+
+ scanPtr.i = recordNo;
+ ptrAss(scanPtr, scanRec);
+ infoEvent("Dbacc::ScanRec[%d]: state=%d, transid(0x%x, 0x%x)",
+ scanPtr.i, scanPtr.p->scanState,scanPtr.p->scanTrid1,
+ scanPtr.p->scanTrid2);
+ infoEvent(" timer=%d, continueBCount=%d, "
+ "activeLocalFrag=%d, root=%d, nextBucketIndex=%d",
+ scanPtr.p->scanTimer,
+ scanPtr.p->scanContinuebCounter,
+ scanPtr.p->activeLocalFrag,
+ scanPtr.p->rootPtr,
+ scanPtr.p->nextBucketIndex);
+ infoEvent(" scanNextfreerec=%d firstActOp=%d firstLockedOp=%d, "
+ "scanLastLockedOp=%d firstQOp=%d lastQOp=%d",
+ scanPtr.p->scanNextfreerec,
+ scanPtr.p->scanFirstActiveOp,
+ scanPtr.p->scanFirstLockedOp,
+ scanPtr.p->scanLastLockedOp,
+ scanPtr.p->scanFirstQueuedOp,
+ scanPtr.p->scanLastQueuedOp);
+ infoEvent(" scanUserP=%d, startNoBuck=%d, minBucketIndexToRescan=%d, "
+ "maxBucketIndexToRescan=%d",
+ scanPtr.p->scanUserptr,
+ scanPtr.p->startNoOfBuckets,
+ scanPtr.p->minBucketIndexToRescan,
+ scanPtr.p->maxBucketIndexToRescan);
+ infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, "
+ "scanMask=%d scanLockMode=%d",
+ scanPtr.p->scanBucketState,
+ scanPtr.p->scanLockHeld,
+ scanPtr.p->scanUserblockref,
+ scanPtr.p->scanMask,
+ scanPtr.p->scanLockMode);
+ return;
+ }
+
+ // Dump all ScanRec(ords)
+ if (dumpState->args[0] == DumpStateOrd::AccDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("ACC: Dump all ScanRec - size: %d",
+ cscanRecSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
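+ // Dump this record by invoking the one-record case directly, then send a
+ // DUMP_STATE_ORD to ourselves for the next record so that dumping all
+ // records is spread over several signals.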
+ dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanRecSize-1){
+ dumpState->args[0] = DumpStateOrd::AccDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ // Dump all active ScanRec(ords)
+ if (dumpState->args[0] == DumpStateOrd::AccDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("ACC: Dump active ScanRec - size: %d",
+ cscanRecSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRec);
+ if (sp.p->scanState != ScanRec::SCAN_DISCONNECT){
+ dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanRecSize-1){
+ dumpState->args[0] = DumpStateOrd::AccDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DumpPageMemory){
+ reportMemoryUsage(signal, 0);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
+ ndbout << "Dbacc:: delay write of datapages for table = "
+ << dumpState->args[1]<< endl;
+ c_errorInsert3000_TableId = dumpState->args[1];
+ SET_ERROR_INSERT_VALUE(3000);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpOneOperationRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= coprecsize)
+ return;
+
+ OperationrecPtr tmpOpPtr;
+ tmpOpPtr.i = recordNo;
+ ptrAss(tmpOpPtr, operationrec);
+ infoEvent("Dbacc::operationrec[%d]: opState=%d, transid(0x%x, 0x%x)",
+ tmpOpPtr.i, tmpOpPtr.p->opState, tmpOpPtr.p->transId1,
+ tmpOpPtr.p->transId2);
+ infoEvent("elementIsforward=%d, elementPage=%d, elementPointer=%d ",
+ tmpOpPtr.p->elementIsforward, tmpOpPtr.p->elementPage,
+ tmpOpPtr.p->elementPointer);
+ infoEvent("fid=%d, fragptr=%d, hashvaluePart=%d ",
+ tmpOpPtr.p->fid, tmpOpPtr.p->fragptr,
+ tmpOpPtr.p->hashvaluePart);
+ infoEvent("hashValue=%d, insertDeleteLen=%d, keyinfoPage=%d ",
+ tmpOpPtr.p->hashValue, tmpOpPtr.p->insertDeleteLen,
+ tmpOpPtr.p->keyinfoPage);
+ infoEvent("nextLockOwnerOp=%d, nextOp=%d, nextParallelQue=%d ",
+ tmpOpPtr.p->nextLockOwnerOp, tmpOpPtr.p->nextOp,
+ tmpOpPtr.p->nextParallelQue);
+ infoEvent("nextQueOp=%d, nextSerialQue=%d, prevOp=%d ",
+ tmpOpPtr.p->nextQueOp, tmpOpPtr.p->nextSerialQue,
+ tmpOpPtr.p->prevOp);
+ infoEvent("prevLockOwnerOp=%d, prevParallelQue=%d, prevQueOp=%d ",
+ tmpOpPtr.p->prevLockOwnerOp, tmpOpPtr.p->prevParallelQue,
+ tmpOpPtr.p->prevQueOp);
+ infoEvent("prevSerialQue=%d, scanRecPtr=%d, longPagePtr=%d ",
+ tmpOpPtr.p->prevSerialQue, tmpOpPtr.p->scanRecPtr,
+ tmpOpPtr.p->longPagePtr);
+ infoEvent("transactionstate=%d, elementIsDisappeared=%d, insertIsDone=%d ",
+ tmpOpPtr.p->transactionstate, tmpOpPtr.p->elementIsDisappeared,
+ tmpOpPtr.p->insertIsDone);
+ infoEvent("lockMode=%d, lockOwner=%d, nodeType=%d ",
+ tmpOpPtr.p->lockMode, tmpOpPtr.p->lockOwner,
+ tmpOpPtr.p->nodeType);
+ infoEvent("operation=%d, opSimple=%d, dirtyRead=%d,scanBits=%d ",
+ tmpOpPtr.p->operation, tmpOpPtr.p->opSimple,
+ tmpOpPtr.p->dirtyRead, tmpOpPtr.p->scanBits);
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpNumOpRecs){
+
+ Uint32 freeOpRecs = 0;
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ freeOpRecs++;
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+
+ infoEvent("Dbacc::OperationRecords: num=%d, free=%d",
+ coprecsize, freeOpRecs);
+
+ return;
+ }
+ if(dumpState->args[0] == DumpStateOrd::AccDumpFreeOpRecs){
+
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+
+ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
+ dumpState->args[1] = opRecPtr.i;
+ execDUMP_STATE_ORD(signal);
+
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+
+
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::AccDumpNotFreeOpRecs){
+ Uint32 recordStart = RNIL;
+ if (signal->length() == 2)
+ recordStart = dumpState->args[1];
+ else
+ return;
+
+ if (recordStart >= coprecsize)
+ return;
+
+ for (Uint32 i = recordStart; i < coprecsize; i++){
+
+ bool inFreeList = false;
+ OperationrecPtr opRecPtr;
+ opRecPtr.i = cfreeopRec;
+ while (opRecPtr.i != RNIL){
+ if (opRecPtr.i == i){
+ inFreeList = true;
+ break;
+ }
+ ptrCheckGuard(opRecPtr, coprecsize, operationrec);
+ opRecPtr.i = opRecPtr.p->nextOp;
+ }
+ if (inFreeList == false){
+ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec;
+ dumpState->args[1] = i;
+ execDUMP_STATE_ORD(signal);
+ }
+ }
+ return;
+ }
+
+#if 0
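+ // Disabled debug code: would send REL_TABMEMREQ / DROP_TABFILEREQ to this
+ // block with hard-coded table ids; kept for reference only.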
+ if (type == 100) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 101) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 102) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 103) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 104) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 105) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+#endif
+}//Dbacc::execDUMP_STATE_ORD()
+
+void Dbacc::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+
+ case NoOfDiskPagesToDiskAfterRestartACC:
+ clblPagesPerTick = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfDiskPagesToDiskDuringRestartACC:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+
+}//execSET_VAR_REQ()
+
+void
+Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ Uint32 attrId = signal->theData[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ Uint64 tmp;
+ switch(attrId){
+ case AttributeHeader::ROW_COUNT:
+ tmp = rootfragrecptr.p->noOfElements;
+ break;
+ case AttributeHeader::COMMIT_COUNT:
+ tmp = rootfragrecptr.p->m_commit_count;
+ break;
+ default:
+ tmp = 0;
+ }
+ memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on
+ * Itanium gcc (GCC) 3.4.1 smp linux 2.4
+ * otherwise
+ */
+ // Uint32 * src = (Uint32*)&tmp;
+ // signal->theData[0] = src[0];
+ // signal->theData[1] = src[1];
+}
+
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Makefile.am b/storage/ndb/src/kernel/blocks/dbacc/Makefile.am
new file mode 100644
index 00000000000..ca1b1efac37
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbacc/Makefile.am
@@ -0,0 +1,26 @@
+
+noinst_LIBRARIES = libdbacc.a
+
+libdbacc_a_SOURCES = DbaccInit.cpp DbaccMain.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/ndb/src/kernel/blocks/dbtup
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbacc.dsp
+
+libdbacc.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbacc_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt
new file mode 100644
index 00000000000..3d11e501c07
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt
@@ -0,0 +1,152 @@
+Unique Hash Index
+=================
+
+A unique hash index X on T(A1,...,An) becomes:
+a table X with primary key A1,...,An and an extra attribute NDB$PK
+
+NDB$PK is the primary key of T, concatenated at 4-byte boundaries
+
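+A minimal sketch of the 4-byte-boundary packing (illustrative only; the helper
+name and the use of std containers are not from the tree):
+
+  // Append each primary key attribute of T, padded to whole 32-bit words,
+  // to form the NDB$PK value stored in the index table X.
+  #include <vector>
+  #include <string.h>
+  #include <stdint.h>
+
+  std::vector<uint32_t> buildNdbPk(const std::vector<std::vector<uint8_t> >& pkAttrs)
+  {
+    std::vector<uint32_t> ndbPk;
+    for (size_t i = 0; i < pkAttrs.size(); i++) {
+      if (pkAttrs[i].empty())
+        continue;
+      const size_t words = (pkAttrs[i].size() + 3) / 4;  // round up to 4 bytes
+      const size_t pos = ndbPk.size();
+      ndbPk.resize(pos + words, 0);                       // zero-pad the last word
+      memcpy(&ndbPk[pos], &pkAttrs[i][0], pkAttrs[i].size());
+    }
+    return ndbPk;
+  }
+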
+Protocols:
+
+U - user, initiator of protocol
+C - coordinator
+P - participants, including coordinator node
+
+RT_ - request type, current state
+
+P always replies to C with current RT_ (initially RT_DICT_PREPARE)
+C replies to U at the end
+
+CREATE INDEX
+------------
+
+U: RT_USER
+
+C: forward request to P's
+P: check and reply
+
+C: invoke CREATE TABLE for index table
+
+C: invoke ALTER INDEX online
+
+C: send RT_DICT_COMMIT to P's
+P: reply
+
+C: reply to U
+
+DROP INDEX
+----------
+
+[ todo ]
+
+ALTER INDEX online
+------------------
+
+U: RT_USER, RT_CREATE_INDEX, RT_NODERESTART, RT_SYSTEMRESTART
+
+C: forward request to P's
+P: check and reply
+
+C: send RT_DICT_TC to P's
+P: create index in local TC, and reply
+
+C: invoke CREATE TRIGGER for insert/update/delete triggers
+
+C: invoke BUILD INDEX
+
+C: send RT_DICT_COMMIT to P's
+P: reply
+
+C: reply to U
+
+ALTER INDEX offline
+-------------------
+
+[ todo ]
+
+BUILD INDEX
+-----------
+
+U: RT_USER, RT_ALTER_INDEX
+
+C: forward request to P's
+P: check and reply
+
+C: invoke CREATE TRIGGER for read-only constraint on NDB$PK
+
+C: send RT_DICT_TRIX to P's
+P: build index via local TRIX, and reply
+
+C: invoke DROP TRIGGER for read-only constraint on NDB$PK
+
+C: send RT_DICT_TC to P's
+P: online index in local TC, and reply
+
+CREATE TRIGGER
+--------------
+
+U: RT_USER, RT_ALTER_INDEX, RT_BUILD_INDEX
+
+C: forward request to P's
+P: check and reply
+
+C: seize trigger id and send RT_DICT_CREATE to P's
+P: create trigger in DICT (also connect to index record), and reply
+
+C: invoke ALTER TRIGGER online [ not if subscription trigger ]
+
+C: send RT_DICT_COMMIT to P's
+P: reply
+
+C: reply to U
+
+DROP TRIGGER
+------------
+
+[ todo ]
+
+ALTER TRIGGER online
+--------------------
+
+U: RT_USER, RT_CREATE_TRIGGER
+
+C: forward request to P's
+P: check and reply
+
+C: send RT_DICT_TC to P's
+P: create trigger in local TC, and reply
+
+C: send RT_DICT_LQH to P's
+P: create trigger in local LQH (which just forwards to TUP), and reply
+
+C: send RT_DICT_COMMIT to P's
+P: reply
+
+C: reply to U
+
+ALTER TRIGGER offline
+---------------------
+
+[ todo ]
+
+Ordered Index << under work >>
+=============
+
+created as DICT table, as before, to reuse the code
+
+keep NDB$PK as last attribute (not used but logically correct)
+
+the create-fragments and create-attributes steps must be modified
+
+global metadata? implemented but will use signals anyway
+
+create (after-) insert/update/delete triggers as DICT objects, as before
+
+skip following:
+- create index in TC
+- create triggers in TC
+- read-only constraint on NDB$PK
+
+create (before-) commit trigger in TUP
+
+alter online (in TUX, instead of TC) is needed
diff --git a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt
new file mode 100644
index 00000000000..d37732dcda1
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt
@@ -0,0 +1,29 @@
+
+1) Receive from client (sequence of DICTTABINFO)
+
+2) CREATE_FRAGMENTATION_REQ -> local DIH
+ Returns all fragments for table + some other stuff
+   NOTE: no side effects in DIH (a sketch of this request follows after this list)
+
+3) Pack table description
+
+4) CREATE_TAB -> all DICTs (including table data)
+ 1) Write schema file (ADD_STARTED)
+ 2) Write table descriptor to file
+ 3) CREATE_TAB (DIADDTABREQ) -> local DIH (including fragment info)
+ 4) DIH
+ 1) write table descriptor
+ 2) For each local fragment
+ ADD_FRAG -> local DICT
+ LQHFRAGREQ -> local LQH
+ LQHADDATTREQ -> local LQH
+ 5) TAB_COMMITREQ -> local LQH
+
+5) WAIT_GCP
+
+6) ALTER_TAB (activate) -> all DICTs
+ 1) Write schema file (CREATED)
+ 2) TAB_COMMITREQ -> local DIH
+ 3) TC_SCHVERREQ -> local TC
+
+
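+A minimal sketch of step 2. The field names and the direct-execution variant are
+taken from packTableIntoPagesImpl in Dbdict.cpp; in the create-table path the
+request is instead sent to DIH and answered with CREATE_FRAGMENTATION_CONF/REF:
+
+  CreateFragmentationReq* const req =
+    (CreateFragmentationReq*)signal->getDataPtrSend();
+  req->senderRef = 0;                       // 0 = no asynchronous reply wanted
+  req->senderData = RNIL;
+  req->fragmentationType = tablePtr.p->fragmentType;
+  req->noOfFragments = 0;                   // let DIH decide
+  req->fragmentNode = 0;
+  req->primaryTableId = tablePtr.i;
+  EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+                 CreateFragmentationReq::SignalLength);
+  // On success signal->theData[0] is 0 and the fragmentation data starts at
+  // theData[25]: two 16-bit count words followed by data[0]*data[1] entries.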
diff --git a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt
new file mode 100644
index 00000000000..0b37e5d767f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt
@@ -0,0 +1,35 @@
+
+1) Receive from client (sequence of DICTTABINFO)
+
+2) DICT_SCHEMAREQ -> all DICTs
+ Write ADD_STARTED in schema file
+
+3) Pack table description
+
+4) DICTTABINFO -> all DICTs (but self) (containing packed table info)
+   self -> writes the packed info directly to file
+   1) Write to file
+
+5) DICT_SCHEMAREQ -> all DICTs
+ Write UPDATE_PAGE_COUNT in schema file
+
+6) DIADDTABREQ -> local DIH
+ 1) Create fragments
+ 2) For each fragment
+ DIHADDFRAGREQ -> all DIH
+ 3) For each fragment
+ DICTFRAGSREQ -> local DICT
+ 1) LQHFRAGREQ -> concerned LQH
+ 2) For each attribute
+ LQHADDATTREQ -> concerned LQH
+
+7) WAIT_GCP -> local DIH
+
+8) DICT_SCHEMAREQ -> all DICTs
+ Write TABLE_ADD_COMMITTED in schema file
+
+9) TAB_COMMITREQ -> all LQH & DIH
+
+10) TC_SCHVERREQ -> all TC
+
+11) UNBLO_DICTREQ -> all DICT
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
new file mode 100644
index 00000000000..4bc5b127a8f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -0,0 +1,11884 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_sys.h>
+
+#define DBDICT_C
+#include "Dbdict.hpp"
+
+#include <ndb_limits.h>
+#include <NdbOut.hpp>
+#include <Properties.hpp>
+#include <Configuration.hpp>
+#include <SectionReader.hpp>
+#include <SimpleProperties.hpp>
+#include <AttributeHeader.hpp>
+#include <signaldata/DictSchemaInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/DropTabFile.hpp>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/FsCloseReq.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/FsReadWriteReq.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/GetTableId.hpp>
+#include <signaldata/HotSpareRep.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/RelTabMem.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/ListTables.hpp>
+
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/BuildIndx.hpp>
+
+#include <signaldata/CreateEvnt.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/UtilExecute.hpp>
+#include <signaldata/UtilRelease.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <GrepError.hpp>
+//#include <signaldata/DropEvnt.hpp>
+
+#include <signaldata/LqhFrag.hpp>
+
+#include <signaldata/DiAddTab.hpp>
+#include <signaldata/DihStartTab.hpp>
+
+#include <signaldata/DropTable.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/AlterTable.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateFragmentation.hpp>
+#include <signaldata/CreateTab.hpp>
+#include <NdbSleep.h>
+
+#define ZNOT_FOUND 626
+#define ZALREADYEXIST 630
+
+//#define EVENT_PH2_DEBUG
+//#define EVENT_PH3_DEBUG
+//#define EVENT_DEBUG
+
+#define EVENT_TRACE \
+// ndbout_c("Event debug trace: File: %s Line: %u", __FILE__, __LINE__)
+
+#define DIV(x,y) (((x)+(y)-1)/(y))
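+// DIV(x,y): integer ceiling of x/y, e.g. DIV(10,4) == 3. Used below to round
+// word counts up to whole pages.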
+#include <ndb_version.h>
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: GENERAL MODULE -------------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains general stuff. Mostly debug signals and */
+/* general signals that go into a specific module after checking a */
+/* state variable. Also general subroutines used by many. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This signal is used to dump states of various variables in the
+// block by command.
+/* ---------------------------------------------------------------- */
+void
+Dbdict::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+
+#ifdef VM_TRACE
+ if(signal->theData[0] == 1222){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBLQH_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1223){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBTC_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1224){
+ const Uint32 tab = signal->theData[1];
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+ req->senderRef = reference();
+ req->senderData = 1222;
+ req->tableId = tab;
+ sendSignal(DBDIH_REF, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+ }
+
+ if(signal->theData[0] == 1225){
+ const Uint32 tab = signal->theData[1];
+ const Uint32 ver = signal->theData[2];
+ TableRecordPtr tabRecPtr;
+ c_tableRecordPool.getPtr(tabRecPtr, tab);
+ DropTableReq * req = (DropTableReq*)signal->getDataPtr();
+ req->senderData = 1225;
+ req->senderRef = numberToRef(1,1);
+ req->tableId = tab;
+ req->tableVersion = tabRecPtr.p->tableVersion + ver;
+ sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal,
+ DropTableReq::SignalLength, JBB);
+ }
+#endif
+
+ return;
+}//Dbdict::execDUMP_STATE_ORD()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// CONTINUEB is used when a real-time break is needed for long
+// processes.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbdict::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ switch (signal->theData[0]) {
+ case ZPACK_TABLE_INTO_PAGES :
+ jam();
+ packTableIntoPages(signal, signal->theData[1], signal->theData[2]);
+ break;
+
+ case ZSEND_GET_TAB_RESPONSE :
+ jam();
+ sendGetTabResponse(signal);
+ break;
+
+ default :
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//execCONTINUEB()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// Routine to handle pack table into pages.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+
+void Dbdict::packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId)
+{
+
+ PageRecordPtr pagePtr;
+ TableRecordPtr tablePtr;
+ c_pageRecordArray.getPtr(pagePtr, pageId);
+
+ memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE],
+ 8 * ZSIZE_OF_PAGES_IN_WORDS);
+
+ w.first();
+ packTableIntoPagesImpl(w, tablePtr, signal);
+
+ Uint32 wordsOfTable = w.getWordsUsed();
+ Uint32 pagesUsed =
+ DIV(wordsOfTable + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ pagePtr.p->word[ZPOS_CHECKSUM] =
+ computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS);
+
+ switch (c_packTable.m_state) {
+ case PackTable::PTS_IDLE:
+ case PackTable::PTS_ADD_TABLE_MASTER:
+ case PackTable::PTS_ADD_TABLE_SLAVE:
+ case PackTable::PTS_RESTART:
+ ndbrequire(false);
+ break;
+ case PackTable::PTS_GET_TAB:
+ jam();
+ c_retrieveRecord.retrievedNoOfPages = pagesUsed;
+ c_retrieveRecord.retrievedNoOfWords = wordsOfTable;
+ sendGetTabResponse(signal);
+ return;
+ break;
+ }//switch
+ ndbrequire(false);
+ return;
+}//packTableIntoPages()
+
+void
+Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
+ TableRecordPtr tablePtr,
+ Signal* signal){
+
+ w.add(DictTabInfo::TableName, tablePtr.p->tableName);
+ w.add(DictTabInfo::TableId, tablePtr.i);
+#ifdef HAVE_TABLE_REORG
+ w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
+#else
+ w.add(DictTabInfo::SecondTableId, (Uint32)0);
+#endif
+ w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
+ w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
+ w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
+ w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr);
+ w.add(DictTabInfo::NoOfVariable, (Uint32)0);
+ w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength);
+
+ w.add(DictTabInfo::TableLoggedFlag, tablePtr.p->storedTable);
+ w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor);
+ w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
+ w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
+ w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
+ w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+
+ if(!signal)
+ {
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+ }
+ else
+ {
+ Uint32 * theData = signal->getDataPtrSend();
+ CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
+ req->senderRef = 0;
+ req->senderData = RNIL;
+ req->fragmentationType = tablePtr.p->fragmentType;
+ req->noOfFragments = 0;
+ req->fragmentNode = 0;
+ req->primaryTableId = tablePtr.i;
+ EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength);
+ if(signal->theData[0] == 0)
+ {
+ Uint16 *data = (Uint16*)&signal->theData[25];
+ Uint32 count = 2 + data[0] * data[1];
+ w.add(DictTabInfo::FragmentDataLen, 2*count);
+ w.add(DictTabInfo::FragmentData, data, 2*count);
+ }
+ }
+
+ if (tablePtr.p->primaryTableId != RNIL){
+ TableRecordPtr primTab;
+ c_tableRecordPool.getPtr(primTab, tablePtr.p->primaryTableId);
+ w.add(DictTabInfo::PrimaryTable, primTab.p->tableName);
+ w.add(DictTabInfo::PrimaryTableId, tablePtr.p->primaryTableId);
+ w.add(DictTabInfo::IndexState, tablePtr.p->indexState);
+ w.add(DictTabInfo::InsertTriggerId, tablePtr.p->insertTriggerId);
+ w.add(DictTabInfo::UpdateTriggerId, tablePtr.p->updateTriggerId);
+ w.add(DictTabInfo::DeleteTriggerId, tablePtr.p->deleteTriggerId);
+ w.add(DictTabInfo::CustomTriggerId, tablePtr.p->customTriggerId);
+ }
+ w.add(DictTabInfo::FrmLen, tablePtr.p->frmLen);
+ w.add(DictTabInfo::FrmData, tablePtr.p->frmData, tablePtr.p->frmLen);
+
+ Uint32 nextAttribute = tablePtr.p->firstAttribute;
+ AttributeRecordPtr attrPtr;
+ do {
+ jam();
+ c_attributeRecordPool.getPtr(attrPtr, nextAttribute);
+
+ w.add(DictTabInfo::AttributeName, attrPtr.p->attributeName);
+ w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId);
+ w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0);
+
+ const Uint32 desc = attrPtr.p->attributeDescriptor;
+ const Uint32 attrType = AttributeDescriptor::getType(desc);
+ const Uint32 attrSize = AttributeDescriptor::getSize(desc);
+ const Uint32 arraySize = AttributeDescriptor::getArraySize(desc);
+ const Uint32 nullable = AttributeDescriptor::getNullable(desc);
+ const Uint32 DKey = AttributeDescriptor::getDKey(desc);
+
+ // AttributeType deprecated
+ w.add(DictTabInfo::AttributeSize, attrSize);
+ w.add(DictTabInfo::AttributeArraySize, arraySize);
+ w.add(DictTabInfo::AttributeNullableFlag, nullable);
+ w.add(DictTabInfo::AttributeDKey, DKey);
+ w.add(DictTabInfo::AttributeExtType, attrType);
+ w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
+ w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
+ w.add(DictTabInfo::AttributeAutoIncrement,
+ (Uint32)attrPtr.p->autoIncrement);
+ w.add(DictTabInfo::AttributeDefaultValue, attrPtr.p->defaultValue);
+
+ w.add(DictTabInfo::AttributeEnd, 1);
+ nextAttribute = attrPtr.p->nextAttrInTable;
+ } while (nextAttribute != RNIL);
+
+ w.add(DictTabInfo::TableEnd, 1);
+}
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+// The routines to handle responses from file system.
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+
+/* ---------------------------------------------------------------- */
+// A file was successfully closed.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSCLOSECONF(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ jamEntry();
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::CLOSE_WRITE_SCHEMA:
+ jam();
+ closeWriteSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_READ_SCHEMA:
+ jam();
+ closeReadSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_READ_TAB_FILE:
+ jam();
+ closeReadTableConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::CLOSE_WRITE_TAB_FILE:
+ jam();
+ closeWriteTableConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSCLOSECONF()
+
+/* ---------------------------------------------------------------- */
+// A close file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, 0);
+}//execFSCLOSEREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully opened.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSOPENCONF(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ jamEntry();
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+
+ Uint32 filePointer = fsConf->filePointer;
+ fsPtr.p->filePtr = filePointer;
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::OPEN_WRITE_SCHEMA:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::WRITE_SCHEMA;
+ writeSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_SCHEMA1:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA1;
+ readSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_SCHEMA2:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA2;
+ readSchemaFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE1:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE1;
+ readTableFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE2:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE2;
+ readTableFile(signal, filePointer, fsPtr.i);
+ break;
+ case FsConnectRecord::OPEN_WRITE_TAB_FILE:
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::WRITE_TAB_FILE;
+ writeTableFile(signal, filePointer, fsPtr.i);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSOPENCONF()
+
+/* ---------------------------------------------------------------- */
+// An open file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::OPEN_READ_SCHEMA1:
+ openReadSchemaRef(signal, fsPtr);
+ break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE1:
+ jam();
+ openReadTableRef(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSOPENREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully read.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::READ_SCHEMA1:
+ case FsConnectRecord::READ_SCHEMA2:
+ readSchemaConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::READ_TAB_FILE1:
+ case FsConnectRecord::READ_TAB_FILE2:
+ jam();
+ readTableConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSREADCONF()
+
+/* ---------------------------------------------------------------- */
+// A read file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::READ_SCHEMA1:
+ readSchemaRef(signal, fsPtr);
+ break;
+ case FsConnectRecord::READ_TAB_FILE1:
+ jam();
+ readTableRef(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSREADREF()
+
+/* ---------------------------------------------------------------- */
+// A file was successfully written.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSWRITECONF(Signal* signal)
+{
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ FsConnectRecordPtr fsPtr;
+ jamEntry();
+ c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer);
+ switch (fsPtr.p->fsState) {
+ case FsConnectRecord::WRITE_TAB_FILE:
+ writeTableConf(signal, fsPtr);
+ break;
+ case FsConnectRecord::WRITE_SCHEMA:
+ jam();
+ writeSchemaConf(signal, fsPtr);
+ break;
+ default:
+ jamLine((fsPtr.p->fsState & 0xFFF));
+ ndbrequire(false);
+ break;
+ }//switch
+}//execFSWRITECONF()
+
+/* ---------------------------------------------------------------- */
+// A write file was refused.
+/* ---------------------------------------------------------------- */
+void Dbdict::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ progError(0, 0);
+}//execFSWRITEREF()
+
+/* ---------------------------------------------------------------- */
+// Routines to handle Read/Write of Table Files
+/* ---------------------------------------------------------------- */
+void
+Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
+ SegmentedSectionPtr tabInfoPtr, Callback* callback){
+
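+ // Copy the packed table info into a page, checksum it, and write that page
+ // image to both table file copies; the callback is executed from
+ // closeWriteTableConf once the second file has been written and closed.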
+ ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
+
+ Uint32 sz = tabInfoPtr.sz + ZPAGE_HEADER_SIZE;
+
+ c_writeTableRecord.noOfPages = DIV(sz, ZSIZE_OF_PAGES_IN_WORDS);
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
+ c_writeTableRecord.m_callback = * callback;
+
+ c_writeTableRecord.pageId = 0;
+ ndbrequire(c_writeTableRecord.noOfPages < 8);
+
+ PageRecordPtr pageRecPtr;
+ c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId);
+ copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr);
+
+ memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE);
+ pageRecPtr.p->word[ZPOS_CHECKSUM] =
+ computeChecksum(&pageRecPtr.p->word[0],
+ c_writeTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS);
+
+ startWriteTableFile(signal, tableId);
+
+}
+
+void Dbdict::startWriteTableFile(Signal* signal, Uint32 tableId)
+{
+ FsConnectRecordPtr fsPtr;
+ c_writeTableRecord.tableId = tableId;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
+ openTableFile(signal, 0, fsPtr.i, tableId, true);
+ c_writeTableRecord.noOfTableFilesHandled = 0;
+}//Dbdict::startWriteTableFile()
+
+void Dbdict::openTableFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsConPtr,
+ Uint32 tableId,
+ bool writeFlag)
+{
+ TableRecordPtr tablePtr;
+ FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+
+ fsOpenReq->userReference = reference();
+ fsOpenReq->userPointer = fsConPtr;
+ if (writeFlag) {
+ jam();
+ fsOpenReq->fileFlags =
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE |
+ FsOpenReq::OM_SYNC;
+ } else {
+ jam();
+ fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
+ }//if
+ ndbrequire(tablePtr.p->tableVersion < ZNIL);
+ fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
+ FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
+ FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_TABLELIST);
+ FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
+ FsOpenReq::v1_setTable(fsOpenReq->fileNumber, tableId);
+ FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setS(fsOpenReq->fileNumber, tablePtr.p->tableVersion);
+ FsOpenReq::v1_setP(fsOpenReq->fileNumber, 255);
+/* ---------------------------------------------------------------- */
+// File name : D1/DBDICT/T0/S1.TableList
+// D1 means Disk 1 (set by fileNo + 1)
+// T0 means table id = 0
+// S1 means tableVersion 1
+// TableList indicates that this is a file for a table description.
+/* ---------------------------------------------------------------- */
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+}//openTableFile()
+
+void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->numberOfPages = c_writeTableRecord.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//writeTableFile()
+
+void Dbdict::writeTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_TAB_FILE;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::writeTableConf()
+
+void Dbdict::closeWriteTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_writeTableRecord.noOfTableFilesHandled++;
+ if (c_writeTableRecord.noOfTableFilesHandled < 2) {
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE;
+ openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true);
+ return;
+ }
+ ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2);
+ c_fsConnectRecordPool.release(fsPtr);
+ WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
+ switch (state) {
+ case WriteTableRecord::IDLE:
+ case WriteTableRecord::WRITE_ADD_TABLE_MASTER :
+ case WriteTableRecord::WRITE_ADD_TABLE_SLAVE :
+ case WriteTableRecord::WRITE_RESTART_FROM_MASTER :
+ case WriteTableRecord::WRITE_RESTART_FROM_OWN :
+ ndbrequire(false);
+ break;
+ case WriteTableRecord::TWR_CALLBACK:
+ jam();
+ execute(signal, c_writeTableRecord.m_callback, 0);
+ return;
+ }
+ ndbrequire(false);
+}//Dbdict::closeWriteTableConf()
+
+void Dbdict::startReadTableFile(Signal* signal, Uint32 tableId)
+{
+ //globalSignalLoggers.log(number(), "startReadTableFile");
+ ndbrequire(!c_readTableRecord.inUse);
+
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ c_readTableRecord.inUse = true;
+ c_readTableRecord.tableId = tableId;
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE1;
+ openTableFile(signal, 0, fsPtr.i, tableId, false);
+}//Dbdict::startReadTableFile()
+
+void Dbdict::openReadTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
+ openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ return;
+}//Dbdict::openReadTableConf()
+
+void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->numberOfPages = c_readTableRecord.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0; // Read from file page 0
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//readTableFile()
+
+void Dbdict::readTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ /* ---------------------------------------------------------------- */
+ // Verify the data read from disk
+ /* ---------------------------------------------------------------- */
+ bool crashInd;
+ if (fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1) {
+ jam();
+ crashInd = false;
+ } else {
+ jam();
+ crashInd = true;
+ }//if
+
+ PageRecordPtr tmpPagePtr;
+ c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId);
+ Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
+ Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
+
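+ // crashInd == false: first table file copy; a bad checksum just means we
+ // retry from the second copy (readTableRef below). crashInd == true: the
+ // second copy is also corrupt, which is fatal (ndbrequire below).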
+ ndbrequire((chk == 0) || !crashInd);
+ if(chk != 0){
+ jam();
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1);
+ readTableRef(signal, fsPtr);
+ return;
+ }//if
+
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::readTableConf()
+
+void Dbdict::readTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
+ openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ return;
+}//Dbdict::readTableRef()
+
+void Dbdict::closeReadTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_fsConnectRecordPool.release(fsPtr);
+ c_readTableRecord.inUse = false;
+
+ execute(signal, c_readTableRecord.m_callback, 0);
+ return;
+}//Dbdict::closeReadTableConf()
+
+/* ---------------------------------------------------------------- */
+// Routines to handle Read/Write of Schema Files
+/* ---------------------------------------------------------------- */
+void
+Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
+ SchemaFile::TableEntry* te, Callback* callback){
+
+ jam();
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ ndbrequire(tableId < c_tableRecordPool.getSize());
+ SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+
+ SchemaFile::TableState newState =
+ (SchemaFile::TableState)te->m_tableState;
+ SchemaFile::TableState oldState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+
+ Uint32 newVersion = te->m_tableVersion;
+ Uint32 oldVersion = tableEntry->m_tableVersion;
+
+ bool ok = false;
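+ // Transitions accepted below:
+ //   INIT / DROP_TABLE_COMMITTED -> ADD_STARTED (version incremented)
+ //   ADD_STARTED -> TABLE_ADD_COMMITTED (version unchanged)
+ //   TABLE_ADD_COMMITTED / ALTER_TABLE_COMMITTED -> ALTER_TABLE_COMMITTED (version incremented)
+ //   ADD_STARTED -> INIT (add rolled back)
+ // DROP_TABLE_* is never written through this path.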
+ switch(newState){
+ case SchemaFile::ADD_STARTED:
+ jam();
+ ok = true;
+ ndbrequire((oldVersion + 1) == newVersion);
+ ndbrequire(oldState == SchemaFile::INIT ||
+ oldState == SchemaFile::DROP_TABLE_COMMITTED);
+ break;
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire(newVersion == oldVersion);
+ ndbrequire(oldState == SchemaFile::ADD_STARTED);
+ break;
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire((oldVersion + 1) == newVersion);
+ ndbrequire(oldState == SchemaFile::TABLE_ADD_COMMITTED ||
+ oldState == SchemaFile::ALTER_TABLE_COMMITTED);
+ break;
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ ndbrequire(false);
+ break;
+ case SchemaFile::INIT:
+ jam();
+ ok = true;
+ ndbrequire((oldState == SchemaFile::ADD_STARTED));
+ }//switch
+ ndbrequire(ok);
+
+ * tableEntry = * te;
+ computeChecksum((SchemaFile*)pagePtr.p);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.m_callback = * callback;
+
+ startWriteSchemaFile(signal);
+}
+
+void Dbdict::startWriteSchemaFile(Signal* signal)
+{
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
+ openSchemaFile(signal, 0, fsPtr.i, true);
+ c_writeSchemaRecord.noOfSchemaFilesHandled = 0;
+}//Dbdict::startWriteSchemaFile()
+
+void Dbdict::openSchemaFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsConPtr,
+ bool writeFlag)
+{
+ FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ fsOpenReq->userReference = reference();
+ fsOpenReq->userPointer = fsConPtr;
+ if (writeFlag) {
+ jam();
+ fsOpenReq->fileFlags =
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE |
+ FsOpenReq::OM_SYNC;
+ } else {
+ jam();
+ fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
+ }//if
+ fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes
+ FsOpenReq::setVersion(fsOpenReq->fileNumber, 1);
+ FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_SCHEMALOG);
+ FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1));
+ FsOpenReq::v1_setTable(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setS(fsOpenReq->fileNumber, (Uint32)-1);
+ FsOpenReq::v1_setP(fsOpenReq->fileNumber, 0);
+/* ---------------------------------------------------------------- */
+// File name : D1/DBDICT/P0.SchemaLog
+// D1 means Disk 1 (set by fileNo + 1). Writes to both D1 and D2
+// SchemaLog indicates that this is a file giving a list of current tables.
+/* ---------------------------------------------------------------- */
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
+}//openSchemaFile()
+
+void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->numberOfPages = 1;
+// Write from memory page
+ fsRWReq->data.arrayOfPages.varIndex = c_writeSchemaRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//writeSchemaFile()
+
+void Dbdict::writeSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_SCHEMA;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::writeSchemaConf()
+
+void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
+ fsCloseReq->filePointer = filePtr;
+ fsCloseReq->userReference = reference();
+ fsCloseReq->userPointer = fsConPtr;
+ FsCloseReq::setRemoveFileFlag(fsCloseReq->fileFlag, false);
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
+ return;
+}//closeFile()
+
+void Dbdict::closeWriteSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_writeSchemaRecord.noOfSchemaFilesHandled++;
+ if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) {
+ jam();
+ fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
+ openSchemaFile(signal, 1, fsPtr.i, true);
+ return;
+ }
+ ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
+
+ c_fsConnectRecordPool.release(fsPtr);
+
+ c_writeSchemaRecord.inUse = false;
+ execute(signal, c_writeSchemaRecord.m_callback, 0);
+ return;
+}//Dbdict::closeWriteSchemaConf()
+
+void Dbdict::startReadSchemaFile(Signal* signal)
+{
+ //globalSignalLoggers.log(number(), "startReadSchemaFile");
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1;
+ openSchemaFile(signal, 0, fsPtr.i, false);
+}//Dbdict::startReadSchemaFile()
+
+void Dbdict::openReadSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
+ openSchemaFile(signal, 1, fsPtr.i, false);
+}//Dbdict::openReadSchemaRef()
+
+void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
+{
+ FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+
+ fsRWReq->filePointer = filePtr;
+ fsRWReq->userReference = reference();
+ fsRWReq->userPointer = fsConPtr;
+ fsRWReq->operationFlag = 0; // Initialise before bit changes
+ FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
+ FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
+ FsReadWriteReq::fsFormatArrayOfPages);
+ fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->numberOfPages = 1;
+ fsRWReq->data.arrayOfPages.varIndex = c_readSchemaRecord.pageId;
+ fsRWReq->data.arrayOfPages.fileOffset = 0;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//readSchemaFile()
+
+void Dbdict::readSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+/* ---------------------------------------------------------------- */
+// Verify the data read from disk
+/* ---------------------------------------------------------------- */
+ bool crashInd;
+ if (fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1) {
+ jam();
+ crashInd = false;
+ } else {
+ jam();
+ crashInd = true;
+ }//if
+ PageRecordPtr tmpPagePtr;
+ c_pageRecordArray.getPtr(tmpPagePtr, c_readSchemaRecord.pageId);
+
+ Uint32 sz = ZSIZE_OF_PAGES_IN_WORDS;
+ Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
+
+ ndbrequire((chk == 0) || !crashInd);
+
+ if (chk != 0){
+ jam();
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+ readSchemaRef(signal, fsPtr);
+ return;
+ }//if
+ fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA;
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+}//Dbdict::readSchemaConf()
+
+void Dbdict::readSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
+ openSchemaFile(signal, 1, fsPtr.i, false);
+ return;
+}//Dbdict::readSchemaRef()
+
+void Dbdict::closeReadSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr)
+{
+ c_fsConnectRecordPool.release(fsPtr);
+ ReadSchemaRecord::SchemaReadState state = c_readSchemaRecord.schemaReadState;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
+
+ switch(state) {
+ case ReadSchemaRecord::INITIAL_READ :
+ jam();
+ sendNDB_STTORRY(signal);
+ break;
+
+ default :
+ ndbrequire(false);
+ break;
+
+ }//switch
+}//Dbdict::closeReadSchemaConf()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: INITIALISATION MODULE ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains initialisation of data at start/restart. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+Dbdict::Dbdict(const class Configuration & conf):
+ SimulatedBlock(DBDICT, conf),
+ c_tableRecordHash(c_tableRecordPool),
+ c_attributeRecordHash(c_attributeRecordPool),
+ c_triggerRecordHash(c_triggerRecordPool),
+ c_opCreateTable(c_opRecordPool),
+ c_opDropTable(c_opRecordPool),
+ c_opCreateIndex(c_opRecordPool),
+ c_opDropIndex(c_opRecordPool),
+ c_opAlterIndex(c_opRecordPool),
+ c_opBuildIndex(c_opRecordPool),
+ c_opCreateEvent(c_opRecordPool),
+ c_opSubEvent(c_opRecordPool),
+ c_opDropEvent(c_opRecordPool),
+ c_opSignalUtil(c_opRecordPool),
+ c_opCreateTrigger(c_opRecordPool),
+ c_opDropTrigger(c_opRecordPool),
+ c_opAlterTrigger(c_opRecordPool),
+ c_opRecordSequence(0)
+{
+ BLOCK_CONSTRUCTOR(Dbdict);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, &c_maxNoOfTriggers);
+ // Transit signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD);
+ addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ);
+ addRecSignal(GSN_GET_TABLEID_REQ, &Dbdict::execGET_TABLEDID_REQ);
+ addRecSignal(GSN_GET_TABINFO_CONF, &Dbdict::execGET_TABINFO_CONF);
+ addRecSignal(GSN_CONTINUEB, &Dbdict::execCONTINUEB);
+
+ addRecSignal(GSN_CREATE_TABLE_REQ, &Dbdict::execCREATE_TABLE_REQ);
+ addRecSignal(GSN_CREATE_TAB_REQ, &Dbdict::execCREATE_TAB_REQ);
+ addRecSignal(GSN_CREATE_TAB_REF, &Dbdict::execCREATE_TAB_REF);
+ addRecSignal(GSN_CREATE_TAB_CONF, &Dbdict::execCREATE_TAB_CONF);
+ addRecSignal(GSN_CREATE_FRAGMENTATION_REF, &Dbdict::execCREATE_FRAGMENTATION_REF);
+ addRecSignal(GSN_CREATE_FRAGMENTATION_CONF, &Dbdict::execCREATE_FRAGMENTATION_CONF);
+ addRecSignal(GSN_DIADDTABCONF, &Dbdict::execDIADDTABCONF);
+ addRecSignal(GSN_DIADDTABREF, &Dbdict::execDIADDTABREF);
+ addRecSignal(GSN_ADD_FRAGREQ, &Dbdict::execADD_FRAGREQ);
+ addRecSignal(GSN_TAB_COMMITCONF, &Dbdict::execTAB_COMMITCONF);
+ addRecSignal(GSN_TAB_COMMITREF, &Dbdict::execTAB_COMMITREF);
+ addRecSignal(GSN_ALTER_TABLE_REQ, &Dbdict::execALTER_TABLE_REQ);
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dbdict::execALTER_TAB_REQ);
+ addRecSignal(GSN_ALTER_TAB_REF, &Dbdict::execALTER_TAB_REF);
+ addRecSignal(GSN_ALTER_TAB_CONF, &Dbdict::execALTER_TAB_CONF);
+
+ // Index signals
+ addRecSignal(GSN_CREATE_INDX_REQ, &Dbdict::execCREATE_INDX_REQ);
+ addRecSignal(GSN_CREATE_INDX_CONF, &Dbdict::execCREATE_INDX_CONF);
+ addRecSignal(GSN_CREATE_INDX_REF, &Dbdict::execCREATE_INDX_REF);
+
+ addRecSignal(GSN_ALTER_INDX_REQ, &Dbdict::execALTER_INDX_REQ);
+ addRecSignal(GSN_ALTER_INDX_CONF, &Dbdict::execALTER_INDX_CONF);
+ addRecSignal(GSN_ALTER_INDX_REF, &Dbdict::execALTER_INDX_REF);
+
+ addRecSignal(GSN_CREATE_TABLE_CONF, &Dbdict::execCREATE_TABLE_CONF);
+ addRecSignal(GSN_CREATE_TABLE_REF, &Dbdict::execCREATE_TABLE_REF);
+
+ addRecSignal(GSN_DROP_INDX_REQ, &Dbdict::execDROP_INDX_REQ);
+ addRecSignal(GSN_DROP_INDX_CONF, &Dbdict::execDROP_INDX_CONF);
+ addRecSignal(GSN_DROP_INDX_REF, &Dbdict::execDROP_INDX_REF);
+
+ addRecSignal(GSN_DROP_TABLE_CONF, &Dbdict::execDROP_TABLE_CONF);
+ addRecSignal(GSN_DROP_TABLE_REF, &Dbdict::execDROP_TABLE_REF);
+
+ addRecSignal(GSN_BUILDINDXREQ, &Dbdict::execBUILDINDXREQ);
+ addRecSignal(GSN_BUILDINDXCONF, &Dbdict::execBUILDINDXCONF);
+ addRecSignal(GSN_BUILDINDXREF, &Dbdict::execBUILDINDXREF);
+
+ // Util signals
+ addRecSignal(GSN_UTIL_PREPARE_CONF, &Dbdict::execUTIL_PREPARE_CONF);
+ addRecSignal(GSN_UTIL_PREPARE_REF, &Dbdict::execUTIL_PREPARE_REF);
+
+ addRecSignal(GSN_UTIL_EXECUTE_CONF, &Dbdict::execUTIL_EXECUTE_CONF);
+ addRecSignal(GSN_UTIL_EXECUTE_REF, &Dbdict::execUTIL_EXECUTE_REF);
+
+ addRecSignal(GSN_UTIL_RELEASE_CONF, &Dbdict::execUTIL_RELEASE_CONF);
+ addRecSignal(GSN_UTIL_RELEASE_REF, &Dbdict::execUTIL_RELEASE_REF);
+
+ // Event signals
+ addRecSignal(GSN_CREATE_EVNT_REQ, &Dbdict::execCREATE_EVNT_REQ);
+ addRecSignal(GSN_CREATE_EVNT_CONF, &Dbdict::execCREATE_EVNT_CONF);
+ addRecSignal(GSN_CREATE_EVNT_REF, &Dbdict::execCREATE_EVNT_REF);
+
+ addRecSignal(GSN_CREATE_SUBID_CONF, &Dbdict::execCREATE_SUBID_CONF);
+ addRecSignal(GSN_CREATE_SUBID_REF, &Dbdict::execCREATE_SUBID_REF);
+
+ addRecSignal(GSN_SUB_CREATE_CONF, &Dbdict::execSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Dbdict::execSUB_CREATE_REF);
+
+ addRecSignal(GSN_SUB_START_REQ, &Dbdict::execSUB_START_REQ);
+ addRecSignal(GSN_SUB_START_CONF, &Dbdict::execSUB_START_CONF);
+ addRecSignal(GSN_SUB_START_REF, &Dbdict::execSUB_START_REF);
+
+ addRecSignal(GSN_SUB_STOP_REQ, &Dbdict::execSUB_STOP_REQ);
+ addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF);
+ addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF);
+
+ addRecSignal(GSN_SUB_SYNC_CONF, &Dbdict::execSUB_SYNC_CONF);
+ addRecSignal(GSN_SUB_SYNC_REF, &Dbdict::execSUB_SYNC_REF);
+
+ addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ);
+
+ addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ);
+ addRecSignal(GSN_SUB_REMOVE_CONF, &Dbdict::execSUB_REMOVE_CONF);
+ addRecSignal(GSN_SUB_REMOVE_REF, &Dbdict::execSUB_REMOVE_REF);
+
+ // Trigger signals
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dbdict::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Dbdict::execCREATE_TRIG_CONF);
+ addRecSignal(GSN_CREATE_TRIG_REF, &Dbdict::execCREATE_TRIG_REF);
+ addRecSignal(GSN_ALTER_TRIG_REQ, &Dbdict::execALTER_TRIG_REQ);
+ addRecSignal(GSN_ALTER_TRIG_CONF, &Dbdict::execALTER_TRIG_CONF);
+ addRecSignal(GSN_ALTER_TRIG_REF, &Dbdict::execALTER_TRIG_REF);
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dbdict::execDROP_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Dbdict::execDROP_TRIG_CONF);
+ addRecSignal(GSN_DROP_TRIG_REF, &Dbdict::execDROP_TRIG_REF);
+
+ // Received signals
+ addRecSignal(GSN_HOT_SPAREREP, &Dbdict::execHOT_SPAREREP);
+ addRecSignal(GSN_GET_SCHEMA_INFOREQ, &Dbdict::execGET_SCHEMA_INFOREQ);
+ addRecSignal(GSN_SCHEMA_INFO, &Dbdict::execSCHEMA_INFO);
+ addRecSignal(GSN_SCHEMA_INFOCONF, &Dbdict::execSCHEMA_INFOCONF);
+ addRecSignal(GSN_DICTSTARTREQ, &Dbdict::execDICTSTARTREQ);
+ addRecSignal(GSN_READ_NODESCONF, &Dbdict::execREAD_NODESCONF);
+ addRecSignal(GSN_FSOPENCONF, &Dbdict::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbdict::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbdict::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbdict::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbdict::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbdict::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbdict::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbdict::execFSREADREF);
+ addRecSignal(GSN_LQHFRAGCONF, &Dbdict::execLQHFRAGCONF);
+ addRecSignal(GSN_LQHADDATTCONF, &Dbdict::execLQHADDATTCONF);
+ addRecSignal(GSN_LQHADDATTREF, &Dbdict::execLQHADDATTREF);
+ addRecSignal(GSN_LQHFRAGREF, &Dbdict::execLQHFRAGREF);
+ addRecSignal(GSN_NDB_STTOR, &Dbdict::execNDB_STTOR);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbdict::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_STTOR, &Dbdict::execSTTOR);
+ addRecSignal(GSN_TC_SCHVERCONF, &Dbdict::execTC_SCHVERCONF);
+ addRecSignal(GSN_NODE_FAILREP, &Dbdict::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Dbdict::execINCL_NODEREQ);
+ addRecSignal(GSN_API_FAILREQ, &Dbdict::execAPI_FAILREQ);
+
+ addRecSignal(GSN_WAIT_GCP_REF, &Dbdict::execWAIT_GCP_REF);
+ addRecSignal(GSN_WAIT_GCP_CONF, &Dbdict::execWAIT_GCP_CONF);
+
+ addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ);
+
+ addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ);
+
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF);
+ addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF);
+
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbdict::execDROP_TAB_REQ);
+ addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF);
+ addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF);
+}//Dbdict::Dbdict()
+
+Dbdict::~Dbdict()
+{
+}//Dbdict::~Dbdict()
+
+BLOCK_FUNCTIONS(Dbdict)
+
+void Dbdict::initCommonData()
+{
+/* ---------------------------------------------------------------- */
+// Initialise all common variables.
+/* ---------------------------------------------------------------- */
+ initRetrieveRecord(0, 0, 0);
+ initSchemaRecord();
+ initRestartRecord();
+ initSendSchemaRecord();
+ initReadTableRecord();
+ initWriteTableRecord();
+ initReadSchemaRecord();
+ initWriteSchemaRecord();
+
+ c_masterNodeId = ZNIL;
+ c_numberNode = 0;
+ c_noNodesFailed = 0;
+ c_failureNr = 0;
+ c_blockState = BS_IDLE;
+ c_packTable.m_state = PackTable::PTS_IDLE;
+ c_startPhase = 0;
+ c_restartType = 255; // Invalid value; ensures the restart type is set before use
+ c_tabinfoReceived = 0;
+ c_initialStart = false;
+ c_systemRestart = false;
+ c_initialNodeRestart = false;
+ c_nodeRestart = false;
+}//Dbdict::initCommonData()
+
+void Dbdict::initRecords()
+{
+ initNodeRecords();
+ initPageRecords();
+ initTableRecords();
+ initTriggerRecords();
+}//Dbdict::initRecords()
+
+void Dbdict::initSendSchemaRecord()
+{
+ c_sendSchemaRecord.noOfWords = (Uint32)-1;
+ c_sendSchemaRecord.pageId = RNIL;
+ c_sendSchemaRecord.noOfWordsCurrentlySent = 0;
+ c_sendSchemaRecord.noOfSignalsSentSinceDelay = 0;
+ c_sendSchemaRecord.inUse = false;
+ //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE;
+}//initSendSchemaRecord()
+
+void Dbdict::initReadTableRecord()
+{
+ c_readTableRecord.noOfPages = (Uint32)-1;
+ c_readTableRecord.pageId = RNIL;
+ c_readTableRecord.tableId = ZNIL;
+ c_readTableRecord.inUse = false;
+}//initReadTableRecord()
+
+void Dbdict::initWriteTableRecord()
+{
+ c_writeTableRecord.noOfPages = (Uint32)-1;
+ c_writeTableRecord.pageId = RNIL;
+ c_writeTableRecord.noOfTableFilesHandled = 3;
+ c_writeTableRecord.tableId = ZNIL;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE;
+}//initWriteTableRecord()
+
+void Dbdict::initReadSchemaRecord()
+{
+ c_readSchemaRecord.pageId = RNIL;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE;
+}//initReadSchemaRecord()
+
+void Dbdict::initWriteSchemaRecord()
+{
+ c_writeSchemaRecord.inUse = false;
+ c_writeSchemaRecord.pageId = RNIL;
+ c_writeSchemaRecord.noOfSchemaFilesHandled = 3;
+}//initWriteSchemaRecord()
+
+void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
+{
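+ // The signal, i and returnCode parameters are unused; the retrieve record is
+ // simply reset to its idle defaults.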
+ c_retrieveRecord.busyState = false;
+ c_retrieveRecord.blockRef = 0;
+ c_retrieveRecord.m_senderData = RNIL;
+ c_retrieveRecord.tableId = RNIL;
+ c_retrieveRecord.currentSent = 0;
+ c_retrieveRecord.retrievedNoOfPages = 0;
+ c_retrieveRecord.retrievedNoOfWords = 0;
+ c_retrieveRecord.m_useLongSig = false;
+}//initRetrieveRecord()
+
+void Dbdict::initSchemaRecord()
+{
+ c_schemaRecord.schemaPage = RNIL;
+}//Dbdict::initSchemaRecord()
+
+void Dbdict::initRestartRecord()
+{
+ c_restartRecord.gciToRestart = 0;
+ c_restartRecord.activeTable = ZNIL;
+}//Dbdict::initRestartRecord()
+
+void Dbdict::initNodeRecords()
+{
+ jam();
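+ // Node id 0 is unused; every record starts out as an API node until
+ // READ_NODESCONF classifies the NDB nodes.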
+ for (unsigned i = 1; i < MAX_NODES; i++) {
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+ nodePtr.p->hotSpare = false;
+ nodePtr.p->nodeState = NodeRecord::API_NODE;
+ }//for
+}//Dbdict::initNodeRecords()
+
+void Dbdict::initPageRecords()
+{
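+ // Reserve fixed page slots directly after the table definition pages:
+ // the current schema page, the old schema page and the retrieve page.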
+ c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+ c_schemaRecord.oldSchemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION + 1;
+ c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION + 2;
+ ndbrequire(ZNUMBER_OF_PAGES >= (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2));
+}//Dbdict::initPageRecords()
+
+void Dbdict::initTableRecords()
+{
+ TableRecordPtr tablePtr;
+ while (1) {
+ jam();
+ refresh_watch_dog();
+ c_tableRecordPool.seize(tablePtr);
+ if (tablePtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ initialiseTableRecord(tablePtr);
+ }//while
+}//Dbdict::initTableRecords()
+
+void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
+{
+ tablePtr.p->activePage = RNIL;
+ tablePtr.p->filePtr[0] = RNIL;
+ tablePtr.p->filePtr[1] = RNIL;
+ tablePtr.p->firstAttribute = RNIL;
+ tablePtr.p->firstPage = RNIL;
+ tablePtr.p->lastAttribute = RNIL;
+ tablePtr.p->tableId = tablePtr.i;
+ tablePtr.p->tableVersion = (Uint32)-1;
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
+ tablePtr.p->tabReturnState = TableRecord::TRS_IDLE;
+ tablePtr.p->myConnect = RNIL;
+ tablePtr.p->fragmentType = DictTabInfo::AllNodesSmallTable;
+ memset(tablePtr.p->tableName, 0, sizeof(tablePtr.p->tableName));
+ tablePtr.p->gciTableCreated = 0;
+ tablePtr.p->noOfAttributes = ZNIL;
+ tablePtr.p->noOfNullAttr = 0;
+ tablePtr.p->frmLen = 0;
+ memset(tablePtr.p->frmData, 0, sizeof(tablePtr.p->frmData));
+ /*
+ tablePtr.p->lh3PageIndexBits = 0;
+ tablePtr.p->lh3DistrBits = 0;
+ tablePtr.p->lh3PageBits = 6;
+ */
+ tablePtr.p->kValue = 6;
+ tablePtr.p->localKeyLen = 1;
+ tablePtr.p->maxLoadFactor = 80;
+ tablePtr.p->minLoadFactor = 70;
+ tablePtr.p->noOfPrimkey = 1;
+ tablePtr.p->tupKeyLength = 1;
+ tablePtr.p->storedTable = true;
+ tablePtr.p->tableType = DictTabInfo::UserTable;
+ tablePtr.p->primaryTableId = RNIL;
+ // volatile elements
+ tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
+ tablePtr.p->insertTriggerId = RNIL;
+ tablePtr.p->updateTriggerId = RNIL;
+ tablePtr.p->deleteTriggerId = RNIL;
+ tablePtr.p->customTriggerId = RNIL;
+ tablePtr.p->buildTriggerId = RNIL;
+ tablePtr.p->indexLocal = 0;
+}//Dbdict::initialiseTableRecord()
+
+void Dbdict::initTriggerRecords()
+{
+ TriggerRecordPtr triggerPtr;
+ while (1) {
+ jam();
+ refresh_watch_dog();
+ c_triggerRecordPool.seize(triggerPtr);
+ if (triggerPtr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ initialiseTriggerRecord(triggerPtr);
+ }//while
+}
+
+void Dbdict::initialiseTriggerRecord(TriggerRecordPtr triggerPtr)
+{
+ triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
+ triggerPtr.p->triggerLocal = 0;
+ memset(triggerPtr.p->triggerName, 0, sizeof(triggerPtr.p->triggerName));
+ triggerPtr.p->triggerId = RNIL;
+ triggerPtr.p->tableId = RNIL;
+ triggerPtr.p->triggerType = (TriggerType::Value)~0;
+ triggerPtr.p->triggerActionTime = (TriggerActionTime::Value)~0;
+ triggerPtr.p->triggerEvent = (TriggerEvent::Value)~0;
+ triggerPtr.p->monitorReplicas = false;
+ triggerPtr.p->monitorAllAttributes = false;
+ triggerPtr.p->attributeMask.clear();
+ triggerPtr.p->indexId = RNIL;
+}
+
+Uint32 Dbdict::getFsConnRecord()
+{
+ FsConnectRecordPtr fsPtr;
+ c_fsConnectRecordPool.seize(fsPtr);
+ ndbrequire(fsPtr.i != RNIL);
+ fsPtr.p->filePtr = (Uint32)-1;
+ fsPtr.p->ownerPtr = RNIL;
+ fsPtr.p->fsState = FsConnectRecord::IDLE;
+ return fsPtr.i;
+}//Dbdict::getFsConnRecord()
+
+Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
+{
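+ // When a primary table id is supplied (index creation), the search for a
+ // free record starts just above that id.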
+ Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1);
+ TableRecordPtr tablePtr;
+ TableRecordPtr firstTablePtr;
+ bool firstFound = false;
+ Uint32 tabSize = c_tableRecordPool.getSize();
+ for (tablePtr.i = minId; tablePtr.i < tabSize ; tablePtr.i++) {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ jam();
+ initialiseTableRecord(tablePtr);
+ tablePtr.p->tabState = TableRecord::DEFINING;
+ firstFound = true;
+ firstTablePtr.i = tablePtr.i;
+ firstTablePtr.p = tablePtr.p;
+ break;
+ }//if
+ }//for
+ if (!firstFound) {
+ jam();
+ return RNIL;
+ }//if
+#ifdef HAVE_TABLE_REORG
+ bool secondFound = false;
+ for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ jam();
+ initialiseTableRecord(tablePtr);
+ tablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
+ tablePtr.p->secondTable = firstTablePtr.i;
+ firstTablePtr.p->secondTable = tablePtr.i;
+ secondFound = true;
+ break;
+ }//if
+ }//for
+ if (!secondFound) {
+ jam();
+ firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
+ return RNIL;
+ }//if
+#endif
+ return firstTablePtr.i;
+}//Dbdict::getFreeTableRecord()
+
+Uint32 Dbdict::getFreeTriggerRecord()
+{
+ const Uint32 size = c_triggerRecordPool.getSize();
+ TriggerRecordPtr triggerPtr;
+ for (triggerPtr.i = 0; triggerPtr.i < size; triggerPtr.i++) {
+ jam();
+ c_triggerRecordPool.getPtr(triggerPtr);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
+ jam();
+ initialiseTriggerRecord(triggerPtr);
+ return triggerPtr.i;
+ }
+ }
+ return RNIL;
+}
+
+bool
+Dbdict::getNewAttributeRecord(TableRecordPtr tablePtr,
+ AttributeRecordPtr & attrPtr)
+{
+ c_attributeRecordPool.seize(attrPtr);
+ if(attrPtr.i == RNIL){
+ return false;
+ }
+
+ memset(attrPtr.p->attributeName, 0, sizeof(attrPtr.p->attributeName));
+ attrPtr.p->attributeDescriptor = 0x00012255; //Default value
+ attrPtr.p->attributeId = ZNIL;
+ attrPtr.p->nextAttrInTable = RNIL;
+ attrPtr.p->tupleKey = 0;
+ memset(attrPtr.p->defaultValue, 0, sizeof(attrPtr.p->defaultValue));
+
+ /* ---------------------------------------------------------------- */
+ // A free attribute record has been acquired. We will now link it
+ // to the table record.
+ /* ---------------------------------------------------------------- */
+ if (tablePtr.p->lastAttribute == RNIL) {
+ jam();
+ tablePtr.p->firstAttribute = attrPtr.i;
+ } else {
+ jam();
+ AttributeRecordPtr lastAttrPtr;
+ c_attributeRecordPool.getPtr(lastAttrPtr, tablePtr.p->lastAttribute);
+ lastAttrPtr.p->nextAttrInTable = attrPtr.i;
+ }//if
+ tablePtr.p->lastAttribute = attrPtr.i;
+ return true;
+}//Dbdict::getNewAttributeRecord()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: START/RESTART HANDLING ------------------------ */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is common to all */
+/* start/restart types. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This is sent as the first signal during start/restart.
+/* ---------------------------------------------------------------- */
+void Dbdict::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ c_startPhase = signal->theData[1];
+ switch (c_startPhase) {
+ case 1:
+ break;
+ case 3:
+ c_restartType = signal->theData[7]; /* valid if 3 */
+ ndbrequire(c_restartType == NodeState::ST_INITIAL_START ||
+ c_restartType == NodeState::ST_SYSTEM_RESTART ||
+ c_restartType == NodeState::ST_INITIAL_NODE_RESTART ||
+ c_restartType == NodeState::ST_NODE_RESTART);
+ break;
+ }
+ sendSTTORRY(signal);
+}//execSTTOR()
+
+void Dbdict::sendSTTORRY(Signal* signal)
+{
+ signal->theData[0] = 0; /* garbage SIGNAL KEY */
+ signal->theData[1] = 0; /* garbage SIGNAL VERSION NUMBER */
+ signal->theData[2] = 0; /* garbage */
+ signal->theData[3] = 1; /* first wanted start phase */
+ signal->theData[4] = 3; /* get type of start */
+ signal->theData[5] = ZNOMOREPHASES;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB);
+}
+
+/* ---------------------------------------------------------------- */
+// We receive information about sizes of records.
+/* ---------------------------------------------------------------- */
+void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 attributesize, tablerecSize;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize));
+
+ c_attributeRecordPool.setSize(attributesize);
+ c_attributeRecordHash.setSize(64);
+ c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
+ c_nodes.setSize(MAX_NODES);
+ c_pageRecordArray.setSize(ZNUMBER_OF_PAGES);
+ c_tableRecordPool.setSize(tablerecSize);
+ c_tableRecordHash.setSize(tablerecSize);
+ c_triggerRecordPool.setSize(c_maxNoOfTriggers);
+ c_triggerRecordHash.setSize(c_maxNoOfTriggers);
+ c_opRecordPool.setSize(256); // XXX need config params
+ c_opCreateTable.setSize(8);
+ c_opDropTable.setSize(8);
+ c_opCreateIndex.setSize(8);
+ c_opCreateEvent.setSize(8);
+ c_opSubEvent.setSize(8);
+ c_opDropEvent.setSize(8);
+ c_opSignalUtil.setSize(8);
+ c_opDropIndex.setSize(8);
+ c_opAlterIndex.setSize(8);
+ c_opBuildIndex.setSize(8);
+ c_opCreateTrigger.setSize(8);
+ c_opDropTrigger.setSize(8);
+ c_opAlterTrigger.setSize(8);
+
+ // Initialize BAT for interface to file system
+ PageRecordPtr pageRecPtr;
+ c_pageRecordArray.getPtr(pageRecPtr, 0);
+ NewVARIABLE* bat = allocateBat(2);
+ bat[1].WA = &pageRecPtr.p->word[0];
+ bat[1].nrr = ZNUMBER_OF_PAGES;
+ bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4;
+ bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements
+ bat[1].bits.v = 5; // 32 bits per element
+
+ initCommonData();
+ initRecords();
+
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+}//execREAD_CONFIG_REQ()
+
+/* ---------------------------------------------------------------- */
+// Start phase signals sent by CNTR. We reply with NDB_STTORRY when
+// we have completed this phase.
+/* ---------------------------------------------------------------- */
+void Dbdict::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ c_startPhase = signal->theData[2];
+ const Uint32 restartType = signal->theData[3];
+ if (restartType == NodeState::ST_INITIAL_START) {
+ jam();
+ c_initialStart = true;
+ } else if (restartType == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ c_systemRestart = true;
+ } else if (restartType == NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ c_initialNodeRestart = true;
+ } else if (restartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ c_nodeRestart = true;
+ } else {
+ ndbrequire(false);
+ }//if
+ switch (c_startPhase) {
+ case 1:
+ jam();
+ initSchemaFile(signal);
+ break;
+ case 3:
+ jam();
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ break;
+ case 6:
+ jam();
+ c_initialStart = false;
+ c_systemRestart = false;
+ c_initialNodeRestart = false;
+ c_nodeRestart = false;
+ sendNDB_STTORRY(signal);
+ break;
+ case 7:
+ // uses c_restartType
+ if(restartType == NodeState::ST_SYSTEM_RESTART &&
+ c_masterNodeId == getOwnNodeId()){
+ rebuildIndexes(signal, 0);
+ return;
+ }
+ sendNDB_STTORRY(signal);
+ break;
+ default:
+ jam();
+ sendNDB_STTORRY(signal);
+ break;
+ }//switch
+}//execNDB_STTOR()
+
+void Dbdict::sendNDB_STTORRY(Signal* signal)
+{
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//sendNDB_STTORRY()
+
+/* ---------------------------------------------------------------- */
+// We receive the information about which nodes are up and down.
+/* ---------------------------------------------------------------- */
+void Dbdict::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ c_numberNode = readNodes->noOfNodes;
+ c_masterNodeId = readNodes->masterNodeId;
+
+ c_noNodesFailed = 0;
+ c_aliveNodes.clear();
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ jam();
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
+ if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
+ jam();
+ /**-------------------------------------------------------------------
+ *
+ * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY.
+ * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE
+ * BLOCKSTATE TO BUSY TO AVOID ADDING TABLES WHILE NOT ALL NODES ARE
+ * ALIVE.
+ *------------------------------------------------------------------*/
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
+ c_noNodesFailed++;
+ } else {
+ c_aliveNodes.set(i);
+ }
+ }//if
+ }//for
+ sendNDB_STTORRY(signal);
+}//execREAD_NODESCONF()
+
+/* ---------------------------------------------------------------- */
+// HOT_SPAREREP informs DBDICT about which nodes have become
+// hot spare nodes.
+/* ---------------------------------------------------------------- */
+void Dbdict::execHOT_SPAREREP(Signal* signal)
+{
+ Uint32 hotSpareNodes = 0;
+ jamEntry();
+ HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ if (NodeBitmask::get(hotSpare->theHotSpareNodes, i)) {
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+ nodePtr.p->hotSpare = true;
+ hotSpareNodes++;
+ }//if
+ }//for
+ ndbrequire(hotSpareNodes == hotSpare->noHotSpareNodes);
+ c_noHotSpareNodes = hotSpareNodes;
+ return;
+}//execHOT_SPAREREP()
+
+void Dbdict::initSchemaFile(Signal* signal)
+{
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+ SchemaFile * schemaFile = (SchemaFile *)pagePtr.p;
+ initSchemaFile(schemaFile, 4 * ZSIZE_OF_PAGES_IN_WORDS);
+
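+ // Initial starts write a fresh schema file to disk; system and node
+ // restarts first read back the existing schema file into the old page.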
+ if (c_initialStart || c_initialNodeRestart) {
+ jam();
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::initSchemaFile_conf);
+
+ startWriteSchemaFile(signal);
+ } else if (c_systemRestart || c_nodeRestart) {
+ jam();
+ ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE);
+ c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+ startReadSchemaFile(signal);
+ } else {
+ ndbrequire(false);
+ }//if
+}//Dbdict::initSchemaFile()
+
+void
+Dbdict::initSchemaFile_conf(Signal* signal, Uint32 callbackData, Uint32 rv){
+ jam();
+ sendNDB_STTORRY(signal);
+}
+
+void
+Dbdict::activateIndexes(Signal* signal, Uint32 i)
+{
+ AlterIndxReq* req = (AlterIndxReq*)signal->getDataPtrSend();
+ TableRecordPtr tablePtr;
+ for (; i < c_tableRecordPool.getSize(); i++) {
+ tablePtr.i = i;
+ c_tableRecordPool.getPtr(tablePtr);
+ if (tablePtr.p->tabState != TableRecord::DEFINED)
+ continue;
+ if (! tablePtr.p->isIndex())
+ continue;
+ jam();
+ req->setUserRef(reference());
+ req->setConnectionPtr(i);
+ req->setTableId(tablePtr.p->primaryTableId);
+ req->setIndexId(tablePtr.i);
+ req->setIndexVersion(tablePtr.p->tableVersion);
+ req->setOnline(true);
+ if (c_restartType == NodeState::ST_SYSTEM_RESTART) {
+ if (c_masterNodeId != getOwnNodeId())
+ continue;
+ // from file index state is not defined currently
+ req->setRequestType(AlterIndxReq::RT_SYSTEMRESTART);
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ }
+ else if (
+ c_restartType == NodeState::ST_NODE_RESTART ||
+ c_restartType == NodeState::ST_INITIAL_NODE_RESTART) {
+ // from master index must be online
+ if (tablePtr.p->indexState != TableRecord::IS_ONLINE)
+ continue;
+ req->setRequestType(AlterIndxReq::RT_NODERESTART);
+ // activate locally, rebuild not needed
+ req->addRequestFlag((Uint32)RequestFlag::RF_LOCAL);
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ } else {
+ ndbrequire(false);
+ }
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+ return;
+ }
+ signal->theData[0] = reference();
+ sendSignal(c_restartRecord.returnBlockRef, GSN_DICTSTARTCONF,
+ signal, 1, JBB);
+}
+
+void
+Dbdict::rebuildIndexes(Signal* signal, Uint32 i){
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+
+ TableRecordPtr indexPtr;
+ for (; i < c_tableRecordPool.getSize(); i++) {
+ indexPtr.i = i;
+ c_tableRecordPool.getPtr(indexPtr);
+ if (indexPtr.p->tabState != TableRecord::DEFINED)
+ continue;
+ if (! indexPtr.p->isIndex())
+ continue;
+
+ jam();
+
+ req->setUserRef(reference());
+ req->setConnectionPtr(i);
+ req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART);
+ req->setBuildId(0); // not used
+ req->setBuildKey(0); // not used
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+
+ // from file index state is not defined currently
+ if (indexPtr.p->storedTable) {
+ // rebuild not needed
+ req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD);
+ }
+
+ // send
+ sendSignal(reference(), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ return;
+ }
+ sendNDB_STTORRY(signal);
+}
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: SYSTEM RESTART MODULE ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains code specific to system restart. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// DIH asks DICT to read in table data from disk during system
+// restart. DIH also asks DICT to send information about which
+// tables should be started as part of this system restart.
+// DICT will also activate the tables in TC as part of this process.
+/* ---------------------------------------------------------------- */
+void Dbdict::execDICTSTARTREQ(Signal* signal)
+{
+ jamEntry();
+ c_restartRecord.gciToRestart = signal->theData[0];
+ c_restartRecord.returnBlockRef = signal->theData[1];
+ if (c_nodeRestart || c_initialNodeRestart) {
+ jam();
+
+ CRASH_INSERTION(6000);
+
+ BlockReference dictRef = calcDictBlockRef(c_masterNodeId);
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(dictRef, GSN_GET_SCHEMA_INFOREQ, signal, 1, JBB);
+ return;
+ }
+ ndbrequire(c_systemRestart);
+ ndbrequire(c_masterNodeId == getOwnNodeId());
+
+ c_schemaRecord.m_callback.m_callbackData = 0;
+ c_schemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
+
+ c_restartRecord.activeTable = 0;
+ c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage;
+ checkSchemaStatus(signal);
+}//execDICTSTARTREQ()
+
+void
+Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
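+ // The master sends the old schema page to all other alive nodes, copies it
+ // into the current schema page, and then confirms its own node with
+ // SCHEMA_INFOCONF.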
+ c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+
+ LinearSectionPtr ptr[3];
+
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.oldSchemaPage);
+
+ ptr[0].p = &pagePtr.p->word[0];
+ ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+
+ c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+
+ rg.m_nodes.clear(getOwnNodeId());
+ Callback c = { 0, 0 };
+ sendFragmentedSignal(rg,
+ GSN_SCHEMA_INFO,
+ signal,
+ 1, //SchemaInfo::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+
+ PageRecordPtr newPagePtr;
+ c_pageRecordArray.getPtr(newPagePtr, c_schemaRecord.schemaPage);
+ memcpy(&newPagePtr.p->word[0], &pagePtr.p->word[0],
+ 4 * ZSIZE_OF_PAGES_IN_WORDS);
+
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
+}
+
+void
+Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
+
+ const Uint32 ref = signal->getSendersBlockRef();
+ //const Uint32 senderData = signal->theData[0];
+
+ ndbrequire(c_sendSchemaRecord.inUse == false);
+ c_sendSchemaRecord.inUse = true;
+
+ LinearSectionPtr ptr[3];
+
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ ptr[0].p = &pagePtr.p->word[0];
+ ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+
+ Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
+ sendFragmentedSignal(ref,
+ GSN_SCHEMA_INFO,
+ signal,
+ 1, //GetSchemaInfoConf::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+}//Dbdict::execGET_SCHEMA_INFOREQ()
+
+void
+Dbdict::sendSchemaComplete(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ ndbrequire(c_sendSchemaRecord.inUse == true);
+ c_sendSchemaRecord.inUse = false;
+
+}
+
+
+/* ---------------------------------------------------------------- */
+// We receive the schema info from the master as part of all restarts
+// except the initial start, where no tables exist.
+/* ---------------------------------------------------------------- */
+void Dbdict::execSCHEMA_INFO(Signal* signal)
+{
+ jamEntry();
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ CRASH_INSERTION(6001);
+ }
+
+ SegmentedSectionPtr schemaDataPtr;
+ signal->getSection(schemaDataPtr, 0);
+
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+ copy(&pagePtr.p->word[0], schemaDataPtr);
+ releaseSections(signal);
+
+ validateChecksum((SchemaFile*)pagePtr.p);
+
+ ndbrequire(signal->getSendersBlockRef() != reference());
+
+ /* ---------------------------------------------------------------- */
+ // Synchronise our view on data with other nodes in the cluster.
+ // This is an important part of restart handling where we will handle
+ // cases where a table has been added only partially, where a table
+ // has been deleted but the deletion has not yet completed, and
+ // other scenarios needing synchronisation.
+ /* ---------------------------------------------------------------- */
+ c_schemaRecord.m_callback.m_callbackData = 0;
+ c_schemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restart_checkSchemaStatusComplete);
+ c_restartRecord.activeTable = 0;
+ checkSchemaStatus(signal);
+}//execSCHEMA_INFO()
+
+void
+Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.m_callback.m_callbackData = 0;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restart_writeSchemaConf);
+
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::restart_writeSchemaConf(Signal * signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+
+ if(c_systemRestart){
+ jam();
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_SCHEMA_INFOCONF,
+ signal, 1, JBB);
+ return;
+ }
+
+ ndbrequire(c_nodeRestart || c_initialNodeRestart);
+ c_blockState = BS_IDLE;
+ activateIndexes(signal, 0);
+ return;
+}
+
+void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+/* ---------------------------------------------------------------- */
+// This signal is received in the master as part of system restart
+// from all nodes (including the master) after they have synchronised
+// their data with the master node's schema information.
+/* ---------------------------------------------------------------- */
+ const Uint32 nodeId = signal->theData[0];
+ c_sendSchemaRecord.m_SCHEMAINFO_Counter.clearWaitingFor(nodeId);
+
+ if (!c_sendSchemaRecord.m_SCHEMAINFO_Counter.done()){
+ jam();
+ return;
+ }//if
+ activateIndexes(signal, 0);
+}//execSCHEMA_INFOCONF()
+
+void Dbdict::checkSchemaStatus(Signal* signal)
+{
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ PageRecordPtr oldPagePtr;
+ c_pageRecordArray.getPtr(oldPagePtr, c_schemaRecord.oldSchemaPage);
+
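+ // For each table id, compare the entry in the current schema page (newEntry)
+ // with the entry in the old schema page (oldEntry) and decide whether to drop
+ // the table, fetch its definition from the master or read it from local disk.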
+ for (; c_restartRecord.activeTable < MAX_TABLES;
+ c_restartRecord.activeTable++) {
+ jam();
+
+ Uint32 tableId = c_restartRecord.activeTable;
+ SchemaFile::TableEntry *newEntry = getTableEntry(pagePtr.p, tableId);
+ SchemaFile::TableEntry *oldEntry = getTableEntry(oldPagePtr.p, tableId,
+ true);
+ SchemaFile::TableState schemaState =
+ (SchemaFile::TableState)newEntry->m_tableState;
+ SchemaFile::TableState oldSchemaState =
+ (SchemaFile::TableState)oldEntry->m_tableState;
+
+ if (c_restartRecord.activeTable >= c_tableRecordPool.getSize()) {
+ jam();
+ ndbrequire(schemaState == SchemaFile::INIT);
+ ndbrequire(oldSchemaState == SchemaFile::INIT);
+ continue;
+ }//if
+
+ switch(schemaState){
+ case SchemaFile::INIT:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ jam();
+ break;
+
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ jam();
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }//switch
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::ADD_STARTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ break;
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Add Table was started but not completed. Will be dropped in all
+ // nodes. Update schema information (restore table version).
+ //------------------------------------------------------------------
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::TABLE_ADD_COMMITTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was added in the master node but not in our node. We can
+ // retrieve the table definition from the master.
+ //------------------------------------------------------------------
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ break;
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was added in both our node and the master node. We can
+ // retrieve the table definition from our own disk.
+ //------------------------------------------------------------------
+ if(* newEntry == * oldEntry){
+ jam();
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ tablePtr.p->tableVersion = oldEntry->m_tableVersion;
+ tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
+
+ // On NR get index from master because index state is not on file
+ const bool file = c_systemRestart || tablePtr.p->isTable();
+ restartCreateTab(signal, tableId, oldEntry, file);
+
+ return;
+ } else {
+ //------------------------------------------------------------------
+ // Must be a new version of the table if anything differs. Both table
+ // version and global checkpoint must be different.
+ // This should not happen for the master node. This can happen after
+ // drop table followed by add table or after change table.
+ // Not supported in this version.
+ //------------------------------------------------------------------
+ ndbrequire(c_masterNodeId != getOwnNodeId());
+ ndbrequire(newEntry->m_tableVersion != oldEntry->m_tableVersion);
+ jam();
+
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ }//if
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:{
+ jam();
+ bool ok = false;
+ switch(oldSchemaState){
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ ok = true;
+ break;
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ newEntry->m_tableState = SchemaFile::INIT;
+ restartDropTab(signal, tableId);
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ case SchemaFile::ALTER_TABLE_COMMITTED: {
+ jam();
+ bool ok = false;
+ switch(oldSchemaState) {
+ case SchemaFile::INIT:
+ jam();
+ case SchemaFile::ADD_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_STARTED:
+ jam();
+ case SchemaFile::DROP_TABLE_COMMITTED:
+ jam();
+ case SchemaFile::TABLE_ADD_COMMITTED:
+ jam();
+ ok = true;
+ //------------------------------------------------------------------
+ // Table was altered in the master node but not in our node. We can
+ // retrieve the altered table definition from the master.
+ //------------------------------------------------------------------
+ restartCreateTab(signal, tableId, oldEntry, false);
+ return;
+ break;
+ case SchemaFile::ALTER_TABLE_COMMITTED:
+ jam();
+ ok = true;
+
+ //------------------------------------------------------------------
+ // Table was altered in both our node and the master node. We can
+ // retrieve the table definition from our own disk.
+ //------------------------------------------------------------------
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ tablePtr.p->tableVersion = oldEntry->m_tableVersion;
+ tablePtr.p->tableType = (DictTabInfo::TableType)oldEntry->m_tableType;
+
+ // On NR get index from master because index state is not on file
+ const bool file = c_systemRestart || tablePtr.p->isTable();
+ restartCreateTab(signal, tableId, oldEntry, file);
+
+ return;
+ }
+ ndbrequire(ok);
+ break;
+ }
+ }
+ }
+
+ execute(signal, c_schemaRecord.m_callback, 0);
+}//checkSchemaStatus()
+
+void
+Dbdict::restartCreateTab(Signal* signal, Uint32 tableId,
+ const SchemaFile::TableEntry * te, bool file){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ c_opCreateTable.seize(createTabPtr);
+ ndbrequire(!createTabPtr.isNull());
+
+ createTabPtr.p->key = ++c_opRecordSequence;
+ c_opCreateTable.add(createTabPtr);
+
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_tablePtrI = tableId;
+ createTabPtr.p->m_coordinatorRef = reference();
+ createTabPtr.p->m_senderRef = 0;
+ createTabPtr.p->m_senderData = RNIL;
+ createTabPtr.p->m_tabInfoPtrI = RNIL;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
+ if(file && !ERROR_INSERTED(6002)){
+ jam();
+
+ c_readTableRecord.noOfPages = te->m_noOfPages;
+ c_readTableRecord.pageId = 0;
+ c_readTableRecord.m_callback.m_callbackData = createTabPtr.p->key;
+ c_readTableRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_readTableConf);
+
+ startReadTableFile(signal, tableId);
+ return;
+ } else {
+
+ ndbrequire(c_masterNodeId != getOwnNodeId());
+
+ /**
+ * Get from master
+ */
+ GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = GetTabInfoReq::RequestById |
+ GetTabInfoReq::LongSignalConf;
+ req->tableId = tableId;
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+
+ if(ERROR_INSERTED(6002)){
+ NdbSleep_MilliSleep(10);
+ CRASH_INSERTION(6002);
+ }
+ }
+}
+
+void
+Dbdict::restartCreateTab_readTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ PageRecordPtr pageRecPtr;
+ c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId);
+
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::GetTabInfoConf;
+ parseRecord.errorCode = 0;
+
+ Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
+ SimplePropertiesLinearReader r(&pageRecPtr.p->word[0], sz);
+ handleTabInfoInit(r, &parseRecord);
+ ndbrequire(parseRecord.errorCode == 0);
+
+ /* ---------------------------------------------------------------- */
+ // We have read the table description from disk as part of system restart.
+ // We will also write it back again to ensure that both copies are ok.
+ /* ---------------------------------------------------------------- */
+ ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
+ c_writeTableRecord.noOfPages = c_readTableRecord.noOfPages;
+ c_writeTableRecord.pageId = c_readTableRecord.pageId;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
+ c_writeTableRecord.m_callback.m_callbackData = callbackData;
+ c_writeTableRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_writeTableConf);
+ startWriteTableFile(signal, c_readTableRecord.tableId);
+}
+
+void
+Dbdict::execGET_TABINFO_CONF(Signal* signal){
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
+
+ const Uint32 tableId = conf->tableId;
+ const Uint32 senderData = conf->senderData;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
+ ndbrequire(!createTabPtr.isNull());
+ ndbrequire(createTabPtr.p->m_tablePtrI == tableId);
+
+ /**
+ * Put data into table record
+ */
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::GetTabInfoConf;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+ handleTabInfoInit(r, &parseRecord);
+ ndbrequire(parseRecord.errorCode == 0);
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_writeTableConf);
+
+ signal->header.m_noOfSections = 0;
+ writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::restartCreateTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_dihComplete);
+
+ SegmentedSectionPtr fragDataPtr;
+ fragDataPtr.sz = 0;
+ fragDataPtr.setNull();
+ createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
+}
+
+void
+Dbdict::restartCreateTab_dihComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ //@todo check error
+ ndbrequire(createTabPtr.p->m_errorCode == 0);
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartCreateTab_activateComplete);
+
+ alterTab_activate(signal, createTabPtr, &callback);
+}
+
+void
+Dbdict::restartCreateTab_activateComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DEFINED;
+
+ c_opCreateTable.release(createTabPtr);
+
+ c_restartRecord.activeTable++;
+ checkSchemaStatus(signal);
+}
+
+void
+Dbdict::restartDropTab(Signal* signal, Uint32 tableId){
+
+ const Uint32 key = ++c_opRecordSequence;
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.seize(dropTabPtr));
+
+ dropTabPtr.p->key = key;
+ c_opDropTable.add(dropTabPtr);
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = tableId;
+ dropTabPtr.p->m_coordinatorRef = 0;
+ dropTabPtr.p->m_requestType = DropTabReq::RestartDropTab;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = key;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartDropTab_complete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::restartDropTab_complete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
+
+ //@todo check error
+
+ c_opDropTable.release(dropTabPtr);
+
+ c_restartRecord.activeTable++;
+ checkSchemaStatus(signal);
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: NODE FAILURE HANDLING ------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when nodes */
+/* (kernel/api) fail. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// We receive a report of an API that failed.
+/* ---------------------------------------------------------------- */
+void Dbdict::execAPI_FAILREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 failedApiNode = signal->theData[0];
+ BlockReference retRef = signal->theData[1];
+
+#if 0
+ Uint32 userNode = refToNode(c_connRecord.userBlockRef);
+ if (userNode == failedApiNode) {
+ jam();
+ c_connRecord.userBlockRef = (Uint32)-1;
+ }//if
+#endif
+
+ signal->theData[0] = failedApiNode;
+ signal->theData[1] = reference();
+ sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
+}//execAPI_FAILREQ()
+
+/* ---------------------------------------------------------------- */
+// We receive a report of one or more node failures of kernel nodes.
+/* ---------------------------------------------------------------- */
+void Dbdict::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ c_failureNr = nodeFail->failNo;
+ const Uint32 numberOfFailedNodes = nodeFail->noOfNodes;
+ const bool masterFailed = (c_masterNodeId != nodeFail->masterNodeId);
+ c_masterNodeId = nodeFail->masterNodeId;
+
+ c_noNodesFailed += numberOfFailedNodes;
+ Uint32 theFailedNodes[NodeBitmask::Size];
+ memcpy(theFailedNodes, nodeFail->theNodes, sizeof(theFailedNodes));
+
+ c_counterMgr.execNODE_FAILREP(signal);
+
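+ // If schema operations are outstanding, or the master failed during a create
+ // table, go to BS_NODE_FAILURE so that no new schema transactions start.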
+ bool ok = false;
+ switch(c_blockState){
+ case BS_IDLE:
+ jam();
+ ok = true;
+ if(c_opRecordPool.getSize() != c_opRecordPool.getNoOfFree()){
+ jam();
+ c_blockState = BS_NODE_FAILURE;
+ }
+ break;
+ case BS_CREATE_TAB:
+ jam();
+ ok = true;
+ if(!masterFailed)
+ break;
+ // fall through
+ case BS_BUSY:
+ case BS_NODE_FAILURE:
+ jam();
+ c_blockState = BS_NODE_FAILURE;
+ ok = true;
+ break;
+ }
+ ndbrequire(ok);
+
+ for(unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(theFailedNodes, i)) {
+ jam();
+ NodeRecordPtr nodePtr;
+ c_nodes.getPtr(nodePtr, i);
+
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD;
+ NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
+ nfCompRep->blockNo = DBDICT;
+ nfCompRep->nodeId = getOwnNodeId();
+ nfCompRep->failedNodeId = nodePtr.i;
+ sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+
+ c_aliveNodes.clear(i);
+ }//if
+ }//for
+
+}//execNODE_FAILREP()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: NODE START HANDLING --------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when kernel nodes */
+/* start. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// Include a starting node in the list of nodes that take part in adding
+// and dropping tables.
+/* ---------------------------------------------------------------- */
+void Dbdict::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ NodeRecordPtr nodePtr;
+ BlockReference retRef = signal->theData[0];
+ nodePtr.i = signal->theData[1];
+
+ ndbrequire(c_noNodesFailed > 0);
+ c_noNodesFailed--;
+
+ c_nodes.getPtr(nodePtr);
+ ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD);
+ nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
+ signal->theData[0] = reference();
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+
+ c_aliveNodes.set(nodePtr.i);
+}//execINCL_NODEREQ()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: ADD TABLE HANDLING ---------------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used when adding a table. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+/* ---------------------------------------------------------------- */
+// This signal carries information about a table and is received from
+// either the API, Ndbcntr or another DICT block.
+/* ---------------------------------------------------------------- */
+void
+Dbdict::execCREATE_TABLE_REQ(Signal* signal){
+ jamEntry();
+ if(!assembleFragments(signal)){
+ return;
+ }
+
+ CreateTableReq* const req = (CreateTableReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+
+ ParseDictTabInfoRecord parseRecord;
+ do {
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ parseRecord.errorCode = CreateTableRef::NotMaster;
+ break;
+ }
+
+ if (c_blockState != BS_IDLE){
+ jam();
+ parseRecord.errorCode = CreateTableRef::Busy;
+ break;
+ }
+
+ CreateTableRecordPtr createTabPtr;
+ c_opCreateTable.seize(createTabPtr);
+
+ if(createTabPtr.isNull()){
+ jam();
+ parseRecord.errorCode = CreateTableRef::Busy;
+ break;
+ }
+
+ parseRecord.requestType = DictTabInfo::CreateTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, CreateTableReq::DICT_TAB_INFO);
+ SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord);
+ releaseSections(signal);
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ break;
+ }
+
+ createTabPtr.p->key = ++c_opRecordSequence;
+ c_opCreateTable.add(createTabPtr);
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_senderRef = senderRef;
+ createTabPtr.p->m_senderData = senderData;
+ createTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i;
+ createTabPtr.p->m_coordinatorRef = reference();
+ createTabPtr.p->m_fragmentsPtrI = RNIL;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
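+ // Ask DBDIH to compute the fragmentation for the new table.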
+ Uint32 * theData = signal->getDataPtrSend();
+ CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
+ req->noOfFragments = 0;
+ req->fragmentNode = 0;
+ req->primaryTableId = RNIL;
+ if (parseRecord.tablePtr.p->isOrderedIndex()) {
+ // ordered index has same fragmentation as the table
+ const Uint32 primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ TableRecordPtr primaryTablePtr;
+ c_tableRecordPool.getPtr(primaryTablePtr, primaryTableId);
+ // fragmentationType must be consistent
+ req->fragmentationType = primaryTablePtr.p->fragmentType;
+ req->primaryTableId = primaryTableId;
+ }
+ sendSignal(DBDIH_REF, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength, JBB);
+
+ c_blockState = BS_CREATE_TAB;
+ return;
+ } while(0);
+
+ /**
+ * Something went wrong
+ */
+ releaseSections(signal);
+
+ CreateTableRef * ref = (CreateTableRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->masterNodeId = c_masterNodeId;
+ ref->errorCode = parseRecord.errorCode;
+ ref->errorLine = parseRecord.errorLine;
+ ref->errorKey = parseRecord.errorKey;
+ ref->status = parseRecord.status;
+ sendSignal(senderRef, GSN_CREATE_TABLE_REF, signal,
+ CreateTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::execALTER_TABLE_REQ(Signal* signal)
+{
+ // Received by master
+ jamEntry();
+ if(!assembleFragments(signal)){
+ return;
+ }
+ AlterTableReq* const req = (AlterTableReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ ParseDictTabInfoRecord* aParseRecord;
+
+ // Get table definition
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ }
+
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NotMaster);
+ return;
+ }
+
+ if(c_blockState != BS_IDLE){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ alterTableRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ alterTableRef(signal, req, AlterTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion != tableVersion){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::InvalidTableVersion);
+ return;
+ }
+ // Parse the new table definition
+ ParseDictTabInfoRecord parseRecord;
+ aParseRecord = &parseRecord;
+
+ CreateTableRecordPtr alterTabPtr; // Reuse create table records
+ c_opCreateTable.seize(alterTabPtr);
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+
+ if(alterTabPtr.isNull()){
+ jam();
+ alterTableRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+
+ regAlterTabPtr->m_changeMask = changeMask;
+ parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, AlterTableReq::DICT_TAB_INFO);
+ SimplePropertiesSectionReader r(ptr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord, false); // Will not save info
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTableRef(signal, req,
+ (AlterTableRef::ErrorCode) parseRecord.errorCode,
+ aParseRecord);
+ return;
+ }
+
+ releaseSections(signal);
+ regAlterTabPtr->key = ++c_opRecordSequence;
+ c_opCreateTable.add(alterTabPtr);
+ ndbrequire(c_opCreateTable.find(alterTabPtr, regAlterTabPtr->key));
+ regAlterTabPtr->m_errorCode = 0;
+ regAlterTabPtr->m_senderRef = senderRef;
+ regAlterTabPtr->m_senderData = senderData;
+ regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
+ regAlterTabPtr->m_alterTableFailed = false;
+ regAlterTabPtr->m_coordinatorRef = reference();
+ regAlterTabPtr->m_fragmentsPtrI = RNIL;
+ regAlterTabPtr->m_dihAddFragPtr = RNIL;
+
+ // Alter table on all nodes
+ c_blockState = BS_BUSY;
+
+ // Send prepare request to all alive nodes
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, parseRecord.tablePtr);
+
+ SegmentedSectionPtr tabInfoPtr;
+ w.getPtr(tabInfoPtr);
+ signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion + 1;
+ lreq->gci = tablePtr.p->gciTableCreated;
+ lreq->requestType = AlterTabReq::AlterTablePrepare;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+
+}
+
+void Dbdict::alterTableRef(Signal * signal,
+ AlterTableReq * req,
+ AlterTableRef::ErrorCode errCode,
+ ParseDictTabInfoRecord* parseRecord)
+{
+ jam();
+ releaseSections(signal);
+ AlterTableRef * ref = (AlterTableRef*)signal->getDataPtrSend();
+ Uint32 senderRef = req->senderRef;
+ ref->senderData = req->senderData;
+ ref->senderRef = reference();
+ ref->masterNodeId = c_masterNodeId;
+ if (parseRecord) {
+ ref->errorCode = parseRecord->errorCode;
+ ref->errorLine = parseRecord->errorLine;
+ ref->errorKey = parseRecord->errorKey;
+ ref->status = parseRecord->status;
+ }
+ else {
+ ref->errorCode = errCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->status = 0;
+ }
+ sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::execALTER_TAB_REQ(Signal * signal)
+{
+ // Received in all nodes to handle change locally
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ return;
+ }
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+
+ CreateTableRecordPtr alterTabPtr; // Reuse create table records
+
+ if (senderRef != reference()) {
+ jam();
+ c_blockState = BS_BUSY;
+ }
+ if ((requestType == AlterTabReq::AlterTablePrepare)
+ && (senderRef != reference())) {
+ jam();
+ c_opCreateTable.seize(alterTabPtr);
+ if(!alterTabPtr.isNull())
+ alterTabPtr.p->m_changeMask = changeMask;
+ }
+ else {
+ jam();
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ }
+ if(alterTabPtr.isNull()){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::Busy);
+ return;
+ }
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ regAlterTabPtr->m_alterTableId = tableId;
+ regAlterTabPtr->m_coordinatorRef = senderRef;
+
+ // Get table definition
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ }
+
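+ // Three phases: prepare parses and applies the change locally and forwards it
+ // to DBLQH, commit writes the altered schema to disk, and revert undoes a
+ // failed alter and acknowledges it back to the coordinator.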
+ switch(requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ ParseDictTabInfoRecord* aParseRecord;
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ alterTabRef(signal, req, AlterTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ alterTabRef(signal, req, AlterTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion + 1 != tableVersion){
+ jam();
+ alterTabRef(signal, req, AlterTableRef::InvalidTableVersion);
+ return;
+ }
+ TableRecordPtr newTablePtr;
+ if (senderRef != reference()) {
+ jam();
+ // Parse the altered table definition
+ ParseDictTabInfoRecord parseRecord;
+ aParseRecord = &parseRecord;
+
+ parseRecord.requestType = DictTabInfo::AlterTableFromAPI;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord, false); // Will not save info
+
+ if(parseRecord.errorCode != 0){
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTabRef(signal, req,
+ (AlterTableRef::ErrorCode) parseRecord.errorCode,
+ aParseRecord);
+ return;
+ }
+ regAlterTabPtr->key = senderData;
+ c_opCreateTable.add(alterTabPtr);
+ regAlterTabPtr->m_errorCode = 0;
+ regAlterTabPtr->m_senderRef = senderRef;
+ regAlterTabPtr->m_senderData = senderData;
+ regAlterTabPtr->m_tablePtrI = parseRecord.tablePtr.i;
+ regAlterTabPtr->m_fragmentsPtrI = RNIL;
+ regAlterTabPtr->m_dihAddFragPtr = RNIL;
+ newTablePtr = parseRecord.tablePtr;
+ newTablePtr.p->tableVersion = tableVersion;
+ }
+ else { // (req->senderRef == reference())
+ jam();
+ c_tableRecordPool.getPtr(newTablePtr, regAlterTabPtr->m_tablePtrI);
+ newTablePtr.p->tableVersion = tableVersion;
+ }
+ if (handleAlterTab(req, regAlterTabPtr, tablePtr, newTablePtr) == -1) {
+ jam();
+ c_opCreateTable.release(alterTabPtr);
+ alterTabRef(signal, req, AlterTableRef::UnsupportedChange);
+ return;
+ }
+ releaseSections(signal);
+ // Propagate alter table to other local blocks
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBLQH_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case(AlterTabReq::AlterTableCommit): {
+ jam();
+ // Write schema for altered table to disk
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
+ regAlterTabPtr->m_tabInfoPtrI = tabInfoPtr.i;
+
+ signal->header.m_noOfSections = 0;
+
+ // Update table record
+ tablePtr.p->packedSize = tabInfoPtr.sz;
+ tablePtr.p->tableVersion = tableVersion;
+ tablePtr.p->gciTableCreated = gci;
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tableVersion;
+ tabEntry.m_tableType = tablePtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED;
+ tabEntry.m_gcp = gci;
+ tabEntry.m_noOfPages =
+ DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+
+ Callback callback;
+ callback.m_callbackData = senderData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::alterTab_writeSchemaConf);
+
+ updateSchemaState(signal, tableId, &tabEntry, &callback);
+ break;
+ }
+ case(AlterTabReq::AlterTableRevert): {
+ jam();
+ // Revert failed alter table
+ revertAlterTable(signal, changeMask, tableId, regAlterTabPtr);
+ // Acknowledge the reverted alter table
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
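+// Reject an ALTER_TAB_REQ: release the signal sections, fill in either the
+// parse error details (when a parse record is supplied) or the plain error
+// code, send ALTER_TAB_REF back to the sender and set the block back to idle.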
+void Dbdict::alterTabRef(Signal * signal,
+ AlterTabReq * req,
+ AlterTableRef::ErrorCode errCode,
+ ParseDictTabInfoRecord* parseRecord)
+{
+ jam();
+ releaseSections(signal);
+ AlterTabRef * ref = (AlterTabRef*)signal->getDataPtrSend();
+ Uint32 senderRef = req->senderRef;
+ ref->senderData = req->senderData;
+ ref->senderRef = reference();
+ if (parseRecord) {
+ jam();
+ ref->errorCode = parseRecord->errorCode;
+ ref->errorLine = parseRecord->errorLine;
+ ref->errorKey = parseRecord->errorKey;
+ ref->errorStatus = parseRecord->status;
+ }
+ else {
+ jam();
+ ref->errorCode = errCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->errorStatus = 0;
+ }
+ sendSignal(senderRef, GSN_ALTER_TAB_REF, signal,
+ AlterTabRef::SignalLength, JBB);
+
+ c_blockState = BS_IDLE;
+}
+
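+// ALTER_TAB_REF from a participant. The node is cleared from the wait mask;
+// a failure during prepare leads to a revert broadcast once all answers are
+// in, and a failure during commit or revert is answered with ALTER_TABLE_REF
+// when the last node has replied.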
+void Dbdict::execALTER_TAB_REF(Signal * signal){
+ jamEntry();
+
+ AlterTabRef * ref = (AlterTabRef*)signal->getDataPtr();
+
+ Uint32 senderRef = ref->senderRef;
+ Uint32 senderData = ref->senderData;
+ Uint32 errorCode = ref->errorCode;
+ Uint32 errorLine = ref->errorLine;
+ Uint32 errorKey = ref->errorKey;
+ Uint32 errorStatus = ref->errorStatus;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) ref->requestType;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 changeMask = regAlterTabPtr->m_changeMask;
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ switch (requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ if (safeCounter.done()) {
+ jam();
+ // Send revert request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
+ Uint32 tableId = tablePtr.p->tableId;
+ Uint32 tableVersion = tablePtr.p->tableVersion;
+ Uint32 gci = tablePtr.p->gciTableCreated;
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableRevert;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ else {
+ jam();
+ regAlterTabPtr->m_alterTableFailed = true;
+ }
+ break;
+ }
+ case(AlterTabReq::AlterTableCommit):
+ jam();
+ case(AlterTabReq::AlterTableRevert): {
+ AlterTableRef * apiRef = (AlterTableRef*)signal->getDataPtrSend();
+
+ apiRef->senderData = senderData;
+ apiRef->senderRef = reference();
+ apiRef->masterNodeId = c_masterNodeId;
+ apiRef->errorCode = errorCode;
+ apiRef->errorLine = errorLine;
+ apiRef->errorKey = errorKey;
+ apiRef->status = errorStatus;
+ if (safeCounter.done()) {
+ jam();
+ sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+ c_blockState = BS_IDLE;
+ }
+ else {
+ jam();
+ regAlterTabPtr->m_alterTableFailed = true;
+ regAlterTabPtr->m_alterTableRef = *apiRef;
+ }
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
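+// ALTER_TAB_CONF handling. During prepare each participant chains the
+// request through its local blocks (DBLQH -> DBDIH -> DBTC) before
+// confirming to the coordinator; the coordinator waits for all nodes and
+// then broadcasts commit, or revert if any node reported failure. After
+// commit/revert the client is answered and the operation records released.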
+void
+Dbdict::execALTER_TAB_CONF(Signal * signal){
+ jamEntry();
+ AlterTabConf * const conf = (AlterTabConf*)signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+ Uint32 senderData = conf->senderData;
+ Uint32 changeMask = conf->changeMask;
+ Uint32 tableId = conf->tableId;
+ Uint32 tableVersion = conf->tableVersion;
+ Uint32 gci = conf->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) conf->requestType;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, senderData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+
+ switch (requestType) {
+ case(AlterTabReq::AlterTablePrepare): {
+ switch(refToBlock(signal->getSendersBlockRef())) {
+ case DBLQH: {
+ jam();
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBDIH_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case DBDIH: {
+ jam();
+ AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->changeMask = changeMask;
+ req->tableId = tableId;
+ req->tableVersion = tableVersion;
+ req->gci = gci;
+ req->requestType = requestType;
+ sendSignal(DBTC_REF, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ return;
+ }
+ case DBTC: {
+ jam();
+ // Participant is done with prepare phase, send conf to coordinator
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(regAlterTabPtr->m_coordinatorRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ return;
+ }
+ default: break;
+ }
+ // Coordinator only
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ if (safeCounter.done()) {
+ jam();
+ // We have received all local confirmations
+ if (regAlterTabPtr->m_alterTableFailed) {
+ jam();
+ // Send revert request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, regAlterTabPtr->m_alterTableId);
+ Uint32 tableId = tablePtr.p->tableId;
+ Uint32 tableVersion = tablePtr.p->tableVersion;
+ Uint32 gci = tablePtr.p->gciTableCreated;
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableRevert;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ else {
+ jam();
+ // Send commit request to all alive nodes
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tablePtr);
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+ signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ regAlterTabPtr->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ;
+ safeCounter.init<AlterTabRef>(rg, regAlterTabPtr->key);
+
+ AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend();
+ lreq->senderRef = reference();
+ lreq->senderData = regAlterTabPtr->key;
+ lreq->clientRef = regAlterTabPtr->m_senderRef;
+ lreq->clientData = regAlterTabPtr->m_senderData;
+ lreq->changeMask = changeMask;
+ lreq->tableId = tableId;
+ lreq->tableVersion = tableVersion;
+ lreq->gci = gci;
+ lreq->requestType = AlterTabReq::AlterTableCommit;
+
+ sendSignal(rg, GSN_ALTER_TAB_REQ, signal,
+ AlterTabReq::SignalLength, JBB);
+ }
+ }
+ else {
+ // (!safeCounter.done())
+ jam();
+ }
+ break;
+ }
+ case(AlterTabReq::AlterTableRevert):
+ jam();
+ case(AlterTabReq::AlterTableCommit): {
+ SafeCounter safeCounter(c_counterMgr, regAlterTabPtr->m_coordinatorData.m_counter);
+ safeCounter.clearWaitingFor(refToNode(senderRef));
+ if (safeCounter.done()) {
+ jam();
+ // We have received all local confirmations
+ releaseSections(signal);
+ if (regAlterTabPtr->m_alterTableFailed) {
+ jam();
+ AlterTableRef * apiRef =
+ (AlterTableRef*)signal->getDataPtrSend();
+ *apiRef = regAlterTabPtr->m_alterTableRef;
+ sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_REF, signal,
+ AlterTableRef::SignalLength, JBB);
+ }
+ else {
+ jam();
+ // Alter table completed, inform API
+ AlterTableConf * const apiConf =
+ (AlterTableConf*)signal->getDataPtrSend();
+ apiConf->senderRef = reference();
+ apiConf->senderData = regAlterTabPtr->m_senderData;
+ apiConf->tableId = tableId;
+ apiConf->tableVersion = tableVersion;
+
+ //@todo check api failed
+ sendSignal(regAlterTabPtr->m_senderRef, GSN_ALTER_TABLE_CONF, signal,
+ AlterTableConf::SignalLength, JBB);
+ }
+
+ // Release resources
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
+ releaseTableObject(tabPtr.i, false);
+ c_opCreateTable.release(alterTabPtr);
+ c_blockState = BS_IDLE;
+ }
+ else {
+ // (!safeCounter.done())
+ jam();
+ }
+ break;
+ }
+ default: ndbrequire(false);
+ }
+}
+
+// For debugging
+inline
+void Dbdict::printTables()
+{
+ DLHashTable<TableRecord>::Iterator iter;
+ bool moreTables = c_tableRecordHash.first(iter);
+ printf("TABLES IN DICT:\n");
+ while (moreTables) {
+ TableRecordPtr tablePtr = iter.curr;
+ printf("%s ", tablePtr.p->tableName);
+ moreTables = c_tableRecordHash.next(iter);
+ }
+ printf("\n");
+}
+
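+// Apply the prepared change to the original table record. Only a rename
+// (name flag in the change mask) is handled: the old name is saved for a
+// possible revert, the record is re-keyed in the name hash and the new
+// schema version is taken over. Any other change returns -1, which the
+// caller maps to UnsupportedChange.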
+int Dbdict::handleAlterTab(AlterTabReq * req,
+ CreateTableRecord * regAlterTabPtr,
+ TableRecordPtr origTablePtr,
+ TableRecordPtr newTablePtr)
+{
+ Uint32 changeMask = req->changeMask;
+
+ if (AlterTableReq::getNameFlag(changeMask)) {
+ jam();
+ // Table rename
+ // Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
+ c_tableRecordHash.remove(origTablePtr);
+ strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName);
+ strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName);
+ // Set new schema version
+ origTablePtr.p->tableVersion = newTablePtr.p->tableVersion;
+ // Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
+ c_tableRecordHash.add(origTablePtr);
+
+ return 0;
+ }
+ jam();
+ return -1;
+}
+
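+// Undo a prepared but not committed alter. For a rename the saved name is
+// restored and the schema version stepped back by one; any other change mask
+// is not expected here.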
+void Dbdict::revertAlterTable(Signal * signal,
+ Uint32 changeMask,
+ Uint32 tableId,
+ CreateTableRecord * regAlterTabPtr)
+{
+ if (AlterTableReq::getNameFlag(changeMask)) {
+ jam();
+ // Table rename
+ // Restore previous name
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ // Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.remove(tablePtr);
+ // Restore name
+ strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName);
+ // Revert schema version
+ tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
+ // Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.add(tablePtr);
+
+ return;
+ }
+
+ ndbrequire(false);
+}
+
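+// Callback for the commit phase after the schema file entry has been
+// updated: write the packed table definition to the table file and continue
+// in alterTab_writeTableConf.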
+void
+Dbdict::alterTab_writeSchemaConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ Uint32 key = callbackData;
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, key));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 tableId = regAlterTabPtr->m_alterTableId;
+
+ Callback callback;
+ callback.m_callbackData = regAlterTabPtr->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::alterTab_writeTableConf);
+
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, regAlterTabPtr->m_tabInfoPtrI);
+
+ writeTableFile(signal, tableId, tabInfoPtr, &callback);
+
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::alterTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ CreateTableRecordPtr alterTabPtr;
+ ndbrequire(c_opCreateTable.find(alterTabPtr, callbackData));
+ CreateTableRecord * regAlterTabPtr = alterTabPtr.p;
+ Uint32 coordinatorRef = regAlterTabPtr->m_coordinatorRef;
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_alterTableId);
+
+ // Alter table commit request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = callbackData;
+ conf->tableId = tabPtr.p->tableId;
+ conf->tableVersion = tabPtr.p->tableVersion;
+ conf->gci = tabPtr.p->gciTableCreated;
+ conf->requestType = AlterTabReq::AlterTableCommit;
+ sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+ if(coordinatorRef != reference()) {
+ jam();
+ // Release resources
+ c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
+ releaseTableObject(tabPtr.i, false);
+ c_opCreateTable.release(alterTabPtr);
+ c_blockState = BS_IDLE;
+ }
+}
+
+void
+Dbdict::execCREATE_FRAGMENTATION_REF(Signal * signal){
+ jamEntry();
+ const Uint32 * theData = signal->getDataPtr();
+ CreateFragmentationRef * const ref = (CreateFragmentationRef*)theData;
+ (void)ref;
+ ndbrequire(false);
+}
+
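+// DIH has computed the fragmentation. Record the fragment count, step the
+// table version, pack the table definition and broadcast CREATE_TAB_REQ
+// (prepare) with DICT_TAB_INFO and FRAGMENTATION sections to all alive DICT
+// blocks.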
+void
+Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
+ jamEntry();
+ const Uint32 * theData = signal->getDataPtr();
+ CreateFragmentationConf * const conf = (CreateFragmentationConf*)theData;
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ ndbrequire(signal->getNoOfSections() == 1);
+
+ SegmentedSectionPtr fragDataPtr;
+ signal->getSection(fragDataPtr, CreateFragmentationConf::FRAGMENTS);
+ signal->header.m_noOfSections = 0;
+
+ /**
+ * Get table
+ */
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ /**
+ * Save fragment count
+ */
+ tabPtr.p->fragmentCount = conf->noOfFragments;
+
+ /**
+ * Update table version
+ */
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+ SchemaFile::TableEntry * tabEntry = getTableEntry(pagePtr.p, tabPtr.i);
+
+ tabPtr.p->tableVersion = tabEntry->m_tableVersion + 1;
+
+ /**
+ * Pack
+ */
+ SimplePropertiesSectionWriter w(getSectionSegmentPool());
+ packTableIntoPagesImpl(w, tabPtr);
+
+ SegmentedSectionPtr spDataPtr;
+ w.getPtr(spDataPtr);
+
+ signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
+ signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTablePrepare;
+ tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
+
+ CreateTabReq * const req = (CreateTabReq*)theData;
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->clientRef = createTabPtr.p->m_senderRef;
+ req->clientData = createTabPtr.p->m_senderData;
+ req->requestType = CreateTabReq::CreateTablePrepare;
+
+ req->gci = 0;
+ req->tableId = tabPtr.i;
+ req->tableVersion = tabEntry->m_tableVersion + 1;
+
+ sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+
+ return;
+}
+
+void
+Dbdict::execCREATE_TAB_REF(Signal* signal){
+ jamEntry();
+
+ CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
+
+ if(ref->errorCode != CreateTabRef::NF_FakeErrorREF){
+ createTabPtr.p->setErrorCode(ref->errorCode);
+ }
+ createTab_reply(signal, createTabPtr, refToNode(ref->senderRef));
+}
+
+void
+Dbdict::execCREATE_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ ndbrequire(createTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ);
+
+ createTab_reply(signal, createTabPtr, refToNode(conf->senderRef));
+}
+
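+// Coordinator bookkeeping for CREATE_TAB replies. When every participant has
+// answered the current phase: a failed prepare is turned into a
+// CreateTableDrop broadcast, a successful prepare takes the start-LCP mutex
+// and then broadcasts commit, and a completed drop sends CREATE_TABLE_REF
+// back to the client.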
+void
+Dbdict::createTab_reply(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ Uint32 nodeId)
+{
+
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ if(!tmp.clearWaitingFor(nodeId)){
+ jam();
+ return;
+ }
+
+ switch(createTabPtr.p->m_coordinatorData.m_requestType){
+ case CreateTabReq::CreateTablePrepare:{
+
+ if(createTabPtr.p->m_errorCode != 0){
+ jam();
+ /**
+ * Failed to prepare on at least one node -> abort on all
+ */
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableDrop;
+ ndbrequire(tmp.init<CreateTabRef>(rg, createTabPtr.p->key));
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = CreateTabReq::CreateTableDrop;
+
+ sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+ return;
+ }
+
+ /**
+ * Lock mutex before committing table
+ */
+ Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
+ Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_locked),
+ createTabPtr.p->key};
+
+ ndbrequire(mutex.lock(c));
+ return;
+ }
+ case CreateTabReq::CreateTableCommit:{
+ jam();
+ ndbrequire(createTabPtr.p->m_errorCode == 0);
+
+ /**
+ * Unlock mutex after committing table
+ */
+ Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex);
+ Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_unlocked),
+ createTabPtr.p->key};
+ mutex.unlock(c);
+ return;
+ }
+ case CreateTabReq::CreateTableDrop:{
+ jam();
+ CreateTableRef * const ref = (CreateTableRef*)signal->getDataPtr();
+ ref->senderRef = reference();
+ ref->senderData = createTabPtr.p->m_senderData;
+ ref->errorCode = createTabPtr.p->m_errorCode;
+ ref->masterNodeId = c_masterNodeId;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = 0;
+
+ //@todo check api failed
+ sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal,
+ CreateTableRef::SignalLength, JBB);
+ c_opCreateTable.release(createTabPtr);
+ c_blockState = BS_IDLE;
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTab_startLcpMutex_locked(Signal* signal,
+ Uint32 callbackData,
+ Uint32 retValue){
+ jamEntry();
+
+ ndbrequire(retValue == 0);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
+ createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableCommit;
+ SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
+ tmp.init<CreateTabRef>(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key);
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = createTabPtr.p->key;
+ req->requestType = CreateTabReq::CreateTableCommit;
+
+ sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTab_startLcpMutex_unlocked(Signal* signal,
+ Uint32 callbackData,
+ Uint32 retValue){
+ jamEntry();
+
+ ndbrequire(retValue == 0);
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ createTabPtr.p->m_startLcpMutex.release(c_mutexMgr);
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->m_senderData;
+ conf->tableId = createTabPtr.p->m_tablePtrI;
+ conf->tableVersion = tabPtr.p->tableVersion;
+
+ //@todo check api failed
+ sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_CONF, signal,
+ CreateTableConf::SignalLength, JBB);
+ c_opCreateTable.release(createTabPtr);
+ c_blockState = BS_IDLE;
+ return;
+}
+
+/***********************************************************
+ * CreateTable participant code
+ **********************************************************/
+void
+Dbdict::execCREATE_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ CreateTabReq * const req = (CreateTabReq*)signal->getDataPtr();
+
+ CreateTabReq::RequestType rt = (CreateTabReq::RequestType)req->requestType;
+ switch(rt){
+ case CreateTabReq::CreateTablePrepare:
+ CRASH_INSERTION2(6003, getOwnNodeId() != c_masterNodeId);
+ createTab_prepare(signal, req);
+ return;
+ case CreateTabReq::CreateTableCommit:
+ CRASH_INSERTION2(6004, getOwnNodeId() != c_masterNodeId);
+ createTab_commit(signal, req);
+ return;
+ case CreateTabReq::CreateTableDrop:
+ CRASH_INSERTION2(6005, getOwnNodeId() != c_masterNodeId);
+ createTab_drop(signal, req);
+ return;
+ }
+ ndbrequire(false);
+}
+
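+// Participant prepare phase. A participant that is not the coordinator
+// parses DICT_TAB_INFO into a fresh table record; the schema file entry is
+// then set to ADD_STARTED, after which the table file is written and the
+// table is added in DIH.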
+void
+Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){
+
+ const Uint32 gci = req->gci;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+
+ SegmentedSectionPtr tabInfoPtr;
+ signal->getSection(tabInfoPtr, CreateTabReq::DICT_TAB_INFO);
+
+ CreateTableRecordPtr createTabPtr;
+ if(req->senderRef == reference()){
+ jam();
+ ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
+ } else {
+ jam();
+ c_opCreateTable.seize(createTabPtr);
+
+ ndbrequire(!createTabPtr.isNull());
+
+ createTabPtr.p->key = req->senderData;
+ c_opCreateTable.add(createTabPtr);
+ createTabPtr.p->m_errorCode = 0;
+ createTabPtr.p->m_tablePtrI = tableId;
+ createTabPtr.p->m_coordinatorRef = req->senderRef;
+ createTabPtr.p->m_senderRef = req->clientRef;
+ createTabPtr.p->m_senderData = req->clientData;
+ createTabPtr.p->m_dihAddFragPtr = RNIL;
+
+ /**
+ * Put data into table record
+ */
+ ParseDictTabInfoRecord parseRecord;
+ parseRecord.requestType = DictTabInfo::AddTableFromDict;
+ parseRecord.errorCode = 0;
+
+ SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool());
+
+ handleTabInfoInit(r, &parseRecord);
+
+ ndbrequire(parseRecord.errorCode == 0);
+ }
+
+ ndbrequire(!createTabPtr.isNull());
+
+ SegmentedSectionPtr fragPtr;
+ signal->getSection(fragPtr, CreateTabReq::FRAGMENTATION);
+
+ createTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i;
+ createTabPtr.p->m_fragmentsPtrI = fragPtr.i;
+
+ signal->header.m_noOfSections = 0;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, tableId);
+ tabPtr.p->packedSize = tabInfoPtr.sz;
+ tabPtr.p->tableVersion = tableVersion;
+ tabPtr.p->gciTableCreated = gci;
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tableVersion;
+ tabEntry.m_tableType = tabPtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::ADD_STARTED;
+ tabEntry.m_gcp = gci;
+ tabEntry.m_noOfPages =
+ DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeSchemaConf1);
+
+ updateSchemaState(signal, tableId, &tabEntry, &callback);
+}
+
+void getSection(SegmentedSectionPtr & ptr, Uint32 i);
+
+void
+Dbdict::createTab_writeSchemaConf1(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeTableConf);
+
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
+ writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
+
+ createTabPtr.p->m_tabInfoPtrI = RNIL;
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+}
+
+void
+Dbdict::createTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ SegmentedSectionPtr fragDataPtr;
+ getSection(fragDataPtr, createTabPtr.p->m_fragmentsPtrI);
+
+ Callback callback;
+ callback.m_callbackData = callbackData;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_dihComplete);
+
+ createTab_dih(signal, createTabPtr, fragDataPtr, &callback);
+}
+
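+// Hand the new table over to DIH: build DIADDTABREQ from the table record
+// and attach the fragmentation section when one is present.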
+void
+Dbdict::createTab_dih(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ SegmentedSectionPtr fragDataPtr,
+ Callback * c){
+ jam();
+
+ createTabPtr.p->m_callback = * c;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ DiAddTabReq * req = (DiAddTabReq*)signal->getDataPtrSend();
+ req->connectPtr = createTabPtr.p->key;
+ req->tableId = tabPtr.i;
+ req->fragType = tabPtr.p->fragmentType;
+ req->kValue = tabPtr.p->kValue;
+ req->noOfReplicas = 0;
+ req->storedTable = tabPtr.p->storedTable;
+ req->tableType = tabPtr.p->tableType;
+ req->schemaVersion = tabPtr.p->tableVersion;
+ req->primaryTableId = tabPtr.p->primaryTableId;
+
+ if(!fragDataPtr.isNull()){
+ signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
+ }
+
+ sendSignal(DBDIH_REF, GSN_DIADDTABREQ, signal,
+ DiAddTabReq::SignalLength, JBB);
+}
+
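+// Compute linear hash address bits for a fragment: distrBits is the number
+// of bits needed to cover totalFragments, reduced by one for fragment ids in
+// the unsplit range when totalFragments is not a power of two. pageBits is
+// currently always zero.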
+static
+void
+calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits,
+ Uint32 fid, Uint32 totalFragments)
+{
+ Uint32 distrBits = 0;
+ Uint32 pageBits = 0;
+
+ Uint32 tmp = 1;
+ while (tmp < totalFragments) {
+ jam();
+ tmp <<= 1;
+ distrBits++;
+ }//while
+ if (tmp != totalFragments) {
+ tmp >>= 1;
+ if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) {
+ distrBits--;
+ }//if
+ }//if
+ * lhPageBits = pageBits;
+ * lhDistrBits = distrBits;
+
+}//calcLHbits()
+
+
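+// ADD_FRAGREQ from DIH: remember DIH's fragment pointer and forward the
+// fragment definition to LQH as LQHFRAGREQ, filled in from the table record.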
+void
+Dbdict::execADD_FRAGREQ(Signal* signal) {
+ jamEntry();
+
+ AddFragReq * const req = (AddFragReq*)signal->getDataPtr();
+
+ Uint32 dihPtr = req->dihPtr;
+ Uint32 senderData = req->senderData;
+ Uint32 tableId = req->tableId;
+ Uint32 fragId = req->fragmentId;
+ Uint32 node = req->nodeId;
+ Uint32 lcpNo = req->nextLCP;
+ Uint32 fragCount = req->totalFragments;
+ Uint32 requestInfo = req->requestInfo;
+ Uint32 startGci = req->startGci;
+
+ ndbrequire(node == getOwnNodeId());
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, senderData));
+
+ createTabPtr.p->m_dihAddFragPtr = dihPtr;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, tableId);
+
+#if 0
+ tabPtr.p->gciTableCreated = (startGci > tabPtr.p->gciTableCreated ? startGci:
+ startGci > tabPtr.p->gciTableCreated);
+#endif
+
+ /**
+ * Calc lh3PageBits
+ */
+ Uint32 lhDistrBits = 0;
+ Uint32 lhPageBits = 0;
+ ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
+
+ {
+ LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
+ req->senderData = senderData;
+ req->senderRef = reference();
+ req->fragmentId = fragId;
+ req->requestInfo = requestInfo;
+ req->tableId = tableId;
+ req->localKeyLength = tabPtr.p->localKeyLen;
+ req->maxLoadFactor = tabPtr.p->maxLoadFactor;
+ req->minLoadFactor = tabPtr.p->minLoadFactor;
+ req->kValue = tabPtr.p->kValue;
+ req->lh3DistrBits = 0; //lhDistrBits;
+ req->lh3PageBits = 0; //lhPageBits;
+ req->noOfAttributes = tabPtr.p->noOfAttributes;
+ req->noOfNullAttributes = tabPtr.p->noOfNullBits;
+ req->noOfPagesToPreAllocate = 0;
+ req->schemaVersion = tabPtr.p->tableVersion;
+ Uint32 keyLen = tabPtr.p->tupKeyLength;
+ req->keyLength = keyLen; // wl-2066 no more "long keys"
+ req->nextLCP = lcpNo;
+
+ req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
+ req->noOfNewAttr = 0;
+ // noOfCharsets passed to TUP in upper half
+ req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+ req->checksumIndicator = 1;
+ req->noOfAttributeGroups = 1;
+ req->GCPIndicator = 0;
+ req->startGci = startGci;
+ req->tableType = tabPtr.p->tableType;
+ req->primaryTableId = tabPtr.p->primaryTableId;
+ sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
+ LqhFragReq::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execLQHFRAGREF(Signal * signal){
+ jamEntry();
+ LqhFragRef * const ref = (LqhFragRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+
+ {
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+ ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
+ AddFragRef::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execLQHFRAGCONF(Signal * signal){
+ jamEntry();
+ LqhFragConf * const conf = (LqhFragConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ createTabPtr.p->m_lqhFragPtr = conf->lqhFragPtr;
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ sendLQHADDATTRREQ(signal, createTabPtr, tabPtr.p->firstAttribute);
+}
+
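+// Send the table's attributes to LQH in batches of at most
+// LqhAddAttrReq::MAX_ATTRIBUTES per LQHADDATTREQ. The next batch is sent
+// from execLQHADDATTCONF until the list is exhausted, after which
+// ADD_FRAGCONF is returned to DIH.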
+void
+Dbdict::sendLQHADDATTRREQ(Signal* signal,
+ CreateTableRecordPtr createTabPtr,
+ Uint32 attributePtrI){
+ jam();
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtrSend();
+ Uint32 i = 0;
+ for(i = 0; i<LqhAddAttrReq::MAX_ATTRIBUTES && attributePtrI != RNIL; i++){
+ jam();
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordPool.getPtr(attrPtr, attributePtrI);
+ LqhAddAttrReq::Entry& entry = req->attributes[i];
+ entry.attrId = attrPtr.p->attributeId;
+ entry.attrDescriptor = attrPtr.p->attributeDescriptor;
+ entry.extTypeInfo = 0;
+ // charset number passed to TUP, TUX in upper half
+ entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
+ if (tabPtr.p->isIndex()) {
+ Uint32 primaryAttrId;
+ if (attrPtr.p->nextAttrInTable != RNIL) {
+ getIndexAttr(tabPtr, attributePtrI, &primaryAttrId);
+ } else {
+ primaryAttrId = ZNIL;
+ if (tabPtr.p->isOrderedIndex())
+ entry.attrId = 0; // attribute goes to TUP
+ }
+ entry.attrId |= (primaryAttrId << 16);
+ }
+ attributePtrI = attrPtr.p->nextAttrInTable;
+ }
+ req->lqhFragPtr = createTabPtr.p->m_lqhFragPtr;
+ req->senderData = createTabPtr.p->key;
+ req->senderAttrPtr = attributePtrI;
+ req->noOfAttributes = i;
+
+ sendSignal(DBLQH_REF, GSN_LQHADDATTREQ, signal,
+ LqhAddAttrReq::HeaderLength + LqhAddAttrReq::EntryLength * i, JBB);
+}
+
+void
+Dbdict::execLQHADDATTREF(Signal * signal){
+ jamEntry();
+ LqhAddAttrRef * const ref = (LqhAddAttrRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+
+ {
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+ ref->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal,
+ AddFragRef::SignalLength, JBB);
+ }
+
+}
+
+void
+Dbdict::execLQHADDATTCONF(Signal * signal){
+ jamEntry();
+ LqhAddAttrConf * const conf = (LqhAddAttrConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ const Uint32 fragId = conf->fragId;
+ const Uint32 nextAttrPtr = conf->senderAttrPtr;
+ if(nextAttrPtr != RNIL){
+ jam();
+ sendLQHADDATTRREQ(signal, createTabPtr, nextAttrPtr);
+ return;
+ }
+
+ {
+ AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
+ conf->dihPtr = createTabPtr.p->m_dihAddFragPtr;
+ conf->fragId = fragId;
+ sendSignal(DBDIH_REF, GSN_ADD_FRAGCONF, signal,
+ AddFragConf::SignalLength, JBB);
+ }
+}
+
+void
+Dbdict::execDIADDTABREF(Signal* signal){
+ jam();
+
+ DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData));
+
+ createTabPtr.p->setErrorCode(ref->errorCode);
+ execute(signal, createTabPtr.p->m_callback, 0);
+}
+
+void
+Dbdict::execDIADDTABCONF(Signal* signal){
+ jam();
+
+ DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData));
+
+ signal->theData[0] = createTabPtr.p->key;
+ signal->theData[1] = reference();
+ signal->theData[2] = createTabPtr.p->m_tablePtrI;
+
+ if(createTabPtr.p->m_dihAddFragPtr != RNIL){
+ jam();
+
+ /**
+ * We did perform at least one LQHFRAGREQ
+ */
+ sendSignal(DBLQH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+ return;
+ } else {
+ /**
+ * No local fragment (i.e. no LQHFRAGREQ)
+ */
+ execute(signal, createTabPtr.p->m_callback, 0);
+ return;
+ //sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+ }
+}
+
+void
+Dbdict::execTAB_COMMITREF(Signal* signal) {
+ jamEntry();
+ ndbrequire(false);
+}//execTAB_COMMITREF()
+
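+// TAB_COMMITCONF is received from LQH (continue via the stored callback) or
+// from DIH during activation, in which case the key attribute descriptors
+// are collected and TC_SCHVERREQ is sent to ACC and TC.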
+void
+Dbdict::execTAB_COMMITCONF(Signal* signal){
+ jamEntry();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[0]));
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+
+ execute(signal, createTabPtr.p->m_callback, 0);
+ return;
+ }
+
+ if(refToBlock(signal->getSendersBlockRef()) == DBDIH){
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ signal->theData[0] = tabPtr.i;
+ signal->theData[1] = tabPtr.p->tableVersion;
+ signal->theData[2] = (Uint32)tabPtr.p->storedTable;
+ signal->theData[3] = reference();
+ signal->theData[4] = (Uint32)tabPtr.p->tableType;
+ signal->theData[5] = createTabPtr.p->key;
+ signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey;
+
+ Uint32 buf[2 * MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 sz = 0;
+ Uint32 tAttr = tabPtr.p->firstAttribute;
+ while (tAttr != RNIL) {
+ jam();
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->tupleKey) {
+ buf[sz++] = aRec->attributeDescriptor;
+ buf[sz++] = (aRec->extPrecision >> 16); // charset number
+ }
+ tAttr = aRec->nextAttrInTable;
+ }
+ ndbrequire((int)sz == 2 * tabPtr.p->noOfPrimkey);
+
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = buf;
+ lsPtr[0].sz = sz;
+ // note: ACC does not reply
+ if (tabPtr.p->isTable() || tabPtr.p->isHashIndex())
+ sendSignal(DBACC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
+ sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB, lsPtr, 1);
+ return;
+ }
+
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTab_dihComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ //@todo check for master failed
+
+ if(createTabPtr.p->m_errorCode == 0){
+ jam();
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+ return;
+ }
+
+ CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr();
+ ref->senderRef = reference();
+ ref->senderData = createTabPtr.p->key;
+ ref->errorCode = createTabPtr.p->m_errorCode;
+ ref->errorLine = 0;
+ ref->errorKey = 0;
+ ref->errorStatus = 0;
+
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_REF,
+ signal, CreateTabRef::SignalLength, JBB);
+}
+
+void
+Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ SchemaFile::TableEntry tabEntry;
+ tabEntry.m_tableVersion = tabPtr.p->tableVersion;
+ tabEntry.m_tableType = tabPtr.p->tableType;
+ tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED;
+ tabEntry.m_gcp = tabPtr.p->gciTableCreated;
+ tabEntry.m_noOfPages =
+ DIV(tabPtr.p->packedSize + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+
+ Callback callback;
+ callback.m_callbackData = createTabPtr.p->key;
+ callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_writeSchemaConf2);
+
+ updateSchemaState(signal, tabPtr.i, &tabEntry, &callback);
+}
+
+void
+Dbdict::createTab_writeSchemaConf2(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ Callback c;
+ c.m_callbackData = callbackData;
+ c.m_callbackFunction = safe_cast(&Dbdict::createTab_alterComplete);
+ alterTab_activate(signal, createTabPtr, &c);
+}
+
+void
+Dbdict::createTab_alterComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DEFINED;
+
+ //@todo check error
+ //@todo check master failed
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+
+ if(createTabPtr.p->m_coordinatorRef != reference()){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ }
+}
+
+void
+Dbdict::createTab_drop(Signal* signal, CreateTabReq * req){
+ jam();
+
+ const Uint32 key = req->senderData;
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, key));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+ tabPtr.p->tabState = TableRecord::DROPPING;
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.seize(dropTabPtr));
+
+ dropTabPtr.p->key = key;
+ c_opDropTable.add(dropTabPtr);
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = createTabPtr.p->m_tablePtrI;
+ dropTabPtr.p->m_requestType = DropTabReq::CreateTabDrop;
+ dropTabPtr.p->m_coordinatorRef = createTabPtr.p->m_coordinatorRef;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = req->senderData;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::createTab_dropComplete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::createTab_dropComplete(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode){
+ jam();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, callbackData));
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, callbackData));
+
+ TableRecordPtr tabPtr;
+ c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
+
+ releaseTableObject(tabPtr.i);
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tabPtr.i);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
+
+ //@todo check error
+ //@todo check master failed
+
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = createTabPtr.p->key;
+ sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
+ signal, CreateTabConf::SignalLength, JBB);
+
+ if(createTabPtr.p->m_coordinatorRef != reference()){
+ jam();
+ c_opCreateTable.release(createTabPtr);
+ }
+
+ c_opDropTable.release(dropTabPtr);
+}
+
+void
+Dbdict::alterTab_activate(Signal* signal, CreateTableRecordPtr createTabPtr,
+ Callback * c){
+
+ createTabPtr.p->m_callback = * c;
+
+ signal->theData[0] = createTabPtr.p->key;
+ signal->theData[1] = reference();
+ signal->theData[2] = createTabPtr.p->m_tablePtrI;
+ sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB);
+}
+
+void
+Dbdict::execTC_SCHVERCONF(Signal* signal){
+ jamEntry();
+
+ CreateTableRecordPtr createTabPtr;
+ ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[1]));
+
+ execute(signal, createTabPtr.p->m_callback, 0);
+}
+
+#define tabRequire(cond, error) \
+ if (!(cond)) { \
+ jam(); \
+ parseP->errorCode = error; parseP->errorLine = __LINE__; \
+ parseP->errorKey = it.getKey(); \
+ return; \
+ }//if
+
+// handleAddTableFailure(signal, __LINE__, allocatedTable);
+
+void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
+ ParseDictTabInfoRecord * parseP,
+ bool checkExist)
+{
+/* ---------------------------------------------------------------- */
+// We always start by handling the table name since it must be the first
+// item in the list. From the table name we can determine whether it is a
+// valid name, a new name, or the name of an already existing table.
+/* ---------------------------------------------------------------- */
+
+ it.first();
+
+ SimpleProperties::UnpackStatus status;
+ DictTabInfo::Table tableDesc; tableDesc.init();
+ status = SimpleProperties::unpack(it, &tableDesc,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+
+ if(status != SimpleProperties::Break){
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
+ {
+ ndbrequire(!checkExist);
+ }
+ if(!checkExist)
+ {
+ ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
+ }
+
+ /* ---------------------------------------------------------------- */
+ // Verify that table name is an allowed table name.
+ // TODO
+ /* ---------------------------------------------------------------- */
+ const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1;
+
+ TableRecord keyRecord;
+ tabRequire(tableNameLength <= sizeof(keyRecord.tableName),
+ CreateTableRef::TableNameTooLong);
+ strcpy(keyRecord.tableName, tableDesc.TableName);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ if (checkExist){
+ jam();
+ /* ---------------------------------------------------------------- */
+ // Check if the table already exists.
+ /* ---------------------------------------------------------------- */
+ tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist);
+ }
+
+ switch (parseP->requestType) {
+ case DictTabInfo::CreateTableFromAPI: {
+ jam();
+ }
+ case DictTabInfo::AlterTableFromAPI:{
+ jam();
+ tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId);
+ /* ---------------------------------------------------------------- */
+ // Check that a free table record was available.
+ /* ---------------------------------------------------------------- */
+ tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords);
+
+ c_tableRecordPool.getPtr(tablePtr);
+ break;
+ }
+ case DictTabInfo::AddTableFromDict:
+ case DictTabInfo::ReadTableFromDiskSR:
+ case DictTabInfo::GetTabInfoConf:
+ {
+/* ---------------------------------------------------------------- */
+// Get table id and check that table doesn't already exist
+/* ---------------------------------------------------------------- */
+ tablePtr.i = tableDesc.TableId;
+
+ if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
+ ndbrequire(tablePtr.i == c_restartRecord.activeTable);
+ }//if
+ if (parseP->requestType == DictTabInfo::GetTabInfoConf) {
+ ndbrequire(tablePtr.i == c_restartRecord.activeTable);
+ }//if
+
+ c_tableRecordPool.getPtr(tablePtr);
+ ndbrequire(tablePtr.p->tabState == TableRecord::NOT_DEFINED);
+
+ //Uint32 oldTableVersion = tablePtr.p->tableVersion;
+ initialiseTableRecord(tablePtr);
+ if (parseP->requestType == DictTabInfo::AddTableFromDict) {
+ jam();
+ tablePtr.p->tabState = TableRecord::DEFINING;
+ }//if
+#ifdef HAVE_TABLE_REORG
+/* ---------------------------------------------------------------- */
+// Get the second table id, check that the table doesn't already exist,
+// and set up links between the first and second table.
+/* ---------------------------------------------------------------- */
+ TableRecordPtr secondTablePtr;
+ secondTablePtr.i = tableDesc.SecondTableId;
+ c_tableRecordPool.getPtr(secondTablePtr);
+ ndbrequire(secondTablePtr.p->tabState == TableRecord::NOT_DEFINED);
+
+ initialiseTableRecord(secondTablePtr);
+ secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
+ secondTablePtr.p->secondTable = tablePtr.i;
+ tablePtr.p->secondTable = secondTablePtr.i;
+#endif
+/* ---------------------------------------------------------------- */
+// Set table version
+/* ---------------------------------------------------------------- */
+ Uint32 tableVersion = tableDesc.TableVersion;
+ tablePtr.p->tableVersion = tableVersion;
+
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ parseP->tablePtr = tablePtr;
+
+ strcpy(tablePtr.p->tableName, keyRecord.tableName);
+ if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ TableRecordPtr tmp;
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.add(tablePtr);
+ }
+
+ //tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr;
+ //tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable;
+ //tablePtr.p->tupKeyLength = tableDesc.KeyLength;
+ tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes;
+ tablePtr.p->storedTable = tableDesc.TableLoggedFlag;
+ tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
+ tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
+ tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
+ tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
+ tablePtr.p->kValue = tableDesc.TableKValue;
+ tablePtr.p->fragmentCount = tableDesc.FragmentCount;
+
+ tablePtr.p->frmLen = tableDesc.FrmLen;
+ memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
+
+ if(tableDesc.PrimaryTableId != RNIL) {
+
+ tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
+ tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState;
+ tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId;
+ tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId;
+ tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId;
+ tablePtr.p->customTriggerId = tableDesc.CustomTriggerId;
+ } else {
+ tablePtr.p->primaryTableId = RNIL;
+ tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
+ tablePtr.p->insertTriggerId = RNIL;
+ tablePtr.p->updateTriggerId = RNIL;
+ tablePtr.p->deleteTriggerId = RNIL;
+ tablePtr.p->customTriggerId = RNIL;
+ }
+ tablePtr.p->buildTriggerId = RNIL;
+ tablePtr.p->indexLocal = 0;
+
+ handleTabInfo(it, parseP);
+
+ if(parseP->errorCode != 0)
+ {
+ /**
+ * Release table
+ */
+ releaseTableObject(tablePtr.i, checkExist);
+ }
+}//handleTabInfoInit()
+
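+// Parse the attribute list that follows the table properties: build an
+// AttributeRecord per attribute, resolve charsets, assemble the attribute
+// descriptor word and accumulate key count, null bits, key length and record
+// length, which are finally checked against the size limits.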
+void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
+ ParseDictTabInfoRecord * parseP)
+{
+ TableRecordPtr tablePtr = parseP->tablePtr;
+
+ SimpleProperties::UnpackStatus status;
+
+ Uint32 keyCount = 0;
+ Uint32 keyLength = 0;
+ Uint32 attrCount = tablePtr.p->noOfAttributes;
+ Uint32 nullCount = 0;
+ Uint32 nullBits = 0;
+ Uint32 noOfCharsets = 0;
+ Uint16 charsets[128];
+ Uint32 recordLength = 0;
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordHash.removeAll();
+
+ for(Uint32 i = 0; i<attrCount; i++){
+ /**
+ * Attribute Name
+ */
+ DictTabInfo::Attribute attrDesc; attrDesc.init();
+ status = SimpleProperties::unpack(it, &attrDesc,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+ if(status != SimpleProperties::Break){
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ /**
+ * Check that attribute is not defined twice
+ */
+ AttributeRecord tmpAttr;
+ {
+ strcpy(tmpAttr.attributeName, attrDesc.AttributeName);
+
+ AttributeRecordPtr attrPtr;
+ c_attributeRecordHash.find(attrPtr, tmpAttr);
+
+ if(attrPtr.i != RNIL){
+ parseP->errorCode = CreateTableRef::AttributeNameTwice;
+ return;
+ }
+ }
+
+ if(!getNewAttributeRecord(tablePtr, attrPtr)){
+ jam();
+ parseP->errorCode = CreateTableRef::NoMoreAttributeRecords;
+ return;
+ }
+
+ /**
+ * TmpAttrib to Attribute mapping
+ */
+ strcpy(attrPtr.p->attributeName, attrDesc.AttributeName);
+ attrPtr.p->attributeId = attrDesc.AttributeId;
+ attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
+
+ attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
+ attrPtr.p->extScale = attrDesc.AttributeExtScale;
+ attrPtr.p->extLength = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned csNumber = (attrPtr.p->extPrecision >> 16);
+ if (csNumber != 0) {
+ /*
+ * A new charset is first accessed here on this node.
+ * TODO use separate thread (e.g. via NDBFS) if it needs to be loaded from file
+ */
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ parseP->errorCode = CreateTableRef::InvalidCharset;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ // XXX should be done somewhere in mysql
+ all_charsets[cs->number] = cs;
+ unsigned i = 0;
+ while (i < noOfCharsets) {
+ if (charsets[i] == csNumber)
+ break;
+ i++;
+ }
+ if (i == noOfCharsets) {
+ noOfCharsets++;
+ if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ charsets[i] = csNumber;
+ }
+ }
+
+ // compute attribute size and array size
+ bool translateOk = attrDesc.translateExtType();
+ tabRequire(translateOk, CreateTableRef::Inconsistency);
+
+ if(attrDesc.AttributeArraySize > 65535){
+ parseP->errorCode = CreateTableRef::ArraySizeTooBig;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ Uint32 desc = 0;
+ AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
+ AttributeDescriptor::setSize(desc, attrDesc.AttributeSize);
+ AttributeDescriptor::setArray(desc, attrDesc.AttributeArraySize);
+ AttributeDescriptor::setNullable(desc, attrDesc.AttributeNullableFlag);
+ AttributeDescriptor::setDKey(desc, attrDesc.AttributeDKey);
+ AttributeDescriptor::setPrimaryKey(desc, attrDesc.AttributeKeyFlag);
+ attrPtr.p->attributeDescriptor = desc;
+ attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
+ strcpy(attrPtr.p->defaultValue, attrDesc.AttributeDefaultValue);
+
+ tabRequire(attrDesc.AttributeId == i, CreateTableRef::InvalidFormat);
+
+ attrCount ++;
+ keyCount += attrDesc.AttributeKeyFlag;
+ nullCount += attrDesc.AttributeNullableFlag;
+
+ const Uint32 aSz = (1 << attrDesc.AttributeSize);
+ Uint32 sz;
+ if(aSz != 1)
+ {
+ sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
+ }
+ else
+ {
+ sz = 0;
+ nullBits += attrDesc.AttributeArraySize;
+ }
+
+ if(attrDesc.AttributeArraySize == 0)
+ {
+ parseP->errorCode = CreateTableRef::InvalidArraySize;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+
+ recordLength += sz;
+ if(attrDesc.AttributeKeyFlag){
+ keyLength += sz;
+
+ if(attrDesc.AttributeNullableFlag){
+ parseP->errorCode = CreateTableRef::NullablePrimaryKey;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ }
+
+ if (parseP->requestType != DictTabInfo::AlterTableFromAPI)
+ c_attributeRecordHash.add(attrPtr);
+
+ if(!it.next())
+ break;
+
+ if(it.getKey() != DictTabInfo::AttributeName)
+ break;
+ }//for
+
+ tablePtr.p->noOfPrimkey = keyCount;
+ tablePtr.p->noOfNullAttr = nullCount;
+ tablePtr.p->noOfCharsets = noOfCharsets;
+ tablePtr.p->tupKeyLength = keyLength;
+ tablePtr.p->noOfNullBits = nullCount + nullBits;
+
+ tabRequire(recordLength <= MAX_TUPLE_SIZE_IN_WORDS,
+ CreateTableRef::RecordTooBig);
+ tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS,
+ CreateTableRef::InvalidPrimaryKeySize);
+ tabRequire(keyLength > 0,
+ CreateTableRef::InvalidPrimaryKeySize);
+
+}//handleTabInfo()
+
+
+/* ---------------------------------------------------------------- */
+// DICTTABCONF is sent when participants have received all DICTTABINFO
+// and successfully handled it.
+// Also sent to self (DICT master) when the index table creation is complete.
+/* ---------------------------------------------------------------- */
+void Dbdict::execCREATE_TABLE_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ CreateTableConf * const conf = (CreateTableConf *)signal->getDataPtr();
+ // assume part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, conf->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->m_request.setIndexId(conf->tableId);
+ opPtr.p->m_request.setIndexVersion(conf->tableVersion);
+ createIndex_fromCreateTable(signal, opPtr);
+}//execCREATE_TABLE_CONF()
+
+void Dbdict::execCREATE_TABLE_REF(Signal* signal)
+{
+ jamEntry();
+
+ CreateTableRef * const ref = (CreateTableRef *)signal->getDataPtr();
+ // assume part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, ref->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createIndex_fromCreateTable(signal, opPtr);
+}//execCREATE_TABLE_REF()
+
+/* ---------------------------------------------------------------- */
+// New global checkpoint created.
+/* ---------------------------------------------------------------- */
+void Dbdict::execWAIT_GCP_CONF(Signal* signal)
+{
+#if 0
+ TableRecordPtr tablePtr;
+ jamEntry();
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ c_tableRecordPool.getPtr(tablePtr, c_connRecord.connTableId);
+ tablePtr.p->gciTableCreated = conf->gcp;
+ sendUpdateSchemaState(signal,
+ tablePtr.i,
+ SchemaFile::TABLE_ADD_COMMITTED,
+ c_connRecord.noOfPagesForTable,
+ conf->gcp);
+#endif
+}//execWAIT_GCP_CONF()
+
+/* ---------------------------------------------------------------- */
+// Refused new global checkpoint.
+/* ---------------------------------------------------------------- */
+void Dbdict::execWAIT_GCP_REF(Signal* signal)
+{
+ jamEntry();
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+/* ---------------------------------------------------------------- */
+// Error Handling code needed
+/* ---------------------------------------------------------------- */
+ progError(ref->errorCode, 0);
+}//execWAIT_GCP_REF()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: DROP TABLE -------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code used to drop a table. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
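+/* ---------------------------------------------------------------- */
+/* The DICT master acts as coordinator: PREP_DROP_TAB_REQ is sent */
+/* block by block (DICT, LQH, TC, DIH) to all alive nodes and, once */
+/* every participant has confirmed, DROP_TAB_REQ commits the drop */
+/* and DROP_TABLE_CONF is returned to the requester. */
+/* ---------------------------------------------------------------- */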
+void
+Dbdict::execDROP_TABLE_REQ(Signal* signal){
+ jamEntry();
+ DropTableReq* req = (DropTableReq*)signal->getDataPtr();
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
+ if(tablePtr.isNull()){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoSuchTable);
+ return;
+ }
+
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NotMaster);
+ return;
+ }
+
+ if(c_blockState != BS_IDLE){
+ jam();
+ dropTableRef(signal, req, DropTableRef::Busy);
+ return;
+ }
+
+ const TableRecord::TabState tabState = tablePtr.p->tabState;
+ bool ok = false;
+ switch(tabState){
+ case TableRecord::NOT_DEFINED:
+ case TableRecord::REORG_TABLE_PREPARED:
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoSuchTable);
+ return;
+ case TableRecord::DEFINED:
+ ok = true;
+ jam();
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ jam();
+ dropTableRef(signal, req, DropTableRef::DropInProgress);
+ return;
+ }
+ ndbrequire(ok);
+
+ if(tablePtr.p->tableVersion != req->tableVersion){
+ jam();
+ dropTableRef(signal, req, DropTableRef::InvalidTableVersion);
+ return;
+ }
+
+ /**
+ * All checks passed - seize a drop table operation record
+ */
+ DropTableRecordPtr dropTabPtr;
+ c_opDropTable.seize(dropTabPtr);
+
+ if(dropTabPtr.isNull()){
+ jam();
+ dropTableRef(signal, req, DropTableRef::NoDropTableRecordAvailable);
+ return;
+ }
+
+ c_blockState = BS_BUSY;
+
+ dropTabPtr.p->key = ++c_opRecordSequence;
+ c_opDropTable.add(dropTabPtr);
+
+ tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
+
+ dropTabPtr.p->m_request = * req;
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_requestType = DropTabReq::OnlineDropTab;
+ dropTabPtr.p->m_coordinatorRef = reference();
+ dropTabPtr.p->m_coordinatorData.m_gsn = GSN_PREP_DROP_TAB_REQ;
+ dropTabPtr.p->m_coordinatorData.m_block = 0;
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::dropTableRef(Signal * signal,
+ DropTableReq * req, DropTableRef::ErrorCode errCode){
+
+ Uint32 tableId = req->tableId;
+ Uint32 tabVersion = req->tableVersion;
+ Uint32 senderData = req->senderData;
+ Uint32 senderRef = req->senderRef;
+
+ DropTableRef * ref = (DropTableRef*)signal->getDataPtrSend();
+ ref->tableId = tableId;
+ ref->tableVersion = tabVersion;
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->errorCode = errCode;
+ ref->masterNodeId = c_masterNodeId;
+ sendSignal(senderRef, GSN_DROP_TABLE_REF, signal,
+ DropTableRef::SignalLength, JBB);
+}
+
+void
+Dbdict::prepDropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
+
+ /**
+ * No errors currently allowed
+ */
+ ndbrequire(dropTabPtr.p->m_errorCode == 0);
+
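+ // prepare one block at a time: DBDICT -> DBLQH -> DBTC -> DBDIH;
+ // when DBDIH has been prepared the prepare phase is complete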
+ Uint32 block = 0;
+ switch(dropTabPtr.p->m_coordinatorData.m_block){
+ case 0:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
+ break;
+ case DBDICT:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBLQH;
+ break;
+ case DBLQH:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBTC;
+ break;
+ case DBTC:
+ jam();
+ block = dropTabPtr.p->m_coordinatorData.m_block = DBDIH;
+ break;
+ case DBDIH:
+ jam();
+ prepDropTab_complete(signal, dropTabPtr);
+ return;
+ default:
+ ndbrequire(false);
+ }
+
+ PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
+ prep->senderRef = reference();
+ prep->senderData = dropTabPtr.p->key;
+ prep->tableId = dropTabPtr.p->m_request.tableId;
+ prep->requestType = dropTabPtr.p->m_requestType;
+
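+ // the counter starts as the set of alive nodes; each PREP_DROP_TAB
+ // CONF/REF clears its sender until all participants have answered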
+ dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(block, c_aliveNodes);
+ sendSignal(rg, GSN_PREP_DROP_TAB_REQ, signal,
+ PrepDropTabReq::SignalLength, JBB);
+
+#if 0
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++){
+ if(c_aliveNodes.get(i)){
+ jam();
+ BlockReference ref = numberToRef(block, i);
+
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.setWaitingFor(i);
+ }
+ }
+#endif
+}
+
+void
+Dbdict::execPREP_DROP_TAB_CONF(Signal * signal){
+ jamEntry();
+
+ PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(prep->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::execPREP_DROP_TAB_REF(Signal* signal){
+ jamEntry();
+
+ PrepDropTabRef * prep = (PrepDropTabRef*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(prep->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ Uint32 block = refToBlock(prep->senderRef);
+ if((prep->errorCode == PrepDropTabRef::NoSuchTable && block == DBLQH) ||
+ (prep->errorCode == PrepDropTabRef::NF_FakeErrorREF)){
+ jam();
+ /**
+ * Ignore errors:
+ * 1) no such table from LQH, since the table might not exist in all LQHs
+ * 2) node failure...
+ */
+ } else {
+ dropTabPtr.p->setErrorCode((Uint32)prep->errorCode);
+ }
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+ prepDropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::prepDropTab_complete(Signal* signal, DropTableRecordPtr dropTabPtr){
+ jam();
+
+ dropTabPtr.p->m_coordinatorData.m_gsn = GSN_DROP_TAB_REQ;
+ dropTabPtr.p->m_coordinatorData.m_block = DBDICT;
+
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = dropTabPtr.p->key;
+ req->tableId = dropTabPtr.p->m_request.tableId;
+ req->requestType = dropTabPtr.p->m_requestType;
+
+ dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TAB_REQ, signal,
+ DropTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::execDROP_TAB_REF(Signal* signal){
+ jamEntry();
+
+ DropTabRef * const req = (DropTabRef*)signal->getDataPtr();
+
+ Uint32 block = refToBlock(req->senderRef);
+ ndbrequire(req->errorCode == DropTabRef::NF_FakeErrorREF ||
+ (req->errorCode == DropTabRef::NoSuchTable &&
+ (block == DBTUP || block == DBACC || block == DBLQH)));
+
+ if(block != DBDICT){
+ jam();
+ ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
+ dropTab_localDROP_TAB_CONF(signal);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::execDROP_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ DropTabConf * const req = (DropTabConf*)signal->getDataPtr();
+
+ if(refToBlock(req->senderRef) != DBDICT){
+ jam();
+ ndbrequire(refToNode(req->senderRef) == getOwnNodeId());
+ dropTab_localDROP_TAB_CONF(signal);
+ return;
+ }
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
+
+ ndbrequire(dropTabPtr.p->m_coordinatorRef == reference());
+ ndbrequire(dropTabPtr.p->m_request.tableId == req->tableId);
+ ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(req->senderRef);
+ dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){
+ jam();
+ return;
+ }
+
+ DropTableConf* conf = (DropTableConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = dropTabPtr.p->m_request.senderData;
+ conf->tableId = dropTabPtr.p->m_request.tableId;
+ conf->tableVersion = dropTabPtr.p->m_request.tableVersion;
+
+ Uint32 ref = dropTabPtr.p->m_request.senderRef;
+ sendSignal(ref, GSN_DROP_TABLE_CONF, signal,
+ DropTableConf::SignalLength, JBB);
+
+ c_opDropTable.release(dropTabPtr);
+ c_blockState = BS_IDLE;
+}
+
+/**
+ * DROP TABLE PARTICIPANT CODE
+ */
+void
+Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend();
+
+ DropTableRecordPtr dropTabPtr;
+ if(prep->senderRef == reference()){
+ jam();
+ ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData));
+ ndbrequire(dropTabPtr.p->m_requestType == prep->requestType);
+ } else {
+ jam();
+ c_opDropTable.seize(dropTabPtr);
+ if(!dropTabPtr.isNull()){
+ dropTabPtr.p->key = prep->senderData;
+ c_opDropTable.add(dropTabPtr);
+ }
+ }
+
+ ndbrequire(!dropTabPtr.isNull());
+
+ dropTabPtr.p->m_errorCode = 0;
+ dropTabPtr.p->m_request.tableId = prep->tableId;
+ dropTabPtr.p->m_requestType = prep->requestType;
+ dropTabPtr.p->m_coordinatorRef = prep->senderRef;
+ dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_REQ;
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, prep->tableId);
+ tablePtr.p->tabState = TableRecord::PREPARE_DROPPING;
+
+ /**
+ * Modify schema
+ */
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tablePtr.i);
+ SchemaFile::TableState tabState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+ ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED ||
+ tabState == SchemaFile::ALTER_TABLE_COMMITTED);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED;
+ computeChecksum((SchemaFile*)pagePtr.p);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::prepDropTab_writeSchemaConf);
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::prepDropTab_writeSchemaConf(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_REQ);
+
+ /**
+ * There should probably be node failure handling here,
+ * to check that the coordinator hasn't died
+ */
+
+ PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr();
+ prep->senderRef = reference();
+ prep->senderData = dropTabPtrI;
+ prep->tableId = dropTabPtr.p->m_request.tableId;
+
+ dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_CONF;
+ sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+}
+
+void
+Dbdict::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_CONF);
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ;
+
+ ndbrequire(dropTabPtr.p->m_requestType == req->requestType);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
+ tablePtr.p->tabState = TableRecord::DROPPING;
+
+ dropTabPtr.p->m_participantData.m_block = 0;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackData = dropTabPtr.p->key;
+ dropTabPtr.p->m_participantData.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::dropTab_complete);
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+#include <DebuggerNames.hpp>
+
+void
+Dbdict::dropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){
+
+ /**
+ * No errors currently allowed
+ */
+ ndbrequire(dropTabPtr.p->m_errorCode == 0);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId);
+
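+ // visit the local blocks in order: tables and hash indexes go
+ // TC -> ACC -> TUP -> LQH -> DIH, ordered indexes TC -> TUP -> TUX -> LQH -> DIH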
+ Uint32 block = 0;
+ switch(dropTabPtr.p->m_participantData.m_block){
+ case 0:
+ jam();
+ block = DBTC;
+ break;
+ case DBTC:
+ jam();
+ if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
+ block = DBACC;
+ if (tablePtr.p->isOrderedIndex())
+ block = DBTUP;
+ break;
+ case DBACC:
+ jam();
+ block = DBTUP;
+ break;
+ case DBTUP:
+ jam();
+ if (tablePtr.p->isTable() || tablePtr.p->isHashIndex())
+ block = DBLQH;
+ if (tablePtr.p->isOrderedIndex())
+ block = DBTUX;
+ break;
+ case DBTUX:
+ jam();
+ block = DBLQH;
+ break;
+ case DBLQH:
+ jam();
+ block = DBDIH;
+ break;
+ case DBDIH:
+ jam();
+ execute(signal, dropTabPtr.p->m_participantData.m_callback, 0);
+ return;
+ }
+ ndbrequire(block != 0);
+ dropTabPtr.p->m_participantData.m_block = block;
+
+ DropTabReq * req = (DropTabReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = dropTabPtr.p->key;
+ req->tableId = dropTabPtr.p->m_request.tableId;
+ req->requestType = dropTabPtr.p->m_requestType;
+
+ const Uint32 nodeId = getOwnNodeId();
+ dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor();
+ dropTabPtr.p->m_participantData.m_signalCounter.setWaitingFor(nodeId);
+ BlockReference ref = numberToRef(block, 0);
+ sendSignal(ref, GSN_DROP_TAB_REQ, signal, DropTabReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropTab_localDROP_TAB_CONF(Signal* signal){
+ jamEntry();
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, conf->senderData));
+
+ ndbrequire(dropTabPtr.p->m_request.tableId == conf->tableId);
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
+
+ Uint32 nodeId = refToNode(conf->senderRef);
+ dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor(nodeId);
+
+ if(!dropTabPtr.p->m_participantData.m_signalCounter.done()){
+ jam();
+ ndbrequire(false);
+ return;
+ }
+ dropTab_nextStep(signal, dropTabPtr);
+}
+
+void
+Dbdict::dropTab_complete(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ Uint32 tableId = dropTabPtr.p->m_request.tableId;
+
+ /**
+ * Write to schema file
+ */
+ PageRecordPtr pagePtr;
+ c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+
+ SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+ SchemaFile::TableState tabState =
+ (SchemaFile::TableState)tableEntry->m_tableState;
+ ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED);
+ tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
+ computeChecksum((SchemaFile*)pagePtr.p);
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ c_writeSchemaRecord.inUse = true;
+
+ c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::dropTab_writeSchemaConf);
+ startWriteSchemaFile(signal);
+}
+
+void
+Dbdict::dropTab_writeSchemaConf(Signal* signal,
+ Uint32 dropTabPtrI,
+ Uint32 returnCode){
+ jam();
+
+ DropTableRecordPtr dropTabPtr;
+ ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI));
+
+ ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ);
+
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
+
+ releaseTableObject(dropTabPtr.p->m_request.tableId);
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtr();
+ conf->senderRef = reference();
+ conf->senderData = dropTabPtrI;
+ conf->tableId = dropTabPtr.p->m_request.tableId;
+
+ dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
+ sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_DROP_TAB_CONF, signal,
+ DropTabConf::SignalLength, JBB);
+
+ if(dropTabPtr.p->m_coordinatorRef != reference()){
+ c_opDropTable.release(dropTabPtr);
+ }
+}
+
+void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
+{
+ TableRecordPtr tablePtr;
+ AttributeRecordPtr attrPtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (removeFromHash)
+ {
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
+ c_tableRecordHash.remove(tablePtr);
+ }
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
+
+ Uint32 nextAttrRecord = tablePtr.p->firstAttribute;
+ while (nextAttrRecord != RNIL) {
+ jam();
+/* ---------------------------------------------------------------- */
+// Release all attribute records
+/* ---------------------------------------------------------------- */
+ c_attributeRecordPool.getPtr(attrPtr, nextAttrRecord);
+ nextAttrRecord = attrPtr.p->nextAttrInTable;
+ c_attributeRecordPool.release(attrPtr);
+ }//while
+#ifdef HAVE_TABLE_REORG
+ Uint32 secondTableId = tablePtr.p->secondTable;
+ initialiseTableRecord(tablePtr);
+ c_tableRecordPool.getPtr(tablePtr, secondTableId);
+ initialiseTableRecord(tablePtr);
+#endif
+ return;
+}//releaseTableObject()
+
+/**
+ * DICT receives these on index create and drop.
+ */
+void Dbdict::execDROP_TABLE_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+
+ DropTableConf * const conf = (DropTableConf *)signal->getDataPtr();
+ // assume part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, conf->senderData);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_request.getIndexId() == conf->tableId);
+ ndbrequire(opPtr.p->m_request.getIndexVersion() == conf->tableVersion);
+ dropIndex_fromDropTable(signal, opPtr);
+}
+
+void Dbdict::execDROP_TABLE_REF(Signal* signal)
+{
+ jamEntry();
+
+ DropTableRef * const ref = (DropTableRef *)signal->getDataPtr();
+ // assume part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, ref->senderData);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ opPtr.p->m_errorLine = __LINE__;
+ dropIndex_fromDropTable(signal, opPtr);
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: EXTERNAL INTERFACE TO DATA -------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* This module contains the code that is used by other modules to */
+/* access the data within DBDICT. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+void Dbdict::execGET_TABLEDID_REQ(Signal * signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 1);
+ GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr();
+ Uint32 senderData = req->senderData;
+ Uint32 senderRef = req->senderRef;
+ Uint32 len = req->len;
+
+ if(len>MAX_TAB_NAME_SIZE)
+ {
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNameTooLong);
+ return;
+ }
+
+ char tableName[MAX_TAB_NAME_SIZE];
+ TableRecord keyRecord;
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr,GetTableIdReq::TABLE_NAME);
+ copy((Uint32*)tableName, ssPtr);
+ strcpy(keyRecord.tableName, tableName);
+ releaseSections(signal);
+
+ if(len > sizeof(keyRecord.tableName)){
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNameTooLong);
+ return;
+ }
+
+ TableRecordPtr tablePtr;
+ if(!c_tableRecordHash.find(tablePtr, keyRecord)) {
+ jam();
+ sendGET_TABLEID_REF((Signal*)signal,
+ (GetTableIdReq *)req,
+ GetTableIdRef::TableNotDefined);
+ return;
+ }
+ GetTableIdConf * conf = (GetTableIdConf *)req;
+ conf->tableId = tablePtr.p->tableId;
+ conf->schemaVersion = tablePtr.p->tableVersion;
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal,
+ GetTableIdConf::SignalLength, JBB);
+
+}
+
+
+void Dbdict::sendGET_TABLEID_REF(Signal* signal,
+ GetTableIdReq * req,
+ GetTableIdRef::ErrorCode errorCode)
+{
+ GetTableIdRef * const ref = (GetTableIdRef *)req;
+ /**
+ * The format of GetTabInfo Req/Ref is the same
+ */
+ BlockReference retRef = req->senderRef;
+ ref->err = errorCode;
+ sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
+ GetTableIdRef::SignalLength, JBB);
+}//sendGET_TABLEID_REF()
+
+/* ---------------------------------------------------------------- */
+// Get a full table description.
+/* ---------------------------------------------------------------- */
+void Dbdict::execGET_TABINFOREQ(Signal* signal)
+{
+ jamEntry();
+ if(!assembleFragments(signal))
+ {
+ return;
+ }
+
+ GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
+
+ /**
+ * If I get a GET_TABINFO_REQ from myself
+ * it is one from the time queue
+ */
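+ // while a retrieve is in progress the request is parked on the time queue
+ // (30 ms delay); at most MAX_WAITERS new requests are parked, any further
+ // request is answered with a Busy ref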
+ bool fromTimeQueue = (signal->senderBlockRef() == reference());
+
+ if (c_retrieveRecord.busyState && fromTimeQueue == true) {
+ jam();
+
+ sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
+ signal->length());
+ return;
+ }//if
+
+ const Uint32 MAX_WAITERS = 5;
+
+ if(c_retrieveRecord.busyState && fromTimeQueue == false){
+ jam();
+ if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){
+ jam();
+ c_retrieveRecord.noOfWaiters++;
+
+ sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30,
+ signal->length());
+ return;
+ }
+
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy);
+ return;
+ }
+
+ if(fromTimeQueue){
+ jam();
+ c_retrieveRecord.noOfWaiters--;
+ }
+
+ const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf);
+ const Uint32 reqType = req->requestType & (~GetTabInfoReq::LongSignalConf);
+
+ TableRecordPtr tablePtr;
+ if(reqType == GetTabInfoReq::RequestByName){
+ jam();
+ ndbrequire(signal->getNoOfSections() == 1);
+ const Uint32 len = req->tableNameLen;
+
+ TableRecord keyRecord;
+ if(len > sizeof(keyRecord.tableName)){
+ jam();
+ releaseSections(signal);
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNameTooLong);
+ return;
+ }
+
+ char tableName[MAX_TAB_NAME_SIZE];
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if(r0.getWords((Uint32*)tableName, ((len + 3)/4)))
+ memcpy(keyRecord.tableName, tableName, len);
+ else {
+ jam();
+ releaseSections(signal);
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
+ return;
+ }
+ releaseSections(signal);
+ // memcpy(keyRecord.tableName, req->tableName, len);
+ //ntohS(&keyRecord.tableName[0], len);
+
+ c_tableRecordHash.find(tablePtr, keyRecord);
+ } else {
+ jam();
+ c_tableRecordPool.getPtr(tablePtr, req->tableId, false);
+ }
+
+ // The table searched for was not found
+ if(tablePtr.i == RNIL){
+ jam();
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::InvalidTableId);
+ return;
+ }//if
+
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined);
+ return;
+ }//if
+
+ c_retrieveRecord.busyState = true;
+ c_retrieveRecord.blockRef = req->senderRef;
+ c_retrieveRecord.m_senderData = req->senderData;
+ c_retrieveRecord.tableId = tablePtr.i;
+ c_retrieveRecord.currentSent = 0;
+ c_retrieveRecord.m_useLongSig = useLongSig;
+
+ c_packTable.m_state = PackTable::PTS_GET_TAB;
+
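+ // continue via CONTINUEB: ZPACK_TABLE_INTO_PAGES packs the table
+ // description into the retrieve page, which sendGetTabResponse then
+ // returns to the requester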
+ signal->theData[0] = ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tablePtr.i;
+ signal->theData[2] = c_retrieveRecord.retrievePage;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}//execGET_TABINFOREQ()
+
+void Dbdict::sendGetTabResponse(Signal* signal)
+{
+ PageRecordPtr pagePtr;
+ DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0];
+ conf->senderRef = reference();
+ conf->senderData = c_retrieveRecord.m_senderData;
+ conf->requestType = DictTabInfo::GetTabInfoConf;
+ conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
+
+ c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage);
+ Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE;
+
+ if(c_retrieveRecord.m_useLongSig){
+ jam();
+ GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
+ conf->gci = 0;
+ conf->tableId = c_retrieveRecord.tableId;
+ conf->senderData = c_retrieveRecord.m_senderData;
+ conf->totalLen = c_retrieveRecord.retrievedNoOfWords;
+
+ Callback c = { safe_cast(&Dbdict::initRetrieveRecord), 0 };
+ LinearSectionPtr ptr[3];
+ ptr[0].p = pagePointer;
+ ptr[0].sz = c_retrieveRecord.retrievedNoOfWords;
+ sendFragmentedSignal(c_retrieveRecord.blockRef,
+ GSN_GET_TABINFO_CONF,
+ signal,
+ GetTabInfoConf::SignalLength,
+ JBB,
+ ptr,
+ 1,
+ c);
+ return;
+ }
+
+ ndbrequire(false);
+}//sendGetTabResponse()
+
+void Dbdict::sendGET_TABINFOREF(Signal* signal,
+ GetTabInfoReq * req,
+ GetTabInfoRef::ErrorCode errorCode)
+{
+ jamEntry();
+ GetTabInfoRef * const ref = (GetTabInfoRef *)&signal->theData[0];
+ /**
+ * The format of GetTabInfo Req/Ref is the same
+ */
+ BlockReference retRef = req->senderRef;
+ ref->errorCode = errorCode;
+
+ sendSignal(retRef, GSN_GET_TABINFOREF, signal, signal->length(), JBB);
+}//sendGET_TABINFOREF()
+
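+// byte-swaps a 32-bit word on big-endian hosts, identity otherwise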
+Uint32 convertEndian(Uint32 in) {
+#ifdef WORDS_BIGENDIAN
+ Uint32 ut = 0;
+ ut += ((in >> 24) & 255);
+ ut += (((in >> 16) & 255) << 8);
+ ut += (((in >> 8) & 255) << 16);
+ ut += ((in & 255) << 24);
+ return ut;
+#else
+ return in;
+#endif
+}
+void
+Dbdict::execLIST_TABLES_REQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 i;
+ ListTablesReq * req = (ListTablesReq*)signal->getDataPtr();
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ // save req flags
+ const Uint32 reqTableId = req->getTableId();
+ const Uint32 reqTableType = req->getTableType();
+ const bool reqListNames = req->getListNames();
+ const bool reqListIndexes = req->getListIndexes();
+ // init the confs
+ ListTablesConf * conf = (ListTablesConf *)signal->getDataPtrSend();
+ conf->senderData = senderData;
+ conf->counter = 0;
+ Uint32 pos = 0;
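+ // each entry is one descriptor word, optionally followed by the name
+ // length and the name packed four chars per word; a full signal is
+ // flushed whenever pos reaches ListTablesConf::DataLength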
+ for (i = 0; i < c_tableRecordPool.getSize(); i++) {
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, i);
+ // filter
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED ||
+ tablePtr.p->tabState == TableRecord::REORG_TABLE_PREPARED)
+ continue;
+
+
+ if ((reqTableType != (Uint32)0) && (reqTableType != (unsigned)tablePtr.p->tableType))
+ continue;
+ if (reqListIndexes && reqTableId != tablePtr.p->primaryTableId)
+ continue;
+ conf->tableData[pos] = 0;
+ // id
+ conf->setTableId(pos, tablePtr.i);
+ // type
+ conf->setTableType(pos, tablePtr.p->tableType);
+ // state
+ if (tablePtr.p->isTable()) {
+ switch (tablePtr.p->tabState) {
+ case TableRecord::DEFINING:
+ case TableRecord::CHECKED:
+ conf->setTableState(pos, DictTabInfo::StateBuilding);
+ break;
+ case TableRecord::PREPARE_DROPPING:
+ case TableRecord::DROPPING:
+ conf->setTableState(pos, DictTabInfo::StateDropping);
+ break;
+ case TableRecord::DEFINED:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ }
+ if (tablePtr.p->isIndex()) {
+ switch (tablePtr.p->indexState) {
+ case TableRecord::IS_OFFLINE:
+ conf->setTableState(pos, DictTabInfo::StateOffline);
+ break;
+ case TableRecord::IS_BUILDING:
+ conf->setTableState(pos, DictTabInfo::StateBuilding);
+ break;
+ case TableRecord::IS_DROPPING:
+ conf->setTableState(pos, DictTabInfo::StateDropping);
+ break;
+ case TableRecord::IS_ONLINE:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ }
+ // store
+ if (! tablePtr.p->storedTable) {
+ conf->setTableStore(pos, DictTabInfo::StoreTemporary);
+ } else {
+ conf->setTableStore(pos, DictTabInfo::StorePermanent);
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ if (! reqListNames)
+ continue;
+ const Uint32 size = strlen(tablePtr.p->tableName) + 1;
+ conf->tableData[pos] = size;
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ Uint32 k = 0;
+ while (k < size) {
+ char* p = (char*)&conf->tableData[pos];
+ for (Uint32 j = 0; j < 4; j++) {
+ if (k < size)
+ *p++ = tablePtr.p->tableName[k++];
+ else
+ *p++ = 0;
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ }
+ }
+ // XXX merge with above somehow
+ for (i = 0; i < c_triggerRecordPool.getSize(); i++) {
+ if (reqListIndexes)
+ break;
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, i);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED)
+ continue;
+ // constant 10 hardcoded
+ Uint32 type = 10 + triggerPtr.p->triggerType;
+ if (reqTableType != 0 && reqTableType != type)
+ continue;
+ conf->tableData[pos] = 0;
+ conf->setTableId(pos, triggerPtr.i);
+ conf->setTableType(pos, type);
+ switch (triggerPtr.p->triggerState) {
+ case TriggerRecord::TS_OFFLINE:
+ conf->setTableState(pos, DictTabInfo::StateOffline);
+ break;
+ case TriggerRecord::TS_ONLINE:
+ conf->setTableState(pos, DictTabInfo::StateOnline);
+ break;
+ default:
+ conf->setTableState(pos, DictTabInfo::StateBroken);
+ break;
+ }
+ conf->setTableStore(pos, DictTabInfo::StoreTemporary);
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ if (! reqListNames)
+ continue;
+ const Uint32 size = strlen(triggerPtr.p->triggerName) + 1;
+ conf->tableData[pos] = size;
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ Uint32 k = 0;
+ while (k < size) {
+ char* p = (char*)&conf->tableData[pos];
+ for (Uint32 j = 0; j < 4; j++) {
+ if (k < size)
+ *p++ = triggerPtr.p->triggerName[k++];
+ else
+ *p++ = 0;
+ }
+ pos++;
+ if (pos >= ListTablesConf::DataLength) {
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::SignalLength, JBB);
+ conf->counter++;
+ pos = 0;
+ }
+ }
+ }
+ // last signal must have less than max length
+ sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal,
+ ListTablesConf::HeaderLength + pos, JBB);
+}
+
+/**
+ * MODULE: Create index
+ *
+ * Create index in DICT via create table operation. Then invoke alter
+ * index operation to bring the index online.
+ *
+ * Request type in CREATE_INDX signals:
+ *
+ * RT_USER - from API to DICT master
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_COMMIT - commit participants
+ * RT_TC - create index in TC (part of alter index operation)
+ */
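+/**
+ * The master forwards the RT_USER request to all alive DICT instances with
+ * an operation key appended (SignalLength + 1 words). Each slave seizes an
+ * operation record keyed on it, and the master drives the create table and
+ * alter index steps from the slave replies before commit or abort.
+ */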
+
+void
+Dbdict::execCREATE_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ OpCreateIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == CreateIndxReq::RT_USER) {
+ jam();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ if (signal->getLength() == CreateIndxReq::SignalLength) {
+ jam();
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpCreateIndex opBusy;
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = 0;
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
+ opPtr.p->m_errorCode = CreateIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == CreateIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpCreateIndex opBusy;
+ if (! c_opCreateIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opCreateIndex.add(opPtr);
+ // save attribute list
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if (! r0.getWord(&opPtr.p->m_attrList.sz) ||
+ ! r0.getWords(opPtr.p->m_attrList.id, opPtr.p->m_attrList.sz)) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ // save name and index table properties
+ signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION);
+ SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool());
+ DictTabInfo::Table tableDesc;
+ tableDesc.init();
+ SimpleProperties::UnpackStatus status = SimpleProperties::unpack(
+ r1, &tableDesc,
+ DictTabInfo::TableMapping, DictTabInfo::TableMappingSize,
+ true, true);
+ if (status != SimpleProperties::Eof) {
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE);
+ opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag;
+ releaseSections(signal);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ createIndex_slavePrepare(signal, opPtr);
+ createIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opCreateIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
+ requestType == CreateIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT) {
+ opPtr.p->m_request.setIndexId(req->getIndexId());
+ opPtr.p->m_request.setIndexVersion(req->getIndexVersion());
+ createIndex_slaveCommit(signal, opPtr);
+ } else {
+ createIndex_slaveAbort(signal, opPtr);
+ }
+ createIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opCreateIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ releaseSections(signal);
+ OpCreateIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ createIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execCREATE_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ CreateIndxConf* conf = (CreateIndxConf*)signal->getDataPtrSend();
+ createIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execCREATE_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ CreateIndxRef* ref = (CreateIndxRef*)signal->getDataPtrSend();
+ createIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
+ const CreateIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == CreateIndxReq::RT_TC) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromCreateTc(signal, opPtr);
+ return;
+ }
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == CreateIndxReq::RT_DICT_COMMIT ||
+ requestType == CreateIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ createIndex_sendReply(signal, opPtr, true);
+ c_opCreateIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateIndxReq::RT_DICT_PREPARE) {
+ jam();
+ // start index table create
+ createIndex_toCreateTable(signal, opPtr);
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 k;
+ jam();
+ const CreateIndxReq* const req = &opPtr.p->m_request;
+ // signal data writer
+ Uint32* wbuffer = &c_indexPage.word[0];
+ LinearWriter w(wbuffer, sizeof(c_indexPage) >> 2);
+ w.first();
+ // get table being indexed
+ if (! (req->getTableId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, req->getTableId());
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! tablePtr.p->isTable()) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // compute index table record
+ TableRecord indexRec;
+ TableRecordPtr indexPtr;
+ indexPtr.i = RNIL; // invalid
+ indexPtr.p = &indexRec;
+ initialiseTableRecord(indexPtr);
+ if (req->getIndexType() == DictTabInfo::UniqueHashIndex) {
+ indexPtr.p->storedTable = opPtr.p->m_storedIndex;
+ indexPtr.p->fragmentType = tablePtr.p->fragmentType;
+ } else if (req->getIndexType() == DictTabInfo::OrderedIndex) {
+ // the first version does not support logging
+ if (opPtr.p->m_storedIndex) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ indexPtr.p->storedTable = false;
+ // follows table fragmentation
+ indexPtr.p->fragmentType = tablePtr.p->fragmentType;
+ } else {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ indexPtr.p->tableType = (DictTabInfo::TableType)req->getIndexType();
+ indexPtr.p->primaryTableId = req->getTableId();
+ indexPtr.p->noOfAttributes = opPtr.p->m_attrList.sz;
+ indexPtr.p->tupKeyLength = 0;
+ if (indexPtr.p->noOfAttributes == 0) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ // tree node size in words (make configurable later)
+ indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE;
+ }
+
+ AttributeMask mask;
+ mask.clear();
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ jam();
+ unsigned current_id= opPtr.p->m_attrList.id[k];
+ AttributeRecord* aRec= NULL;
+ Uint32 tAttr= tablePtr.p->firstAttribute;
+ for (; tAttr != RNIL; tAttr= aRec->nextAttrInTable)
+ {
+ aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->attributeId != current_id)
+ continue;
+ jam();
+ break;
+ }
+ if (tAttr == RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (mask.get(current_id))
+ {
+ jam();
+ opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ mask.set(current_id);
+
+ const Uint32 a = aRec->attributeDescriptor;
+ unsigned kk= k;
+ if (indexPtr.p->isHashIndex()) {
+ const Uint32 s1 = AttributeDescriptor::getSize(a);
+ const Uint32 s2 = AttributeDescriptor::getArraySize(a);
+ indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
+ // reorder the attributes according to their attribute id order in the
+ // primary table, for unique indexes
+ for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--)
+ attrid_map[kk]= attrid_map[kk-1];
+ }
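+ // attrid_map packs the position in the request's attribute list in the
+ // low 16 bits and the table attribute id in the high 16 bits; for hash
+ // indexes the insertion above keeps the entries sorted on attribute id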
+ attrid_map[kk]= k | (current_id << 16);
+ }
+ indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes;
+ // plus concatenated primary table key attribute
+ indexPtr.p->noOfAttributes += 1;
+ indexPtr.p->noOfNullAttr = 0;
+ // write index table
+ w.add(DictTabInfo::TableName, opPtr.p->m_indexName);
+ w.add(DictTabInfo::TableLoggedFlag, indexPtr.p->storedTable);
+ w.add(DictTabInfo::FragmentTypeVal, indexPtr.p->fragmentType);
+ w.add(DictTabInfo::TableTypeVal, indexPtr.p->tableType);
+ w.add(DictTabInfo::PrimaryTable, tablePtr.p->tableName);
+ w.add(DictTabInfo::PrimaryTableId, tablePtr.i);
+ w.add(DictTabInfo::NoOfAttributes, indexPtr.p->noOfAttributes);
+ w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey);
+ w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr);
+ w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength);
+ // write index key attributes
+ AttributeRecordPtr aRecPtr;
+ c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
+ for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ // insert the attributes in the order decided above in attrid_map
+ // k is new order, current_id is in previous order
+ // ToDo: make sure "current_id" is stored with the table and
+ // passed up to NdbDictionary
+ unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff];
+ jam();
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ tAttr = aRec->nextAttrInTable;
+ if (aRec->attributeId != current_id)
+ continue;
+ jam();
+ const Uint32 a = aRec->attributeDescriptor;
+ bool isNullable = AttributeDescriptor::getNullable(a);
+ Uint32 attrType = AttributeDescriptor::getType(a);
+ w.add(DictTabInfo::AttributeName, aRec->attributeName);
+ w.add(DictTabInfo::AttributeId, k);
+ if (indexPtr.p->isHashIndex()) {
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)isNullable);
+ }
+ w.add(DictTabInfo::AttributeExtType, attrType);
+ w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
+ w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ }
+ if (indexPtr.p->isHashIndex()) {
+ jam();
+ // write concatenated primary table key attribute
+ w.add(DictTabInfo::AttributeName, "NDB$PK");
+ w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
+ w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ jam();
+ // write index tree node as Uint32 array attribute
+ w.add(DictTabInfo::AttributeName, "NDB$TNODE");
+ w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
+ w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
+ w.add(DictTabInfo::AttributeExtLength, indexPtr.p->tupKeyLength);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ // finish
+ w.add(DictTabInfo::TableEnd, (Uint32)true);
+ // release any sections still attached to the signal before reusing it
+ releaseSections(signal);
+ // send create index table request
+ CreateTableReq * const cre = (CreateTableReq*)signal->getDataPtrSend();
+ cre->senderRef = reference();
+ cre->senderData = opPtr.p->key;
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = wbuffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ,
+ signal, CreateTableReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (! opPtr.p->m_request.getOnline()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ createIndex_toAlterIndex(signal, opPtr);
+}
+
+void
+Dbdict::createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterIndxReq::RT_CREATE_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ req->setOnline(true);
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT;
+ createIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT;
+ createIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ const Uint32 indexId = opPtr.p->m_request.getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! opPtr.p->m_request.getOnline()) {
+ ndbrequire(indexPtr.p->indexState == TableRecord::IS_UNDEFINED);
+ indexPtr.p->indexState = TableRecord::IS_OFFLINE;
+ } else {
+ ndbrequire(indexPtr.p->indexState == TableRecord::IS_ONLINE);
+ }
+}
+
+void
+Dbdict::createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ CreateIndxReq* const req = &opPtr.p->m_request;
+ const Uint32 indexId = req->getIndexId();
+ if (indexId >= c_tableRecordPool.getSize()) {
+ jam();
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ return;
+ }
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
+void
+Dbdict::createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr)
+{
+ jam();
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr,
+ bool toUser)
+{
+ CreateIndxRef* rep = (CreateIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_CREATE_INDX_CONF;
+ Uint32 length = CreateIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == CreateIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = CreateIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_CREATE_INDX_REF;
+ length = CreateIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Drop index.
+ *
+ * Drop index. First alters the index offline (i.e. drops metadata in
+ * other blocks) and then drops the index table.
+ */
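+/**
+ * As for create index, the master forwards the RT_USER request to all DICT
+ * instances with an operation key appended, then drives alter index
+ * (offline), drop table and finally commit or abort from the slave replies.
+ */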
+
+void
+Dbdict::execDROP_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ OpDropIndexPtr opPtr;
+
+ int err = DropIndxRef::BadRequestType;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == DropIndxReq::RT_USER) {
+ jam();
+ if (signal->getLength() == DropIndxReq::SignalLength) {
+ jam();
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ err = DropIndxRef::NotMaster;
+ goto error;
+ }
+ // forward initial request plus operation key to all
+ Uint32 indexId= req->getIndexId();
+ Uint32 indexVersion= req->getIndexVersion();
+ TableRecordPtr tmp;
+ int res = getMetaTablePtr(tmp, indexId, indexVersion);
+ switch(res){
+ case MetaData::InvalidArgument:
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ case MetaData::TableNotFound:
+ case MetaData::InvalidTableVersion:
+ err = DropIndxRef::InvalidIndexVersion;
+ goto error;
+ }
+
+ if (! tmp.p->isIndex()) {
+ jam();
+ err = DropIndxRef::NotAnIndex;
+ goto error;
+ }
+
+ if (tmp.p->indexState == TableRecord::IS_DROPPING){
+ jam();
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ }
+
+ tmp.p->indexState = TableRecord::IS_DROPPING;
+
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == DropIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpDropIndex opBusy;
+ if (! c_opDropIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ dropIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opDropIndex.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ dropIndex_slavePrepare(signal, opPtr);
+ dropIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opDropIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == DropIndxReq::RT_DICT_COMMIT ||
+ requestType == DropIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == DropIndxReq::RT_DICT_COMMIT)
+ dropIndex_slaveCommit(signal, opPtr);
+ else
+ dropIndex_slaveAbort(signal, opPtr);
+ dropIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opDropIndex.release(opPtr);
+ return;
+ }
+ }
+error:
+ jam();
+ // return to sender
+ OpDropIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ dropIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execDROP_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ DropIndxConf* conf = (DropIndxConf*)signal->getDataPtrSend();
+ dropIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execDROP_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ DropIndxRef* ref = (DropIndxRef*)signal->getDataPtrSend();
+ dropIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
+ const DropIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == DropIndxReq::RT_TC) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromDropTc(signal, opPtr);
+ return;
+ }
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == DropIndxReq::RT_DICT_COMMIT ||
+ requestType == DropIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ dropIndex_sendReply(signal, opPtr, true);
+ c_opDropIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == DropIndxReq::RT_DICT_PREPARE) {
+ jam();
+ // start alter offline
+ dropIndex_toAlterIndex(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropIndxReq* const req = &opPtr.p->m_request;
+ // check index exists
+ TableRecordPtr indexPtr;
+ if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
+ if (indexPtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ opPtr.p->m_errorCode = DropIndxRef::NotAnIndex;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // ignore incoming primary table id
+ req->setTableId(indexPtr.p->primaryTableId);
+}
+
+void
+Dbdict::dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterIndxReq::RT_DROP_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ req->setOnline(false);
+ sendSignal(reference(), GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ dropIndex_toDropTable(signal, opPtr);
+}
+
+void
+Dbdict::dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropTableReq* const req = (DropTableReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = opPtr.p->key;
+ req->tableId = opPtr.p->m_request.getIndexId();
+ req->tableVersion = opPtr.p->m_request.getIndexVersion();
+ sendSignal(reference(), GSN_DROP_TABLE_REQ,
+ signal,DropTableReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = DropIndxReq::RT_DICT_COMMIT;
+ dropIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr)
+{
+ jam();
+ DropIndxReq* const req = &opPtr.p->m_request;
+ const Uint32 indexId = req->getIndexId();
+ if (indexId >= c_tableRecordPool.getSize()) {
+ jam();
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
+void
+Dbdict::dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr)
+{
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr,
+ bool toUser)
+{
+ DropIndxRef* rep = (DropIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_DROP_INDX_CONF;
+ Uint32 length = DropIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == DropIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = DropIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setIndexVersion(opPtr.p->m_request.getIndexVersion());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_DROP_INDX_REF;
+ length = DropIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/*****************************************************
+ *
+ * Util signalling
+ *
+ *****************************************************/
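+
+/*
+ * Overview (informal, derived from the code below): each outgoing
+ * UTIL_* request is tied to a callback by seizing an OpSignalUtil
+ * record.  The caller's senderData is saved in the record and replaced
+ * by the record index, so the CONF/REF can be routed back:
+ *
+ *   sendSignalUtilReq(cb, ...)   seize record, swap senderData, send
+ *   execUTIL_*_CONF / _REF       recvSignalUtilReq restores senderData,
+ *                                releases the record and executes cb
+ */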
+
+int
+Dbdict::sendSignalUtilReq(Callback *pcallback,
+ BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections)
+{
+ jam();
+ EVENT_TRACE;
+ OpSignalUtilPtr utilRecPtr;
+
+ // Seize a Util Send record
+ if (!c_opSignalUtil.seize(utilRecPtr)) {
+ // Failed to allocate util record
+ return -1;
+ }
+ utilRecPtr.p->m_callback = *pcallback;
+
+ // should work for all util signal classes
+ UtilPrepareReq *req = (UtilPrepareReq*)signal->getDataPtrSend();
+ utilRecPtr.p->m_userData = req->getSenderData();
+ req->setSenderData(utilRecPtr.i);
+
+ if (ptr) {
+ jam();
+ sendSignal(ref, gsn, signal, length, jbuf, ptr, noOfSections);
+ } else {
+ jam();
+ sendSignal(ref, gsn, signal, length, jbuf);
+ }
+
+ return 0;
+}
+
+int
+Dbdict::recvSignalUtilReq(Signal* signal, Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ UtilPrepareConf * const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpSignalUtilPtr utilRecPtr;
+ utilRecPtr.i = req->getSenderData();
+ if ((utilRecPtr.p = c_opSignalUtil.getPtr(utilRecPtr.i)) == NULL) {
+ jam();
+ return -1;
+ }
+
+ req->setSenderData(utilRecPtr.p->m_userData);
+ Callback c = utilRecPtr.p->m_callback;
+ c_opSignalUtil.release(utilRecPtr);
+
+ execute(signal, c, returnCode);
+ return 0;
+}
+
+void Dbdict::execUTIL_PREPARE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+
+void
+Dbdict::execUTIL_PREPARE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+
+void Dbdict::execUTIL_EXECUTE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+
+void Dbdict::execUTIL_EXECUTE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+#ifdef EVENT_DEBUG
+ UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend();
+
+ ndbout_c("execUTIL_EXECUTE_REF");
+ ndbout_c("senderData %u",ref->getSenderData());
+ ndbout_c("errorCode %u",ref->getErrorCode());
+ ndbout_c("TCErrorCode %u",ref->getTCErrorCode());
+#endif
+
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+void Dbdict::execUTIL_RELEASE_CONF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(false);
+ ndbrequire(recvSignalUtilReq(signal, 0) == 0);
+}
+void Dbdict::execUTIL_RELEASE_REF(Signal *signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ ndbrequire(false);
+ ndbrequire(recvSignalUtilReq(signal, 1) == 0);
+}
+
+/**
+ * MODULE: Create event
+ *
+ * Create event in DICT.
+ *
+ *
+ * Request type in CREATE_EVNT signals:
+ *
+ * Signal flow: see Dbdict.txt
+ *
+ */
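+
+/*
+ * Signal flow sketch (informal summary of the handlers below):
+ *
+ *   RT_USER_CREATE:  API  -> DICT          CREATE_EVNT_REQ
+ *                    DICT -> SUMA          CREATE_SUBID_REQ (allocate subid/subkey)
+ *                    DICT -> UTIL          insert row in NDB$EVENTS_0
+ *                    DICT -> API           CREATE_EVNT_CONF/REF
+ *
+ *   RT_USER_GET:     API  -> DICT          CREATE_EVNT_REQ (coordinator)
+ *                    DICT -> UTIL          read row in NDB$EVENTS_0
+ *                    DICT -> all DICT      CREATE_EVNT_REQ (RT_DICT_AFTER_GET)
+ *                    each DICT -> SUMA     SUB_CREATE_REQ, SUB_SYNC_REQ
+ *                    DICT -> API           CREATE_EVNT_CONF/REF
+ */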
+
+/*****************************************************************
+ *
+ * Systable stuff
+ *
+ */
+
+const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = {
+ sizeof(((sysTab_NDBEVENTS_0*)0)->NAME),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->SUBKEY)
+};
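+
+/*
+ * Note: the sizes above are in bytes and must match the field layout of
+ * sysTab_NDBEVENTS_0; executeTransEventSysTable() divides them by 4 to
+ * get the word counts passed to AttributeHeader::init().
+ */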
+
+void
+Dbdict::prepareTransactionEventSysTable (Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ UtilPrepareReq::OperationTypeValue prepReq)
+{
+ // find table id for event system table
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, EVENT_SYSTEM_TABLE_NAME);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ ndbrequire(tablePtr.i != RNIL); // system table must exist
+
+ Uint32 tableId = tablePtr.p->tableId; /* System table */
+ Uint32 noAttr = tablePtr.p->noOfAttributes;
+ ndbrequire(noAttr == EVENT_SYSTEM_TABLE_LENGTH);
+
+ switch (prepReq) {
+ case UtilPrepareReq::Update:
+ case UtilPrepareReq::Insert:
+ case UtilPrepareReq::Write:
+ case UtilPrepareReq::Read:
+ jam();
+ break;
+ case UtilPrepareReq::Delete:
+ jam();
+ noAttr = 1; // only involves the primary key, which should be the first attribute
+ break;
+ }
+ prepareUtilTransaction(pcallback, signal, senderData, tableId, NULL,
+ prepReq, noAttr, NULL, NULL);
+}
+
+void
+Dbdict::prepareUtilTransaction(Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 tableId,
+ const char* tableName,
+ UtilPrepareReq::OperationTypeValue prepReq,
+ Uint32 noAttr,
+ Uint32 attrIds[],
+ const char *attrNames[])
+{
+ jam();
+ EVENT_TRACE;
+
+ UtilPrepareReq * utilPrepareReq =
+ (UtilPrepareReq *)signal->getDataPtrSend();
+
+ utilPrepareReq->setSenderRef(reference());
+ utilPrepareReq->setSenderData(senderData);
+
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, prepReq);
+ if (tableName) {
+ jam();
+ w.add(UtilPrepareReq::TableName, tableName);
+ } else {
+ jam();
+ w.add(UtilPrepareReq::TableId, tableId);
+ }
+ for(Uint32 i = 0; i < noAttr; i++)
+ if (tableName) {
+ jam();
+ w.add(UtilPrepareReq::AttributeName, attrNames[i]);
+ } else {
+ if (attrIds) {
+ jam();
+ w.add(UtilPrepareReq::AttributeId, attrIds[i]);
+ } else {
+ jam();
+ w.add(UtilPrepareReq::AttributeId, i);
+ }
+ }
+#ifdef EVENT_DEBUG
+ // Debugging
+ SimplePropertiesLinearReader reader(propPage, w.getWordsUsed());
+ printf("Dict::prepareInsertTransactions: Sent SimpleProperties:\n");
+ reader.printAll(ndbout);
+#endif
+
+ struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections];
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage;
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
+
+ sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
+ UtilPrepareReq::SignalLength, JBB,
+ sectionsPtr, UtilPrepareReq::NoOfSections);
+}
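+
+/*
+ * Usage sketch (illustrative only): to prepare a read of all attributes
+ * of a table known by id, a caller would do roughly
+ *
+ *   Callback c = { safe_cast(&Dbdict::someCallback), 0 };
+ *   prepareUtilTransaction(&c, signal, senderData, tableId, NULL,
+ *                          UtilPrepareReq::Read, noAttr, NULL, NULL);
+ *
+ * as prepareTransactionEventSysTable() does for NDB$EVENTS_0.
+ * "someCallback" is a placeholder here, not an existing method.
+ */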
+
+/*****************************************************************
+ *
+ * CREATE_EVNT_REQ has three request types: RT_USER_CREATE and
+ * RT_USER_GET (from the user) and RT_DICT_AFTER_GET, sent from the
+ * master DICT to the slaves.
+ *
+ * This function just dispatches these to
+ *
+ * createEvent_RT_USER_CREATE
+ * createEvent_RT_USER_GET
+ * createEvent_RT_DICT_AFTER_GET
+ *
+ * respectively.
+ *
+ */
+
+void
+Dbdict::execCREATE_EVNT_REQ(Signal* signal)
+{
+ jamEntry();
+
+#if 0
+ {
+ SafeCounterHandle handle;
+ {
+ SafeCounter tmp(c_counterMgr, handle);
+ tmp.init<CreateEvntRef>(CMVMI, GSN_DUMP_STATE_ORD, /* senderData */ 13);
+ tmp.clearWaitingFor();
+ tmp.setWaitingFor(3);
+ ndbrequire(!tmp.done());
+ ndbout_c("Allocted");
+ }
+ ndbrequire(!handle.done());
+ {
+ SafeCounter tmp(c_counterMgr, handle);
+ tmp.clearWaitingFor(3);
+ ndbrequire(tmp.done());
+ ndbout_c("Deallocted");
+ }
+ ndbrequire(handle.done());
+ }
+ {
+ NodeBitmask nodes;
+ nodes.clear();
+
+ nodes.set(2);
+ nodes.set(3);
+ nodes.set(4);
+ nodes.set(5);
+
+ {
+ Uint32 i = 0;
+ while((i = nodes.find(i)) != NodeBitmask::NotFound){
+ ndbout_c("1 Node id = %u", i);
+ i++;
+ }
+ }
+
+ NodeReceiverGroup rg(DBDICT, nodes);
+ RequestTracker rt2;
+ ndbrequire(rt2.done());
+ ndbrequire(!rt2.hasRef());
+ ndbrequire(!rt2.hasConf());
+ rt2.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
+
+ RequestTracker rt3;
+ rt3.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportRef(c_counterMgr, 2);
+ rt3.reportConf(c_counterMgr, 2);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 3);
+ rt3.reportConf(c_counterMgr, 3);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 4);
+ rt3.reportConf(c_counterMgr, 4);
+
+ ndbrequire(!rt2.done());
+ ndbrequire(!rt3.done());
+
+ rt2.reportConf(c_counterMgr, 5);
+ rt3.reportConf(c_counterMgr, 5);
+
+ ndbrequire(rt2.done());
+ ndbrequire(rt3.done());
+ }
+#endif
+
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+
+ CreateEvntReq *req = (CreateEvntReq*)signal->getDataPtr();
+ const CreateEvntReq::RequestType requestType = req->getRequestType();
+ const Uint32 requestFlag = req->getRequestFlag();
+
+ OpCreateEventPtr evntRecPtr;
+ // Seize a Create Event record
+ if (!c_opCreateEvent.seize(evntRecPtr)) {
+ // Failed to allocate event record
+ jam();
+ releaseSections(signal);
+
+ CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
+ ret->senderRef = reference();
+ ret->setErrorCode(CreateEvntRef::SeizeError);
+ ret->setErrorLine(__LINE__);
+ ret->setErrorNode(reference());
+ sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal,
+ CreateEvntRef::SignalLength, JBB);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i);
+#endif
+
+ ndbrequire(req->getUserRef() == signal->getSendersBlockRef());
+
+ evntRecPtr.p->init(req,this);
+
+ if (requestFlag & (Uint32)CreateEvntReq::RT_DICT_AFTER_GET) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_DICT_AFTER_GET(signal, evntRecPtr);
+ return;
+ }
+ if (requestType == CreateEvntReq::RT_USER_GET) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_USER_GET(signal, evntRecPtr);
+ return;
+ }
+ if (requestType == CreateEvntReq::RT_USER_CREATE) {
+ jam();
+ EVENT_TRACE;
+ createEvent_RT_USER_CREATE(signal, evntRecPtr);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ other" << endl;
+#endif
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/********************************************************************
+ *
+ * Event creation
+ *
+ *****************************************************************/
+
+void
+Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
+
+#ifdef EVENT_DEBUG
+ ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl;
+ char buf[128] = {0};
+ AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask();
+ mask.getText(buf);
+ ndbout_c("mask = %s", buf);
+#endif
+
+ // Interpret the long signal
+
+ SegmentedSectionPtr ssPtr;
+ // save name and event properties
+ signal->getSection(ssPtr, CreateEvntReq::EVENT_NAME_SECTION);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ // event name
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+#ifdef EVENT_DEBUG
+ printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n",
+ evntRecPtr.p->m_eventRec.NAME, len);
+ for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
+ printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
+ printf("\n");
+#endif
+ }
+ // table name
+ if ((!r0.next()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
+ memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("event name: %s",evntRecPtr.p->m_eventRec.NAME);
+ ndbout_c("table name: %s",evntRecPtr.p->m_eventRec.TABLE_NAME);
+#endif
+
+ releaseSections(signal);
+
+ // Send request to SUMA
+
+ CreateSubscriptionIdReq * sumaIdReq =
+ (CreateSubscriptionIdReq *)signal->getDataPtrSend();
+
+ // make sure we save the original sender for later
+ sumaIdReq->senderData = evntRecPtr.i;
+#ifdef EVENT_DEBUG
+ ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
+#endif
+ sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
+ CreateSubscriptionIdReq::SignalLength, JBB);
+ // we should now return in either execCREATE_SUBID_CONF
+ // or execCREATE_SUBID_REF
+}
+
+void Dbdict::execCREATE_SUBID_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateSubscriptionIdRef * const ref =
+ (CreateSubscriptionIdRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->senderData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execCREATE_SUBID_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ CreateSubscriptionIdConf const * sumaIdConf =
+ (CreateSubscriptionIdConf *)signal->getDataPtr();
+
+ Uint32 evntRecId = sumaIdConf->senderData;
+ OpCreateEvent *evntRec;
+
+ ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
+
+ evntRec->m_request.setEventId(sumaIdConf->subscriptionId);
+ evntRec->m_request.setEventKey(sumaIdConf->subscriptionKey);
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecId,
+ UtilPrepareReq::Insert);
+}
+
+void
+Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal,
+ OpCreateEventPtr evntRecPtr){
+ jam();
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/*********************************************************************
+ *
+ * UTIL_PREPARE, UTIL_EXECUTE
+ *
+ * insert or read systable NDB$EVENTS_0
+ */
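+
+/*
+ * Rough outline of the UTIL interaction used here: a prepare phase
+ * (UTIL_PREPARE_REQ with a properties section giving the operation type
+ * and table id) returns a prepareId; the execute phase
+ * (UTIL_EXECUTE_REQ with an attribute-header section and a data
+ * section, release flag set) then runs the operation against
+ * NDB$EVENTS_0 and releases the prepared operation.
+ */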
+
+void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode,
+ bool& temporary, Uint32& line)
+{
+ switch (errorCode) {
+ case UtilPrepareRef::NO_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARE_SEIZE_ERROR:
+ jam();
+ temporary = true;
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::DICT_TAB_INFO_ERROR:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ case UtilPrepareRef::MISSING_PROPERTIES_SECTION:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ default:
+ jam();
+ line = __LINE__;
+ EVENT_TRACE;
+ break;
+ }
+}
+
+void
+Dbdict::createEventUTIL_PREPARE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode == 0) {
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+ jam();
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 };
+
+ switch (evntRecPtr.p->m_requestType) {
+ case CreateEvntReq::RT_USER_GET:
+#ifdef EVENT_DEBUG
+ printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
+#endif
+ jam();
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Read);
+ break;
+ case CreateEvntReq::RT_USER_CREATE:
+#ifdef EVENT_DEBUG
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ {
+ evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType();
+ AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask();
+ memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m,
+ sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK));
+ evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId();
+ evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey();
+ }
+ jam();
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Insert);
+ break;
+ default:
+#ifdef EVENT_DEBUG
+ printf("type = %d\n", evntRecPtr.p->m_requestType);
+ printf("bet type = %d\n", CreateEvntReq::RT_USER_GET);
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ ndbrequire(false);
+ }
+ } else { // returnCode != 0
+ UtilPrepareRef* const ref = (UtilPrepareRef*)signal->getDataPtr();
+
+ const UtilPrepareRef::ErrorCode errorCode =
+ (UtilPrepareRef::ErrorCode)ref->getErrorCode();
+
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ bool temporary = false;
+ interpretUtilPrepareErrorCode(errorCode,
+ temporary, evntRecPtr.p->m_errorLine);
+ if (temporary) {
+ evntRecPtr.p->m_errorCode =
+ CreateEvntRef::makeTemporary(CreateEvntRef::Undefined);
+ }
+
+ if (evntRecPtr.p->m_errorCode == 0) {
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ }
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ }
+}
+
+void Dbdict::executeTransEventSysTable(Callback *pcallback, Signal *signal,
+ const Uint32 ptrI,
+ sysTab_NDBEVENTS_0& m_eventRec,
+ const Uint32 prepareId,
+ UtilPrepareReq::OperationTypeValue prepReq)
+{
+ jam();
+ const Uint32 noAttr = EVENT_SYSTEM_TABLE_LENGTH;
+ Uint32 total_len = 0;
+
+ Uint32* attrHdr = signal->theData + 25;
+ Uint32* attrPtr = attrHdr;
+
+ Uint32 id=0;
+ // attribute 0 event name: Primary Key
+ {
+ AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
+ total_len += sysTab_NDBEVENTS_0_szs[id];
+ attrPtr++; id++;
+ }
+
+ switch (prepReq) {
+ case UtilPrepareReq::Read:
+ jam();
+ EVENT_TRACE;
+ // no data sent for the remaining attributes; init headers with zero size
+ while ( id < noAttr )
+ AttributeHeader::init(attrPtr++, id++, 0);
+ ndbrequire(id == (Uint32) noAttr);
+ break;
+ case UtilPrepareReq::Insert:
+ jam();
+ EVENT_TRACE;
+ while ( id < noAttr ) {
+ AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4);
+ total_len += sysTab_NDBEVENTS_0_szs[id];
+ attrPtr++; id++;
+ }
+ ndbrequire(id == (Uint32) noAttr);
+ break;
+ case UtilPrepareReq::Delete:
+ ndbrequire(id == 1);
+ break;
+ default:
+ ndbrequire(false);
+ }
+
+ LinearSectionPtr headerPtr;
+ LinearSectionPtr dataPtr;
+
+ headerPtr.p = attrHdr;
+ headerPtr.sz = noAttr;
+
+ dataPtr.p = (Uint32*)&m_eventRec;
+ dataPtr.sz = total_len/4;
+
+ ndbrequire((total_len == sysTab_NDBEVENTS_0_szs[0]) ||
+ (total_len == sizeof(sysTab_NDBEVENTS_0)));
+
+#if 0
+ printf("Header size %u\n", headerPtr.sz);
+ for(int i = 0; i < (int)headerPtr.sz; i++)
+ printf("H'%.8x ", attrHdr[i]);
+ printf("\n");
+
+ printf("Data size %u\n", dataPtr.sz);
+ for(int i = 0; i < (int)dataPtr.sz; i++)
+ printf("H'%.8x ", dataPage[i]);
+ printf("\n");
+#endif
+
+ executeTransaction(pcallback, signal,
+ ptrI,
+ prepareId,
+ id,
+ headerPtr,
+ dataPtr);
+}
+
+void Dbdict::executeTransaction(Callback *pcallback,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 prepareId,
+ Uint32 noAttr,
+ LinearSectionPtr headerPtr,
+ LinearSectionPtr dataPtr)
+{
+ jam();
+ EVENT_TRACE;
+
+ UtilExecuteReq * utilExecuteReq =
+ (UtilExecuteReq *)signal->getDataPtrSend();
+
+ utilExecuteReq->setSenderRef(reference());
+ utilExecuteReq->setSenderData(senderData);
+ utilExecuteReq->setPrepareId(prepareId);
+ utilExecuteReq->setReleaseFlag(); // must be done after setting prepareId
+
+#if 0
+ printf("Header size %u\n", headerPtr.sz);
+ for(int i = 0; i < (int)headerPtr.sz; i++)
+ printf("H'%.8x ", headerBuffer[i]);
+ printf("\n");
+
+ printf("Data size %u\n", dataPtr.sz);
+ for(int i = 0; i < (int)dataPtr.sz; i++)
+ printf("H'%.8x ", dataBuffer[i]);
+ printf("\n");
+#endif
+
+ struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections];
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerPtr.p;
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataPtr.p;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz;
+
+ sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal,
+ UtilExecuteReq::SignalLength, JBB,
+ sectionsPtr, UtilExecuteReq::NoOfSections);
+}
+
+void Dbdict::parseReadEventSys(Signal* signal, sysTab_NDBEVENTS_0& m_eventRec)
+{
+ SegmentedSectionPtr headerPtr, dataPtr;
+ jam();
+ signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
+ SectionReader headerReader(headerPtr, getSectionSegmentPool());
+
+ signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
+ SectionReader dataReader(dataPtr, getSectionSegmentPool());
+
+ AttributeHeader header;
+ Uint32 *dst = (Uint32*)&m_eventRec;
+
+ for (int i = 0; i < EVENT_SYSTEM_TABLE_LENGTH; i++) {
+ headerReader.getWord((Uint32 *)&header);
+ int sz = header.getDataSize();
+ for (int i=0; i < sz; i++)
+ dataReader.getWord(dst++);
+ }
+
+ ndbrequire( ((char*)dst-(char*)&m_eventRec) == sizeof(m_eventRec) );
+
+ releaseSections(signal);
+}
+
+void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode == 0) {
+ // Entry into system table all set
+ UtilExecuteConf* const conf = (UtilExecuteConf*)signal->getDataPtr();
+ jam();
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = conf->getSenderData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+ OpCreateEvent *evntRec = evntRecPtr.p;
+
+ switch (evntRec->m_requestType) {
+ case CreateEvntReq::RT_USER_GET: {
+#ifdef EVENT_DEBUG
+ printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
+#endif
+ parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
+
+ evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE);
+ evntRec->m_request.setAttrListBitmask(*(AttributeMask*)evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK);
+ evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID);
+ evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY);
+
+#ifdef EVENT_DEBUG
+ printf("EventName: %s\n", evntRec->m_eventRec.NAME);
+ printf("TableName: %s\n", evntRec->m_eventRec.TABLE_NAME);
+#endif
+
+ // find table id for event table
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, evntRecPtr.p->m_eventRec.TABLE_NAME);
+
+ TableRecordPtr tablePtr;
+ c_tableRecordHash.find(tablePtr, keyRecord);
+
+ if (tablePtr.i == RNIL) {
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ evntRec->m_request.setTableId(tablePtr.p->tableId);
+
+ createEventComplete_RT_USER_GET(signal, evntRecPtr);
+ return;
+ }
+ case CreateEvntReq::RT_USER_CREATE: {
+#ifdef EVENT_DEBUG
+ printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
+#endif
+ jam();
+ createEventComplete_RT_USER_CREATE(signal, evntRecPtr);
+ return;
+ }
+ break;
+ default:
+ ndbrequire(false);
+ }
+ } else { // returnCode != 0
+ UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+ jam();
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+
+ switch (ref->getErrorCode()) {
+ case UtilExecuteRef::TCError:
+ switch (ref->getTCErrorCode()) {
+ case ZNOT_FOUND:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::EventNotFound;
+ break;
+ case ZALREADYEXIST:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists;
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::UndefinedTCError;
+ break;
+ }
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ break;
+ }
+
+ createEvent_sendReply(signal, evntRecPtr);
+ }
+}
+
+/***********************************************************************
+ *
+ * NdbEventOperation, reading systable, creating event in suma
+ *
+ */
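+
+/*
+ * Informal note: createEvent_RT_USER_GET runs in the coordinator.  It
+ * reads the event row from NDB$EVENTS_0 (via the UTIL prepare/execute
+ * callbacks above), then createEventComplete_RT_USER_GET broadcasts the
+ * request with RT_DICT_AFTER_GET to all alive DICT nodes, tracking the
+ * CREATE_EVNT_CONF/REF replies with a RequestTracker before answering
+ * the API in createEvent_sendReply.
+ */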
+
+void
+Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ EVENT_TRACE;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REQ::RT_USER_GET evntRecPtr.i = (%d), ref = %u", evntRecPtr.i, evntRecPtr.p->m_request.getUserRef());
+#endif
+
+ SegmentedSectionPtr ssPtr;
+
+ signal->getSection(ssPtr, 0);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Read);
+ /*
+ * Will read systable and fill an OpCreateEventPtr
+ * and return below
+ */
+}
+
+void
+Dbdict::createEventComplete_RT_USER_GET(Signal* signal,
+ OpCreateEventPtr evntRecPtr){
+ jam();
+
+ // Send to oneself and the other DICT's
+ CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend();
+
+ *req = evntRecPtr.p->m_request;
+ req->senderRef = reference();
+ req->senderData = evntRecPtr.i;
+
+ req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = evntRecPtr.p->m_reqTracker;
+ p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i);
+
+ sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
+ Uint32 returnCode){
+ OpCreateEventPtr evntRecPtr;
+ c_opCreateEvent.getPtr(evntRecPtr, eventRecPtrI);
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execCREATE_EVNT_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->getUserData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF){
+ jam();
+ evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(ref->senderRef));
+ } else {
+ jam();
+ evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(ref->senderRef));
+ }
+ createEvent_sendReply(signal, evntRecPtr);
+
+ return;
+}
+
+void Dbdict::execCREATE_EVNT_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ CreateEvntConf * const conf = (CreateEvntConf *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = conf->getUserData();
+
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_CONF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ evntRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(conf->senderRef));
+
+ // we will only have a valid tablename if it is the master DICT sending this
+ // but that's ok
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32 *)evntRecPtr.p->m_eventRec.TABLE_NAME;
+ ptr[0].sz =
+ (strlen(evntRecPtr.p->m_eventRec.TABLE_NAME)+4)/4; // round up to words, keeping the terminating null
+
+ createEvent_sendReply(signal, evntRecPtr, ptr, 1);
+
+ return;
+}
+
+/************************************************
+ *
+ * Participant stuff
+ *
+ */
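+
+/*
+ * Informal note: as participant, each DICT node (including the
+ * coordinator itself) forwards the request to its local SUMA:
+ * SUB_CREATE_REQ, then on SUB_CREATE_CONF a SUB_SYNC_REQ for the
+ * MetaData part, and finally replies to the coordinator through
+ * createEvent_sendReply.
+ */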
+
+void
+Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
+ jam();
+ evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ // the signal comes from the DICT block that got the first user request!
+ // This code runs on all DICT nodes, including oneself
+
+ // Seize a Create Event record, the Coordinator will now have two seized
+ // but that's ok, it's like a recursion
+
+ SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
+
+ sumaReq->subscriberRef = reference(); // reference to DICT
+ sumaReq->subscriberData = evntRecPtr.i;
+ sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId();
+ sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey();
+ sumaReq->subscriptionType = SubCreateReq::TableEvent;
+ sumaReq->tableId = evntRecPtr.p->m_request.getTableId();
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("sending GSN_SUB_CREATE_REQ");
+#endif
+
+ sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal,
+ SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
+}
+
+void Dbdict::execSUB_CREATE_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got SUB_CREATE_REF evntRecPtr.i = (%d)", evntRecPtr.i);
+#endif
+
+ if (ref->err == GrepError::SUBSCRIPTION_ID_NOT_UNIQUE) {
+ jam();
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("SUBSCRIPTION_ID_NOT_UNIQUE");
+#endif
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("Other error");
+#endif
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execSUB_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr();
+
+ const Uint32 subscriptionId = sumaConf->subscriptionId;
+ const Uint32 subscriptionKey = sumaConf->subscriptionKey;
+ const Uint32 evntRecId = sumaConf->subscriberData;
+
+ OpCreateEvent *evntRec;
+ ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
+
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT(Participant) got SUB_CREATE_CONF evntRecPtr.i = (%d)", evntRecId);
+#endif
+
+ SubSyncReq *sumaSync = (SubSyncReq *)signal->getDataPtrSend();
+
+ sumaSync->subscriptionId = subscriptionId;
+ sumaSync->subscriptionKey = subscriptionKey;
+ sumaSync->part = (Uint32) SubscriptionData::MetaData;
+ sumaSync->subscriberData = evntRecId;
+
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal,
+ SubSyncReq::SignalLength, JBB);
+}
+
+void Dbdict::execSUB_SYNC_REF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+ SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = ref->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::execSUB_SYNC_CONF(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ SubSyncConf * const sumaSyncConf = (SubSyncConf *)signal->getDataPtr();
+
+ // Uint32 subscriptionId = sumaSyncConf->subscriptionId;
+ // Uint32 subscriptionKey = sumaSyncConf->subscriptionKey;
+ OpCreateEventPtr evntRecPtr;
+
+ evntRecPtr.i = sumaSyncConf->subscriberData;
+ ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ ndbrequire(sumaSyncConf->part == (Uint32)SubscriptionData::MetaData);
+
+ createEvent_sendReply(signal, evntRecPtr);
+}
+
+/****************************************************
+ *
+ * common create reply method
+ *
+ *******************************************************/
+
+void Dbdict::createEvent_sendReply(Signal* signal,
+ OpCreateEventPtr evntRecPtr,
+ LinearSectionPtr *ptr, int noLSP)
+{
+ jam();
+ EVENT_TRACE;
+
+ // check if we're ready to send the reply
+ // if we are the master dict we might be waiting for conf/ref
+
+ if (!evntRecPtr.p->m_reqTracker.done()) {
+ jam();
+ return; // there's more to come
+ }
+
+ if (evntRecPtr.p->m_reqTracker.hasRef()) {
+ ptr = NULL; // we don't want to return anything if there's an error
+ if (!evntRecPtr.p->hasError()) {
+ evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+ jam();
+ } else
+ jam();
+ }
+
+ // reference to API if master DICT
+ // else reference to master DICT
+ Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
+ Uint32 signalLength;
+ Uint32 gsn;
+
+ if (evntRecPtr.p->hasError()) {
+ jam();
+ EVENT_TRACE;
+ CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
+
+ ret->setEventId(evntRecPtr.p->m_request.getEventId());
+ ret->setEventKey(evntRecPtr.p->m_request.getEventKey());
+ ret->setUserData(evntRecPtr.p->m_request.getUserData());
+ ret->senderRef = reference();
+ ret->setTableId(evntRecPtr.p->m_request.getTableId());
+ ret->setEventType(evntRecPtr.p->m_request.getEventType());
+ ret->setRequestType(evntRecPtr.p->m_request.getRequestType());
+
+ ret->setErrorCode(evntRecPtr.p->m_errorCode);
+ ret->setErrorLine(evntRecPtr.p->m_errorLine);
+ ret->setErrorNode(evntRecPtr.p->m_errorNode);
+
+ signalLength = CreateEvntRef::SignalLength;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT sending GSN_CREATE_EVNT_REF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
+ ndbout_c("errorCode = %u", evntRecPtr.p->m_errorCode);
+ ndbout_c("errorLine = %u", evntRecPtr.p->m_errorLine);
+#endif
+ gsn = GSN_CREATE_EVNT_REF;
+
+ } else {
+ jam();
+ EVENT_TRACE;
+ CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend();
+
+ evntConf->setEventId(evntRecPtr.p->m_request.getEventId());
+ evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey());
+ evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
+ evntConf->senderRef = reference();
+ evntConf->setTableId(evntRecPtr.p->m_request.getTableId());
+ evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask());
+ evntConf->setEventType(evntRecPtr.p->m_request.getEventType());
+ evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType());
+
+ signalLength = CreateEvntConf::SignalLength;
+#ifdef EVENT_PH2_DEBUG
+ ndbout_c("DBDICT sending GSN_CREATE_EVNT_CONF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef);
+#endif
+ gsn = GSN_CREATE_EVNT_CONF;
+ }
+
+ if (ptr) {
+ jam();
+ sendSignal(senderRef, gsn, signal, signalLength, JBB, ptr, noLSP);
+ } else {
+ jam();
+ sendSignal(senderRef, gsn, signal, signalLength, JBB);
+ }
+
+ c_opCreateEvent.release(evntRecPtr);
+}
+
+/*************************************************************/
+
+/********************************************************************
+ *
+ * Start event
+ *
+ *******************************************************************/
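+
+/*
+ * Informal note: a SUB_START_REQ arriving from outside DBDICT makes
+ * this node coordinator; it broadcasts the request to all alive DICT
+ * nodes.  A request arriving from DBDICT makes the node participant;
+ * it forwards the request to its local SUMA.  Replies are merged in
+ * completeSubStartReq via the per-operation RequestTracker.
+ */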
+
+void Dbdict::execSUB_START_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
+ { // fix
+ Uint32 subcriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef;
+ ref->subscriberRef = subcriberRef;
+ }
+ jam();
+ // ret->setErrorCode(SubStartRef::SeizeError);
+ // ret->setErrorLine(__LINE__);
+ // ret->setErrorNode(reference());
+ ref->senderRef = reference();
+ ref->setTemporary(SubStartRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
+ SubStartRef::SignalLength2, JBB);
+ return;
+ }
+
+ {
+ const SubStartReq* req = (SubStartReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ if (refToBlock(origSenderRef) != DBDICT) {
+ /*
+ * Coordinator
+ */
+ jam();
+
+ subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = subbPtr.p->m_reqTracker;
+ p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i);
+
+ SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i);
+#endif
+
+ sendSignal(rg, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
+ return;
+ }
+ /*
+ * Participant
+ */
+ ndbrequire(refToBlock(origSenderRef) == DBDICT);
+
+ {
+ SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i);
+#endif
+ sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB);
+ }
+}
+
+void Dbdict::execSUB_START_REF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStartRef* ref = (SubStartRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i);
+#endif
+
+ if (ref->isTemporary()){
+ jam();
+ SubStartReq* req = (SubStartReq*)signal->getDataPtrSend();
+ { // fix
+ Uint32 subscriberRef = ref->subscriberRef;
+ req->subscriberRef = subscriberRef;
+ }
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+ sendSignal(SUMA_REF, GSN_SUB_START_REQ,
+ signal, SubStartReq::SignalLength2, JBB);
+ } else {
+ jam();
+
+ SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
+ signal, SubStartRef::SignalLength2, JBB);
+ c_opSubEvent.release(subbPtr);
+ }
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i);
+#endif
+ if (ref->errorCode == SubStartRef::NF_FakeErrorREF){
+ jam();
+ subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubStartReq(signal,subbPtr.i,0);
+}
+
+void Dbdict::execSUB_START_CONF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStartConf* conf = (SubStartConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ SubStartConf* conf = (SubStartConf*) signal->getDataPtrSend();
+
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Participant) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
+#endif
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
+ signal, SubStartConf::SignalLength2, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i);
+#endif
+ subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubStartReq(signal,subbPtr.i,0);
+}
+
+/*
+ * Coordinator
+ */
+void Dbdict::completeSubStartReq(Signal* signal,
+ Uint32 ptrI,
+ Uint32 returnCode){
+ jam();
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ptrI);
+
+ if (!subbPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (subbPtr.p->m_reqTracker.hasRef()) {
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_START_REF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
+ signal, SubStartRef::SignalLength, JBB);
+ if (subbPtr.p->m_reqTracker.hasConf()) {
+ // stopStartedNodes(signal);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_START_CONF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF,
+ signal, SubStartConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+}
+
+/********************************************************************
+ *
+ * Stop event
+ *
+ *******************************************************************/
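+
+/*
+ * Informal note: SUB_STOP follows the same coordinator/participant
+ * pattern as SUB_START above; the coordinator fans the request out to
+ * all alive DICT nodes, each participant forwards it to its local SUMA,
+ * and completeSubStopReq merges the replies.
+ */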
+
+void Dbdict::execSUB_STOP_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
+ jam();
+ // ret->setErrorCode(SubStartRef::SeizeError);
+ // ret->setErrorLine(__LINE__);
+ // ret->setErrorNode(reference());
+ ref->senderRef = reference();
+ ref->setTemporary(SubStopRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
+ SubStopRef::SignalLength, JBB);
+ return;
+ }
+
+ {
+ const SubStopReq* req = (SubStopReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ if (refToBlock(origSenderRef) != DBDICT) {
+ /*
+ * Coordinator
+ */
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REQ 1");
+#endif
+ subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = subbPtr.p->m_reqTracker;
+ p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i);
+
+ SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(rg, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ return;
+ }
+ /*
+ * Participant
+ */
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REQ 2");
+#endif
+ ndbrequire(refToBlock(origSenderRef) == DBDICT);
+ {
+ SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ }
+}
+
+void Dbdict::execSUB_STOP_REF(Signal* signal)
+{
+ jamEntry();
+ const SubStopRef* ref = (SubStopRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ if (ref->isTemporary()){
+ jam();
+ SubStopReq* req = (SubStopReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ,
+ signal, SubStopReq::SignalLength, JBB);
+ } else {
+ jam();
+ SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
+ signal, SubStopRef::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ }
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ if (ref->errorCode == SubStopRef::NF_FakeErrorREF){
+ jam();
+ subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubStopReq(signal,subbPtr.i,0);
+}
+
+void Dbdict::execSUB_STOP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ const SubStopConf* conf = (SubStopConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ SubStopConf* conf = (SubStopConf*) signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
+ signal, SubStopConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubStopReq(signal,subbPtr.i,0);
+}
+
+/*
+ * Coordinator
+ */
+void Dbdict::completeSubStopReq(Signal* signal,
+ Uint32 ptrI,
+ Uint32 returnCode){
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ptrI);
+
+ if (!subbPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (subbPtr.p->m_reqTracker.hasRef()) {
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_REF");
+#endif
+ SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend();
+
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ /*
+ ref->subscriptionId = subbPtr.p->m_senderData;
+ ref->subscriptionKey = subbPtr.p->m_senderData;
+ ref->part = subbPtr.p->m_part; // SubscriptionData::Part
+ ref->subscriberData = subbPtr.p->m_subscriberData;
+ ref->subscriberRef = subbPtr.p->m_subscriberRef;
+ */
+ ref->errorCode = subbPtr.p->m_errorCode;
+
+
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
+ signal, SubStopRef::SignalLength, JBB);
+ if (subbPtr.p->m_reqTracker.hasConf()) {
+ // stopStartedNodes(signal);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+#ifdef EVENT_DEBUG
+ ndbout_c("SUB_STOP_CONF");
+#endif
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF,
+ signal, SubStopConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+}
+
+/***************************************************************
+ * MODULE: Drop event.
+ *
+ * Drop event.
+ *
+ * TODO
+ */
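+
+/*
+ * Signal flow sketch (informal summary of the handlers below):
+ *
+ *   API  -> DICT      DROP_EVNT_REQ (event name in section 0)
+ *   DICT -> UTIL      prepare + execute Read of NDB$EVENTS_0
+ *                     (fetches the SUBID/SUBKEY of the event)
+ *   DICT -> all DICT  SUB_REMOVE_REQ, each forwarded to the local SUMA
+ *   DICT -> UTIL      prepare + execute Delete of the NDB$EVENTS_0 row
+ *   DICT -> API       DROP_EVNT_CONF/REF
+ */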
+
+void
+Dbdict::execDROP_EVNT_REQ(Signal* signal)
+{
+ jamEntry();
+ EVENT_TRACE;
+
+ DropEvntReq *req = (DropEvntReq*)signal->getDataPtr();
+ const Uint32 senderRef = signal->senderBlockRef();
+ OpDropEventPtr evntRecPtr;
+
+ // Seize a Drop Event record
+ if (!c_opDropEvent.seize(evntRecPtr)) {
+ // Failed to allocate event record
+ jam();
+ releaseSections(signal);
+
+ DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
+ ret->setErrorCode(DropEvntRef::SeizeError);
+ ret->setErrorLine(__LINE__);
+ ret->setErrorNode(reference());
+ sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
+ DropEvntRef::SignalLength, JBB);
+ return;
+ }
+
+#ifdef EVENT_DEBUG
+ ndbout_c("DBDICT::execDROP_EVNT_REQ evntRecId = (%d)", evntRecPtr.i);
+#endif
+
+ OpDropEvent* evntRec = evntRecPtr.p;
+ evntRec->init(req);
+
+ SegmentedSectionPtr ssPtr;
+
+ signal->getSection(ssPtr, 0);
+
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+#ifdef EVENT_DEBUG
+ r0.printAll(ndbout);
+#endif
+ // event name
+ if ((!r0.first()) ||
+ (r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ jam();
+ releaseSections(signal);
+
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorNode = reference();
+
+ dropEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+ r0.getString(evntRecPtr.p->m_eventRec.NAME);
+ {
+ int len = strlen(evntRecPtr.p->m_eventRec.NAME);
+ memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len);
+#ifdef EVENT_DEBUG
+ printf("DropEvntReq; EventName %s, len %u\n",
+ evntRecPtr.p->m_eventRec.NAME, len);
+ for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++)
+ printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]);
+ printf("\n");
+#endif
+ }
+
+ releaseSections(signal);
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Read);
+}
+
+void
+Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilPrepareRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_READ), 0 };
+
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Read);
+}
+
+void
+Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilExecuteRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
+
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ RequestTracker & p = evntRecPtr.p->m_reqTracker;
+ p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
+ evntRecPtr.i);
+
+ SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = evntRecPtr.i;
+ req->subscriptionId = evntRecPtr.p->m_eventRec.SUBID;
+ req->subscriptionKey = evntRecPtr.p->m_eventRec.SUBKEY;
+
+ sendSignal(rg, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
+}
+
+/*
+ * Participant
+ */
+
+void
+Dbdict::execSUB_REMOVE_REQ(Signal* signal)
+{
+ jamEntry();
+
+ Uint32 origSenderRef = signal->senderBlockRef();
+
+ OpSubEventPtr subbPtr;
+ if (!c_opSubEvent.seize(subbPtr)) {
+ SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
+ jam();
+ ref->senderRef = reference();
+ ref->setTemporary(SubRemoveRef::Busy);
+
+ sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal,
+ SubRemoveRef::SignalLength, JBB);
+ return;
+ }
+
+ {
+ const SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtr();
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_errorCode = 0;
+ }
+
+ SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+
+ sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
+}
+
+/*
+ * Coordinator/Participant
+ */
+
+void
+Dbdict::execSUB_REMOVE_REF(Signal* signal)
+{
+ jamEntry();
+ const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr();
+ Uint32 senderRef = ref->senderRef;
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, ref->senderData);
+ if (ref->errorCode == (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND) {
+ // send a CONF since this may occur if a node failure occurred
+ // earlier so that the systable was not cleared
+ SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
+ signal, SubRemoveConf::SignalLength, JBB);
+ } else {
+ SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF,
+ signal, SubRemoveRef::SignalLength, JBB);
+ }
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ OpDropEventPtr eventRecPtr;
+ c_opDropEvent.getPtr(eventRecPtr, ref->senderData);
+ if (ref->errorCode == SubRemoveRef::NF_FakeErrorREF){
+ jam();
+ eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
+ } else {
+ jam();
+ eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
+ }
+ completeSubRemoveReq(signal,eventRecPtr.i,0);
+}
+
+void
+Dbdict::execSUB_REMOVE_CONF(Signal* signal)
+{
+ jamEntry();
+ const SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtr();
+ Uint32 senderRef = conf->senderRef;
+
+ if (refToBlock(senderRef) == SUMA) {
+ /*
+ * Participant
+ */
+ jam();
+ OpSubEventPtr subbPtr;
+ c_opSubEvent.getPtr(subbPtr, conf->senderData);
+ SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF,
+ signal, SubRemoveConf::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
+ return;
+ }
+ /*
+ * Coordinator
+ */
+ ndbrequire(refToBlock(senderRef) == DBDICT);
+ OpDropEventPtr eventRecPtr;
+ c_opDropEvent.getPtr(eventRecPtr, conf->senderData);
+ eventRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef));
+ completeSubRemoveReq(signal,eventRecPtr.i,0);
+}
+
+void
+Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx)
+{
+ OpDropEventPtr evntRecPtr;
+ c_opDropEvent.getPtr(evntRecPtr, ptrI);
+
+ if (!evntRecPtr.p->m_reqTracker.done()){
+ jam();
+ return;
+ }
+
+ if (evntRecPtr.p->m_reqTracker.hasRef()) {
+ jam();
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ dropEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_DELETE), 0 };
+
+ prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
+ UtilPrepareReq::Delete);
+}
+
+void
+Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilPrepareRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ jam();
+ evntRecPtr.i = req->getSenderData();
+ const Uint32 prepareId = req->getPrepareId();
+
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+#ifdef EVENT_DEBUG
+ printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i);
+#endif
+
+ Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 };
+
+ executeTransEventSysTable(&c, signal,
+ evntRecPtr.i, evntRecPtr.p->m_eventRec,
+ prepareId, UtilPrepareReq::Delete);
+}
+
+void
+Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ if (returnCode != 0) {
+ EVENT_TRACE;
+ dropEventUtilExecuteRef(signal, callbackData, returnCode);
+ return;
+ }
+
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void
+Dbdict::dropEventUtilPrepareRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ UtilPrepareRef * const ref = (UtilPrepareRef *)signal->getDataPtr();
+ OpDropEventPtr evntRecPtr;
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ bool temporary = false;
+ interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(),
+ temporary, evntRecPtr.p->m_errorLine);
+ if (temporary) {
+ evntRecPtr.p->m_errorCode = (DropEvntRef::ErrorCode)
+ ((Uint32) DropEvntRef::Undefined | (Uint32) DropEvntRef::Temporary);
+ }
+
+ if (evntRecPtr.p->m_errorCode == 0) {
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
+ evntRecPtr.p->m_errorNode = reference();
+
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void
+Dbdict::dropEventUtilExecuteRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ EVENT_TRACE;
+ OpDropEventPtr evntRecPtr;
+ UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr();
+ jam();
+ evntRecPtr.i = ref->getSenderData();
+ ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
+
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+
+ switch (ref->getErrorCode()) {
+ case UtilExecuteRef::TCError:
+ switch (ref->getTCErrorCode()) {
+ case ZNOT_FOUND:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::EventNotFound;
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::UndefinedTCError;
+ break;
+ }
+ break;
+ default:
+ jam();
+ evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ break;
+ }
+ dropEvent_sendReply(signal, evntRecPtr);
+}
+
+void Dbdict::dropEvent_sendReply(Signal* signal,
+ OpDropEventPtr evntRecPtr)
+{
+ jam();
+ EVENT_TRACE;
+ Uint32 senderRef = evntRecPtr.p->m_request.getUserRef();
+
+ if (evntRecPtr.p->hasError()) {
+ jam();
+ DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
+
+ ret->setUserData(evntRecPtr.p->m_request.getUserData());
+ ret->setUserRef(evntRecPtr.p->m_request.getUserRef());
+
+ ret->setErrorCode(evntRecPtr.p->m_errorCode);
+ ret->setErrorLine(evntRecPtr.p->m_errorLine);
+ ret->setErrorNode(evntRecPtr.p->m_errorNode);
+
+ sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
+ DropEvntRef::SignalLength, JBB);
+ } else {
+ jam();
+ DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend();
+
+ evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
+ evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef());
+
+ sendSignal(senderRef, GSN_DROP_EVNT_CONF, signal,
+ DropEvntConf::SignalLength, JBB);
+ }
+
+ c_opDropEvent.release(evntRecPtr);
+}
+
+/**
+ * MODULE: Alter index
+ *
+ * Alter index state. Alter online creates the index in each TC and
+ * then invokes create trigger and alter trigger protocols to activate
+ * the index triggers. Alter offline does the opposite.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - from API to DICT master
+ * RT_CREATE_INDEX - part of create index operation
+ * RT_DROP_INDEX - part of drop index operation
+ * RT_NODERESTART - node restart, activate locally only
+ * RT_SYSTEMRESTART - system restart, activate and build if not logged
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_TC - to local TC via each participant
+ * RT_DICT_COMMIT - commit in each participant
+ */
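+
+/*
+ * A rough sketch of the coordinator's flow for "alter index online" on a
+ * hash index, for orientation only (the handlers below are authoritative;
+ * an ordered index skips the TC step and goes straight to trigger creation):
+ *
+ *   ALTER_INDX_REQ(RT_USER)           API -> DICT master
+ *   ALTER_INDX_REQ(RT_DICT_PREPARE)   master -> all participants
+ *   ALTER_INDX_REQ(RT_DICT_TC)        each participant creates index in local TC
+ *   CREATE_TRIG_REQ(RT_ALTER_INDEX)   create the index triggers
+ *   BUILDINDXREQ(RT_ALTER_INDEX)      build the index
+ *   ALTER_INDX_REQ(RT_DICT_COMMIT)    commit in each participant
+ *   ALTER_INDX_CONF                   reply to the API
+ */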
+
+void
+Dbdict::execALTER_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ OpAlterIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == AlterIndxReq::RT_USER ||
+ requestType == AlterIndxReq::RT_CREATE_INDEX ||
+ requestType == AlterIndxReq::RT_DROP_INDEX ||
+ requestType == AlterIndxReq::RT_NODERESTART ||
+ requestType == AlterIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == AlterIndxReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpAlterIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ alterIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == AlterIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpAlterIndex opBusy;
+ if (! c_opAlterIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ alterIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opAlterIndex.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = receiverNodes;
+ // check request in all participants
+ alterIndex_slavePrepare(signal, opPtr);
+ alterIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opAlterIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == AlterIndxReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ alterIndex_toCreateTc(signal, opPtr);
+ else
+ alterIndex_toDropTc(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
+ requestType == AlterIndxReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT)
+ alterIndex_slaveCommit(signal, opPtr);
+ else
+ alterIndex_slaveAbort(signal, opPtr);
+ alterIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opAlterIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpAlterIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ alterIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execALTER_INDX_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ AlterIndxConf* conf = (AlterIndxConf*)signal->getDataPtrSend();
+ alterIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execALTER_INDX_REF(Signal* signal)
+{
+ jamEntry();
+ AlterIndxRef* ref = (AlterIndxRef*)signal->getDataPtrSend();
+ alterIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
+ const AlterIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == AlterIndxReq::RT_CREATE_INDEX) {
+ jam();
+ // part of create index operation
+ OpCreateIndexPtr opPtr;
+ c_opCreateIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createIndex_fromAlterIndex(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DROP_INDEX) {
+ jam();
+ // part of drop index operation
+ OpDropIndexPtr opPtr;
+ c_opDropIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ dropIndex_fromAlterIndex(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_TC ||
+ requestType == AlterIndxReq::RT_TUX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ buildIndex_fromOnline(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_NODERESTART) {
+ jam();
+ if (ref == 0) {
+ infoEvent("DICT: index %u activated", (unsigned)key);
+ } else {
+ warningEvent("DICT: index %u activation failed: code=%d line=%d",
+ (unsigned)key,
+ ref->getErrorCode(), ref->getErrorLine());
+ }
+ activateIndexes(signal, key + 1);
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ if (ref == 0) {
+ infoEvent("DICT: index %u activated done", (unsigned)key);
+ } else {
+ warningEvent("DICT: index %u activated failed: code=%d line=%d node=%d",
+ (unsigned)key,
+ ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode());
+ }
+ activateIndexes(signal, key + 1);
+ return;
+ }
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_COMMIT ||
+ requestType == AlterIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ alterIndex_sendReply(signal, opPtr, true);
+ c_opAlterIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ if (indexPtr.p->isHashIndex()) {
+ if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ } else {
+ // start drop triggers
+ alterIndex_toDropTrigger(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == AlterIndxReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ // start create triggers
+ alterIndex_toCreateTrigger(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ if (requestType == AlterIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ // start create triggers
+ alterIndex_toCreateTrigger(signal, opPtr);
+ } else {
+ // start drop triggers
+ alterIndex_toDropTrigger(signal, opPtr);
+ }
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ const AlterIndxReq* const req = &opPtr.p->m_request;
+ if (! (req->getIndexId() < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::Inconsistency;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, req->getIndexId());
+ if (indexPtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::IndexNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (! indexPtr.p->isIndex()) {
+ jam();
+ opPtr.p->m_errorCode = AlterIndxRef::NotAnIndex;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ if (req->getOnline())
+ indexPtr.p->indexState = TableRecord::IS_BUILDING;
+ else
+ indexPtr.p->indexState = TableRecord::IS_DROPPING;
+}
+
+void
+Dbdict::alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to create index in local TC
+ CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateIndxReq::RT_TC);
+ req->setIndexType(indexPtr.p->tableType);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setIndexId(indexPtr.i);
+ req->setOnline(true);
+ getIndexAttrList(indexPtr, opPtr.p->m_attrList);
+ // send
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].p = (Uint32*)&opPtr.p->m_attrList;
+ lsPtr[0].sz = 1 + opPtr.p->m_attrList.sz;
+ sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_CREATE_INDX_REQ,
+ signal, CreateIndxReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // mark created in local TC
+ if (! opPtr.p->hasError()) {
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexLocal |= TableRecord::IL_CREATED_TC;
+ }
+ // forward CONF or REF to master
+ ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
+ alterIndex_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // broken index
+ if (! (indexPtr.p->indexLocal & TableRecord::IL_CREATED_TC)) {
+ jam();
+ alterIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ // request to drop in local TC
+ DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropIndxReq::RT_TC);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setIndexId(indexPtr.i);
+ req->setIndexVersion(indexPtr.p->tableVersion);
+ // send
+ sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_DROP_INDX_REQ,
+ signal, DropIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC);
+ if (! opPtr.p->hasError()) {
+ // mark dropped in local TC
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexLocal &= ~TableRecord::IL_CREATED_TC;
+ }
+ // forward CONF or REF to master
+ alterIndex_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // start creation of index triggers
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateTrigReq::RT_ALTER_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setTriggerId(RNIL);
+ req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
+ req->setMonitorAllAttributes(false);
+ req->setOnline(true); // alter online after create
+ req->setReceiverRef(0); // implicit for index triggers
+ getIndexAttrMask(indexPtr, req->getAttributeMask());
+ // name section
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(buffer, sizeof(buffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+ if (indexPtr.p->isHashIndex()) {
+ req->setTriggerType(TriggerType::SECONDARY_INDEX);
+ req->setMonitorReplicas(false);
+ // insert
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->insertTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_INSERT);
+ sprintf(triggerName, "NDB$INDEX_%u_INSERT", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // update
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->updateTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ sprintf(triggerName, "NDB$INDEX_%u_UPDATE", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // delete
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->deleteTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_DELETE);
+ sprintf(triggerName, "NDB$INDEX_%u_DELETE", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // triggers left to create
+ opPtr.p->m_triggerCounter = 3;
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ req->addRequestFlag(RequestFlag::RF_NOTCTRIGGER);
+ req->setTriggerType(TriggerType::ORDERED_INDEX);
+ req->setTriggerActionTime(TriggerActionTime::TA_CUSTOM);
+ req->setMonitorReplicas(true);
+ // one trigger for 5 events (insert, update, delete, commit, abort)
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL)
+ req->setTriggerId(indexPtr.p->customTriggerId);
+ req->setTriggerEvent(TriggerEvent::TE_CUSTOM);
+ sprintf(triggerName, "NDB$INDEX_%u_CUSTOM", opPtr.p->m_request.getIndexId());
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+ // triggers left to create
+ opPtr.p->m_triggerCounter = 1;
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_triggerCounter != 0);
+ if (--opPtr.p->m_triggerCounter != 0) {
+ jam();
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (opPtr.p->m_requestType != AlterIndxReq::RT_SYSTEMRESTART) {
+ // send build request
+ alterIndex_toBuildIndex(signal, opPtr);
+ return;
+ }
+
+ /**
+ * During system restart,
+ * leave the index activated but not yet built.
+ *
+ * It is built a bit later, once the REDO log has been applied.
+ */
+ alterIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // start drop of index triggers
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropTrigReq::RT_ALTER_INDEX);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setIndexId(opPtr.p->m_request.getIndexId());
+ req->setTriggerInfo(0); // not used
+ opPtr.p->m_triggerCounter = 0;
+ // insert
+ if (indexPtr.p->insertTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->insertTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // update
+ if (indexPtr.p->updateTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->updateTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // delete
+ if (indexPtr.p->deleteTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->deleteTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // custom
+ if (indexPtr.p->customTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->customTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ // build
+ if (indexPtr.p->buildTriggerId != RNIL) {
+ req->setTriggerId(indexPtr.p->buildTriggerId);
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ opPtr.p->m_triggerCounter++;
+ }
+ if (opPtr.p->m_triggerCounter == 0) {
+ // drop in each TC
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ }
+}
+
+void
+Dbdict::alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ ndbrequire(opPtr.p->m_triggerCounter != 0);
+ if (--opPtr.p->m_triggerCounter != 0) {
+ jam();
+ return;
+ }
+ // finally drop index in each TC
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ const bool isHashIndex = indexPtr.p->isHashIndex();
+ const bool isOrderedIndex = indexPtr.p->isOrderedIndex();
+ ndbrequire(isHashIndex != isOrderedIndex); // xor
+ if (isHashIndex)
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC;
+ if (isOrderedIndex)
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // get index and table records
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // build request to self (short signal)
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(BuildIndxReq::RT_ALTER_INDEX);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setBuildId(0); // not used
+ req->setBuildKey(0); // not used
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+ // send
+ sendSignal(reference(), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT;
+ alterIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // get index record
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ indexPtr.p->indexState = TableRecord::IS_ONLINE;
+}
+
+void
+Dbdict::alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ jam();
+ // find index record
+ const Uint32 indexId = opPtr.p->m_request.getIndexId();
+ if (indexId >= c_tableRecordPool.getSize())
+ return;
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, indexId);
+ if (! indexPtr.p->isIndex())
+ return;
+ // mark broken
+ indexPtr.p->indexState = TableRecord::IS_BROKEN;
+}
+
+void
+Dbdict::alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr)
+{
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_INDX_REQ,
+ signal, AlterIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr,
+ bool toUser)
+{
+ AlterIndxRef* rep = (AlterIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_ALTER_INDX_CONF;
+ Uint32 length = AlterIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == AlterIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = AlterIndxConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_ALTER_INDX_REF;
+ length = AlterIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Build index
+ *
+ * Build one index or all indexes on a table. Request type:
+ *
+ * RT_USER - normal user request, not yet used
+ * RT_ALTER_INDEX - from alter index
+ * RT_SYSTEMRESTART - system restart, rebuild indexes
+ * RT_DICT_PREPARE - prepare participants
+ * RT_DICT_TRIX - to participant on way to local TRIX
+ * RT_DICT_COMMIT - commit in each participant
+ * RT_DICT_ABORT - abort
+ * RT_TRIX - to local TRIX
+ */
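+
+/*
+ * A rough sketch of the flow for building a hash index, for orientation
+ * only (the handlers below are authoritative; RF_NOBUILD skips the
+ * constraint-trigger and TRIX steps, and an ordered index goes online via
+ * RT_DICT_TUX instead of RT_DICT_TC):
+ *
+ *   BUILDINDXREQ(RT_DICT_PREPARE)    master -> all participants
+ *   CREATE_TRIG_REQ(RT_BUILD_INDEX)  create NDB$INDEX_<id>_BUILD constraint
+ *   BUILDINDXREQ(RT_DICT_TRIX)       each participant builds via local TRIX
+ *   DROP_TRIG_REQ(RT_BUILD_INDEX)    drop the constraint trigger again
+ *   BUILDINDXREQ(RT_DICT_TC)         set the index online in local TC
+ *   BUILDINDXREQ(RT_DICT_COMMIT)     commit in each participant
+ */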
+
+void
+Dbdict::execBUILDINDXREQ(Signal* signal)
+{
+ jamEntry();
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ OpBuildIndexPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const BuildIndxReq::RequestType requestType = req->getRequestType();
+ if (requestType == BuildIndxReq::RT_USER ||
+ requestType == BuildIndxReq::RT_ALTER_INDEX ||
+ requestType == BuildIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ if (signal->getLength() == BuildIndxReq::SignalLength) {
+ jam();
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpBuildIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = BuildIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ buildIndex_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == BuildIndxReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpBuildIndex opBusy;
+ if (! c_opBuildIndex.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = BuildIndxRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ buildIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opBuildIndex.add(opPtr);
+ // master expects to hear from all
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ buildIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opBuildIndex.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ buildIndex_buildTrix(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TC ||
+ requestType == BuildIndxReq::RT_DICT_TUX) {
+ jam();
+ buildIndex_toOnline(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
+ requestType == BuildIndxReq::RT_DICT_ABORT) {
+ jam();
+ buildIndex_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opBuildIndex.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpBuildIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = BuildIndxRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ buildIndex_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execBUILDINDXCONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend();
+ buildIndex_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execBUILDINDXREF(Signal* signal)
+{
+ jamEntry();
+ BuildIndxRef* ref = (BuildIndxRef*)signal->getDataPtrSend();
+ buildIndex_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
+ const BuildIndxRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const BuildIndxReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == BuildIndxReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromBuildIndex(signal, opPtr);
+ return;
+ }
+
+ if (requestType == BuildIndxReq::RT_SYSTEMRESTART) {
+ jam();
+ if (ref == 0) {
+ infoEvent("DICT: index %u rebuild done", (unsigned)key);
+ } else {
+ warningEvent("DICT: index %u rebuild failed: code=%d line=%d node=%d",
+ (unsigned)key, ref->getErrorCode());
+ }
+ rebuildIndexes(signal, key + 1);
+ return;
+ }
+
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ if (requestType == BuildIndxReq::RT_TRIX) {
+ jam();
+ // forward to master
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendReply(signal, opPtr, false);
+ return;
+ }
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_COMMIT ||
+ requestType == BuildIndxReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ buildIndex_sendReply(signal, opPtr, true);
+ c_opBuildIndex.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ if (indexPtr.p->isHashIndex()) {
+ if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
+ buildIndex_toCreateConstr(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
+ buildIndex_toDropConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TC) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ if (requestType == BuildIndxReq::RT_DICT_PREPARE) {
+ jam();
+ if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ } else {
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ }
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TRIX) {
+ jam();
+ ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD));
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == BuildIndxReq::RT_DICT_TUX) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to create constraint trigger
+ CreateTrigReq* req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(CreateTrigReq::RT_BUILD_INDEX);
+ req->addRequestFlag(0); // none
+ req->setTableId(indexPtr.i);
+ req->setIndexId(RNIL);
+ req->setTriggerId(RNIL);
+ req->setTriggerType(TriggerType::READ_ONLY_CONSTRAINT);
+ req->setTriggerActionTime(TriggerActionTime::TA_AFTER);
+ req->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ req->setMonitorReplicas(false);
+ req->setMonitorAllAttributes(false);
+ req->setOnline(true); // alter online after create
+ req->setReceiverRef(0); // no receiver, REF-ed by TUP
+ req->getAttributeMask().clear();
+ // NDB$PK is last attribute
+ req->getAttributeMask().set(indexPtr.p->noOfAttributes - 1);
+ // name section
+ char triggerName[MAX_TAB_NAME_SIZE];
+ Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
+ LinearWriter w(buffer, sizeof(buffer) >> 2);
+ LinearSectionPtr lsPtr[3];
+ sprintf(triggerName, "NDB$INDEX_%u_BUILD", indexPtr.i);
+ w.reset();
+ w.add(CreateTrigReq::TriggerNameKey, triggerName);
+ lsPtr[0].p = buffer;
+ lsPtr[0].sz = w.getWordsUsed();
+ sendSignal(reference(), GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
+}
+
+void
+Dbdict::buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX;
+ buildIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // build request
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(BuildIndxReq::RT_TRIX);
+ req->setBuildId(0); // not yet..
+ req->setBuildKey(0); // ..in use
+ req->setIndexType(indexPtr.p->tableType);
+ req->setIndexId(indexPtr.i);
+ req->setTableId(indexPtr.p->primaryTableId);
+ req->setParallelism(16);
+ if (indexPtr.p->isHashIndex()) {
+ jam();
+ getIndexAttrList(indexPtr, opPtr.p->m_attrList);
+ getTableKeyList(tablePtr, opPtr.p->m_tableKeyList);
+ // send
+ LinearSectionPtr lsPtr[3];
+ lsPtr[0].sz = opPtr.p->m_attrList.sz;
+ lsPtr[0].p = opPtr.p->m_attrList.id;
+ lsPtr[1].sz = opPtr.p->m_tableKeyList.sz;
+ lsPtr[1].p = opPtr.p->m_tableKeyList.id;
+ sendSignal(calcTrixBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB, lsPtr, 2);
+ return;
+ }
+ if (indexPtr.p->isOrderedIndex()) {
+ jam();
+ sendSignal(calcTupBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ // request to drop constraint trigger
+ DropTrigReq* req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(DropTrigReq::RT_BUILD_INDEX);
+ req->addRequestFlag(0); // none
+ req->setTableId(indexPtr.i);
+ req->setIndexId(RNIL);
+ req->setTriggerId(opPtr.p->m_constrTriggerId);
+ req->setTriggerInfo(0); // not used
+ sendSignal(reference(), GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT;
+ buildIndex_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC;
+ buildIndex_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId());
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ // request to set index online in TC or TUX
+ AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
+ req->setRequestType(AlterIndxReq::RT_TC);
+ } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
+ req->setRequestType(AlterIndxReq::RT_TUX);
+ } else {
+ ndbrequire(false);
+ }
+ req->setTableId(tablePtr.i);
+ req->setIndexId(indexPtr.i);
+ req->setIndexVersion(indexPtr.p->tableVersion);
+ req->setOnline(true);
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) {
+ blockRef = calcTuxBlockRef(getOwnNodeId());
+ } else {
+ ndbrequire(false);
+ }
+ // send
+ sendSignal(blockRef, GSN_ALTER_INDX_REQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ jam();
+ // forward to master
+ buildIndex_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr)
+{
+ BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_BUILDINDXREQ,
+ signal, BuildIndxReq::SignalLength, JBB);
+}
+
+void
+Dbdict::buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr,
+ bool toUser)
+{
+ BuildIndxRef* rep = (BuildIndxRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_BUILDINDXCONF;
+ Uint32 length = BuildIndxConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = BuildIndxConf::SignalLength;
+ }
+ rep->setIndexType(opPtr.p->m_request.getIndexType());
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ if (sendRef) {
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->masterNodeId = opPtr.p->m_errorNode;
+ gsn = GSN_BUILDINDXREF;
+ length = BuildIndxRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Create trigger
+ *
+ * Create trigger in all DICT blocks. Optionally start alter trigger
+ * operation to set the trigger online.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - normal user e.g. BACKUP
+ * RT_ALTER_INDEX - from alter index online
+ * RT_DICT_PREPARE - seize operation in each DICT
+ * RT_DICT_COMMIT - commit create in each DICT
+ * RT_TC - sending to TC (operation alter trigger)
+ * RT_LQH - sending to LQH (operation alter trigger)
+ */
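+
+/*
+ * A rough sketch of the flow, for orientation only (the handlers below
+ * are authoritative):
+ *
+ *   CREATE_TRIG_REQ(RT_DICT_PREPARE)   master -> all, check table and name
+ *   CREATE_TRIG_REQ(RT_DICT_CREATE)    master seizes trigger id, slaves create
+ *   ALTER_TRIG_REQ(RT_CREATE_TRIGGER)  optional, sets the trigger online
+ *   CREATE_TRIG_REQ(RT_DICT_COMMIT)    commit in each participant
+ */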
+
+void
+Dbdict::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ OpCreateTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateTrigReq::RequestType requestType = req->getRequestType();
+ if (requestType == CreateTrigReq::RT_USER ||
+ requestType == CreateTrigReq::RT_ALTER_INDEX ||
+ requestType == CreateTrigReq::RT_BUILD_INDEX) {
+ jam();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == CreateTrigReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+
+ releaseSections(signal);
+ OpCreateTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateTrigRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createTrigger_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == CreateTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpCreateTrigger opBusy;
+ if (! c_opCreateTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opCreateTrigger.add(opPtr);
+ {
+ // save name
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateTrigReq::TRIGGER_NAME_SECTION);
+ SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
+ if (ssReader.getKey() != CreateTrigReq::TriggerNameKey ||
+ ! ssReader.getString(opPtr.p->m_triggerName)) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ }
+ releaseSections(signal);
+ {
+ // check that trigger name is unique
+ TriggerRecordPtr triggerPtr;
+ TriggerRecord keyRecord;
+ strcpy(keyRecord.triggerName, opPtr.p->m_triggerName);
+ c_triggerRecordHash.find(triggerPtr, keyRecord);
+ if (triggerPtr.i != RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::TriggerExists;
+ opPtr.p->m_errorLine = __LINE__;
+ createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ }
+
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = receiverNodes;
+ // check request in all participants
+ createTrigger_slavePrepare(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opCreateTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == CreateTrigReq::RT_DICT_CREATE) {
+ jam();
+ // master has set trigger id
+ opPtr.p->m_request.setTriggerId(req->getTriggerId());
+ createTrigger_slaveCreate(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
+ requestType == CreateTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT)
+ createTrigger_slaveCommit(signal, opPtr);
+ else
+ createTrigger_slaveAbort(signal, opPtr);
+ createTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opCreateTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ releaseSections(signal);
+ OpCreateTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ createTrigger_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execCREATE_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(signal->getNoOfSections() == 0);
+ CreateTrigConf* conf = (CreateTrigConf*)signal->getDataPtrSend();
+ createTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execCREATE_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtrSend();
+ createTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
+ const CreateTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const CreateTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == CreateTrigReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromCreateTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_BUILD_INDEX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ // fill in trigger id
+ opPtr.p->m_constrTriggerId = conf->getTriggerId();
+ buildIndex_fromCreateConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_TC ||
+ requestType == CreateTrigReq::RT_LQH) {
+ jam();
+ // part of alter trigger operation
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterTrigger_fromCreateLocal(signal, opPtr);
+ return;
+ }
+ OpCreateTriggerPtr opPtr;
+ c_opCreateTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_COMMIT ||
+ requestType == CreateTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ createTrigger_sendReply(signal, opPtr, true);
+ c_opCreateTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_PREPARE) {
+ jam();
+ // seize trigger id in master
+ createTrigger_masterSeize(signal, opPtr);
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_CREATE;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == CreateTrigReq::RT_DICT_CREATE) {
+ jam();
+ if (opPtr.p->m_request.getOnline()) {
+ jam();
+ // start alter online
+ createTrigger_toAlterTrigger(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // check trigger type
+ if ((req->getRequestType() == CreateTrigReq::RT_USER &&
+ req->getTriggerType() == TriggerType::SUBSCRIPTION) ||
+ (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
+ req->getTriggerType() == TriggerType::SECONDARY_INDEX) ||
+ (req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX &&
+ req->getTriggerType() == TriggerType::ORDERED_INDEX) ||
+ (req->getRequestType() == CreateTrigReq::RT_BUILD_INDEX &&
+ req->getTriggerType() == TriggerType::READ_ONLY_CONSTRAINT)) {
+ ;
+ } else {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::UnsupportedTriggerType;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ // check the table
+ const Uint32 tableId = req->getTableId();
+ if (! (tableId < c_tableRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (tablePtr.p->tabState != TableRecord::DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::InvalidTable;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+}
+
+void
+Dbdict::createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ TriggerRecordPtr triggerPtr;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ triggerPtr.i = opPtr.p->m_request.getTriggerId();
+ } else {
+ triggerPtr.i = getFreeTriggerRecord();
+ if (triggerPtr.i == RNIL) {
+ jam();
+ opPtr.p->m_errorCode = CreateTrigRef::TooManyTriggers;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ }
+ c_triggerRecordPool.getPtr(triggerPtr);
+ initialiseTriggerRecord(triggerPtr);
+ triggerPtr.p->triggerState = TriggerRecord::TS_DEFINING;
+ opPtr.p->m_request.setTriggerId(triggerPtr.i);
+}
+
+void
+Dbdict::createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // get the trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ initialiseTriggerRecord(triggerPtr);
+ // fill in trigger data
+ strcpy(triggerPtr.p->triggerName, opPtr.p->m_triggerName);
+ triggerPtr.p->triggerId = triggerId;
+ triggerPtr.p->tableId = req->getTableId();
+ triggerPtr.p->indexId = RNIL;
+ triggerPtr.p->triggerType = req->getTriggerType();
+ triggerPtr.p->triggerActionTime = req->getTriggerActionTime();
+ triggerPtr.p->triggerEvent = req->getTriggerEvent();
+ triggerPtr.p->monitorReplicas = req->getMonitorReplicas();
+ triggerPtr.p->monitorAllAttributes = req->getMonitorAllAttributes();
+ triggerPtr.p->attributeMask = req->getAttributeMask();
+ triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
+ // add to hash table
+ // ndbout_c("++++++++++++ Adding trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
+ c_triggerRecordHash.add(triggerPtr);
+ if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
+ triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
+ jam();
+ // connect to index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ switch (triggerPtr.p->triggerEvent) {
+ case TriggerEvent::TE_INSERT:
+ indexPtr.p->insertTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ indexPtr.p->updateTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_DELETE:
+ indexPtr.p->deleteTriggerId = triggerPtr.p->triggerId;
+ break;
+ case TriggerEvent::TE_CUSTOM:
+ indexPtr.p->customTriggerId = triggerPtr.p->triggerId;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ }
+ if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
+ jam();
+ // connect to index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getTableId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ indexPtr.p->buildTriggerId = triggerPtr.p->triggerId;
+ }
+}
+
+void
+Dbdict::createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterTrigReq::RT_CREATE_TRIGGER);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setTriggerId(opPtr.p->m_request.getTriggerId());
+ req->setTriggerInfo(0); // not used
+ req->setOnline(true);
+ req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
+ sendSignal(reference(), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT;
+ createTrigger_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+ const CreateTrigReq* const req = &opPtr.p->m_request;
+ // get the trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (! req->getOnline()) {
+ triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE;
+ } else {
+ ndbrequire(triggerPtr.p->triggerState == TriggerRecord::TS_ONLINE);
+ }
+}
+
+void
+Dbdict::createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr)
+{
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr,
+ bool toUser)
+{
+ CreateTrigRef* rep = (CreateTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_CREATE_TRIG_CONF;
+ Uint32 length = CreateTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == CreateTrigReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = CreateTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ rep->setTriggerInfo(opPtr.p->m_request.getTriggerInfo());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_CREATE_TRIG_REF;
+ length = CreateTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Drop trigger.
+ */
+
+void
+Dbdict::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ OpDropTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropTrigReq::RequestType requestType = req->getRequestType();
+
+ if (signal->getNoOfSections() > 0) {
+ ndbrequire(signal->getNoOfSections() == 1);
+ jam();
+ TriggerRecord keyRecord;
+ OpDropTrigger opTmp;
+ opPtr.p = &opTmp;
+ opPtr.p->save(req);
+
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, DropTrigReq::TRIGGER_NAME_SECTION);
+ SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool());
+ if (ssReader.getKey() != DropTrigReq::TriggerNameKey ||
+ ! ssReader.getString(keyRecord.triggerName)) {
+ jam();
+ opPtr.p->m_errorCode = DropTrigRef::InvalidName;
+ opPtr.p->m_errorLine = __LINE__;
+ releaseSections(signal);
+ dropTrigger_sendReply(signal, opPtr, true);
+ return;
+ }
+ releaseSections(signal);
+
+ TriggerRecordPtr triggerPtr;
+
+ // ndbout_c("++++++++++++++ Looking for trigger %s", keyRecord.triggerName);
+ c_triggerRecordHash.find(triggerPtr, keyRecord);
+ if (triggerPtr.i == RNIL) {
+ jam();
+ req->setTriggerId(RNIL);
+ } else {
+ jam();
+ // ndbout_c("++++++++++ Found trigger %s", triggerPtr.p->triggerName);
+ req->setTriggerId(triggerPtr.p->triggerId);
+ req->setTableId(triggerPtr.p->tableId);
+ }
+ }
+ if (requestType == DropTrigReq::RT_USER ||
+ requestType == DropTrigReq::RT_ALTER_INDEX ||
+ requestType == DropTrigReq::RT_BUILD_INDEX) {
+ jam();
+ if (signal->getLength() == DropTrigReq::SignalLength) {
+ if (getOwnNodeId() != c_masterNodeId) {
+ jam();
+ // forward to DICT master
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_DROP_TRIG_REQ,
+ signal, signal->getLength(), JBB);
+ return;
+ }
+ if (!c_triggerRecordPool.findId(req->getTriggerId())) {
+ jam();
+ // return to sender
+ OpDropTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = DropTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, true);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == DropTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpDropTrigger opBusy;
+ if (! c_opDropTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = DropTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opDropTrigger.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster)
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ dropTrigger_slavePrepare(signal, opPtr);
+ dropTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opDropTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == DropTrigReq::RT_DICT_COMMIT ||
+ requestType == DropTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == DropTrigReq::RT_DICT_COMMIT)
+ dropTrigger_slaveCommit(signal, opPtr);
+ else
+ dropTrigger_slaveAbort(signal, opPtr);
+ dropTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opDropTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpDropTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = DropTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ dropTrigger_sendReply(signal, opPtr, true);
+}
+
+void
+Dbdict::execDROP_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ DropTrigConf* conf = (DropTrigConf*)signal->getDataPtrSend();
+ dropTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execDROP_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ DropTrigRef* ref = (DropTrigRef*)signal->getDataPtrSend();
+ dropTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
+ const DropTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const DropTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == DropTrigReq::RT_ALTER_INDEX) {
+ jam();
+ // part of alter index operation
+ OpAlterIndexPtr opPtr;
+ c_opAlterIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterIndex_fromDropTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_BUILD_INDEX) {
+ jam();
+ // part of build index operation
+ OpBuildIndexPtr opPtr;
+ c_opBuildIndex.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ buildIndex_fromDropConstr(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_TC ||
+ requestType == DropTrigReq::RT_LQH) {
+ jam();
+ // part of alter trigger operation
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ alterTrigger_fromDropLocal(signal, opPtr);
+ return;
+ }
+ OpDropTriggerPtr opPtr;
+ c_opDropTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == DropTrigReq::RT_DICT_COMMIT ||
+ requestType == DropTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ dropTrigger_sendReply(signal, opPtr, true);
+ c_opDropTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_ABORT;
+ dropTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == DropTrigReq::RT_DICT_PREPARE) {
+ jam();
+ // start alter offline
+ dropTrigger_toAlterTrigger(signal, opPtr);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(AlterTrigReq::RT_DROP_TRIGGER);
+ req->setTableId(opPtr.p->m_request.getTableId());
+ req->setTriggerId(opPtr.p->m_request.getTriggerId());
+ req->setTriggerInfo(0); // not used
+ req->setOnline(false);
+ req->setReceiverRef(0);
+ sendSignal(reference(), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ // remove in all
+ opPtr.p->m_requestType = DropTrigReq::RT_DICT_COMMIT;
+ dropTrigger_sendSlaveReq(signal, opPtr);
+}
+
+void
+Dbdict::dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ opPtr.p->m_signalCounter = c_aliveNodes;
+ NodeReceiverGroup rg(DBDICT, c_aliveNodes);
+ sendSignal(rg, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+ const DropTrigReq* const req = &opPtr.p->m_request;
+ // get trigger record
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX ||
+ triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) {
+ jam();
+ // disconnect from index if index trigger XXX move to drop index
+ triggerPtr.p->indexId = req->getIndexId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ ndbrequire(! indexPtr.isNull());
+ switch (triggerPtr.p->triggerEvent) {
+ case TriggerEvent::TE_INSERT:
+ indexPtr.p->insertTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ indexPtr.p->updateTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_DELETE:
+ indexPtr.p->deleteTriggerId = RNIL;
+ break;
+ case TriggerEvent::TE_CUSTOM:
+ indexPtr.p->customTriggerId = RNIL;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ }
+ if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) {
+ jam();
+ // disconnect from index record XXX should be done in caller instead
+ triggerPtr.p->indexId = req->getTableId();
+ TableRecordPtr indexPtr;
+ c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId);
+ indexPtr.p->buildTriggerId = RNIL;
+ }
+ // remove trigger
+ // ndbout_c("++++++++++++ Removing trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName);
+ c_triggerRecordHash.remove(triggerPtr);
+ triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED;
+}
+
+void
+Dbdict::dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr,
+ bool toUser)
+{
+ DropTrigRef* rep = (DropTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_DROP_TRIG_CONF;
+ Uint32 length = DropTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == DropTrigReq::RT_DICT_ABORT)
+ sendRef = false;
+ } else {
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = DropTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setIndexId(opPtr.p->m_request.getIndexId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0)
+ opPtr.p->m_errorNode = getOwnNodeId();
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_DROP_TRIG_REF;
+ length = DropTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Alter trigger.
+ *
+ * Alter trigger state. Alter online creates the trigger first in all
+ * TC (if index trigger) and then in all LQH-TUP.
+ *
+ * Request type received in REQ and returned in CONF/REF:
+ *
+ * RT_USER - normal user e.g. BACKUP
+ * RT_CREATE_TRIGGER - from create trigger
+ * RT_DROP_TRIGGER - from drop trigger
+ * RT_DICT_PREPARE - seize operations and check request
+ * RT_DICT_TC - master to each DICT on way to TC
+ * RT_DICT_LQH - master to each DICT on way to LQH-TUP
+ * RT_DICT_COMMIT - commit state change in each DICT (no reply)
+ */
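+
+/*
+ * A minimal sketch of the request type sequencing that
+ * alterTrigger_recvReply() below drives on the master (derived from
+ * that code; the path taken depends on the online flag and on the
+ * RF_NOTCTRIGGER request flag):
+ *
+ *   online:              RT_DICT_PREPARE -> RT_DICT_TC -> RT_DICT_LQH -> RT_DICT_COMMIT
+ *   offline:             RT_DICT_PREPARE -> RT_DICT_LQH -> RT_DICT_TC -> RT_DICT_COMMIT
+ *   RF_NOTCTRIGGER set:  RT_DICT_PREPARE -> RT_DICT_LQH -> RT_DICT_COMMIT
+ *
+ * Any error reported by a participant makes the master switch to
+ * RT_DICT_ABORT instead of taking the next step.
+ */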
+
+void
+Dbdict::execALTER_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
+ OpAlterTriggerPtr opPtr;
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterTrigReq::RequestType requestType = req->getRequestType();
+ if (requestType == AlterTrigReq::RT_USER ||
+ requestType == AlterTrigReq::RT_CREATE_TRIGGER ||
+ requestType == AlterTrigReq::RT_DROP_TRIGGER) {
+ jam();
+ const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL;
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (isLocal) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ }
+ if (signal->getLength() == AlterTrigReq::SignalLength) {
+ jam();
+ if (! isLocal && getOwnNodeId() != c_masterNodeId) {
+ jam();
+ // forward to DICT master
+ sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+ return;
+ }
+ // forward initial request plus operation key to all
+ req->setOpKey(++c_opRecordSequence);
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength + 1, JBB);
+ return;
+ }
+ // seize operation record
+ ndbrequire(signal->getLength() == AlterTrigReq::SignalLength + 1);
+ const Uint32 opKey = req->getOpKey();
+ OpAlterTrigger opBusy;
+ if (! c_opAlterTrigger.seize(opPtr))
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_coordinatorRef = senderRef;
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = opKey;
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_PREPARE;
+ if (opPtr.p == &opBusy) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::Busy;
+ opPtr.p->m_errorLine = __LINE__;
+ alterTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster);
+ return;
+ }
+ c_opAlterTrigger.add(opPtr);
+ // master expects to hear from all
+ if (opPtr.p->m_isMaster) {
+ opPtr.p->m_nodes = receiverNodes;
+ opPtr.p->m_signalCounter = receiverNodes;
+ }
+ alterTrigger_slavePrepare(signal, opPtr);
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ c_opAlterTrigger.find(opPtr, req->getConnectionPtr());
+ if (! opPtr.isNull()) {
+ opPtr.p->m_requestType = requestType;
+ if (requestType == AlterTrigReq::RT_DICT_TC ||
+ requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ if (req->getOnline())
+ alterTrigger_toCreateLocal(signal, opPtr);
+ else
+ alterTrigger_toDropLocal(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
+ requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT)
+ alterTrigger_slaveCommit(signal, opPtr);
+ else
+ alterTrigger_slaveAbort(signal, opPtr);
+ alterTrigger_sendReply(signal, opPtr, false);
+ // done in slave
+ if (! opPtr.p->m_isMaster)
+ c_opAlterTrigger.release(opPtr);
+ return;
+ }
+ }
+ jam();
+ // return to sender
+ OpAlterTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterTrigRef::BadRequestType;
+ opPtr.p->m_errorLine = __LINE__;
+ alterTrigger_sendReply(signal, opPtr, true);
+ return;
+}
+
+void
+Dbdict::execALTER_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtrSend();
+ alterTrigger_recvReply(signal, conf, 0);
+}
+
+void
+Dbdict::execALTER_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtrSend();
+ alterTrigger_recvReply(signal, ref->getConf(), ref);
+}
+
+void
+Dbdict::alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
+ const AlterTrigRef* ref)
+{
+ jam();
+ const Uint32 senderRef = signal->senderBlockRef();
+ const AlterTrigReq::RequestType requestType = conf->getRequestType();
+ const Uint32 key = conf->getConnectionPtr();
+ if (requestType == AlterTrigReq::RT_CREATE_TRIGGER) {
+ jam();
+ // part of create trigger operation
+ OpCreateTriggerPtr opPtr;
+ c_opCreateTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ createTrigger_fromAlterTrigger(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DROP_TRIGGER) {
+ jam();
+ // part of drop trigger operation
+ OpDropTriggerPtr opPtr;
+ c_opDropTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ opPtr.p->setError(ref);
+ dropTrigger_fromAlterTrigger(signal, opPtr);
+ return;
+ }
+ OpAlterTriggerPtr opPtr;
+ c_opAlterTrigger.find(opPtr, key);
+ ndbrequire(! opPtr.isNull());
+ ndbrequire(opPtr.p->m_isMaster);
+ ndbrequire(opPtr.p->m_requestType == requestType);
+ /*
+ * If a refusal on drop trigger, caused by a non-existent trigger,
+ * comes from any node but the master node, ignore it and
+ * remove that node from further ALTER_TRIG communication.
+ * This will happen if a new node has started since the
+ * trigger was created.
+ */
+ if (ref &&
+ refToNode(senderRef) != refToNode(reference()) &&
+ opPtr.p->m_request.getRequestType() == AlterTrigReq::RT_DROP_TRIGGER &&
+ ref->getErrorCode() == AlterTrigRef::TriggerNotFound) {
+ jam();
+ ref = 0; // ignore this error
+ opPtr.p->m_nodes.clear(refToNode(senderRef)); // remove this from group
+ }
+ opPtr.p->setError(ref);
+ opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef));
+ if (! opPtr.p->m_signalCounter.done()) {
+ jam();
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_COMMIT ||
+ requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ // send reply to user
+ alterTrigger_sendReply(signal, opPtr, true);
+ c_opAlterTrigger.release(opPtr);
+ return;
+ }
+ if (opPtr.p->hasError()) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_ABORT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (! (opPtr.p->m_request.getRequestFlag() & RequestFlag::RF_NOTCTRIGGER)) {
+ if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_TC) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ if (opPtr.p->m_request.getOnline())
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ else
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ } else {
+ if (requestType == AlterTrigReq::RT_DICT_PREPARE) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ if (requestType == AlterTrigReq::RT_DICT_LQH) {
+ jam();
+ opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT;
+ alterTrigger_sendSlaveReq(signal, opPtr);
+ return;
+ }
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ const AlterTrigReq* const req = &opPtr.p->m_request;
+ const Uint32 triggerId = req->getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ if (! (triggerId < c_triggerRecordPool.getSize())) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) {
+ jam();
+ opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound;
+ opPtr.p->m_errorLine = __LINE__;
+ return;
+ }
+}
+
+void
+Dbdict::alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ // find trigger record
+ const Uint32 triggerId = opPtr.p->m_request.getTriggerId();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, triggerId);
+ CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ req->setRequestType(CreateTrigReq::RT_TC);
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ req->setRequestType(CreateTrigReq::RT_LQH);
+ } else {
+ ndbassert(false);
+ }
+ req->setTableId(triggerPtr.p->tableId);
+ req->setIndexId(triggerPtr.p->indexId);
+ req->setTriggerId(triggerPtr.i);
+ req->setTriggerType(triggerPtr.p->triggerType);
+ req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
+ req->setTriggerEvent(triggerPtr.p->triggerEvent);
+ req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
+ req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
+ req->setOnline(true);
+ req->setReceiverRef(opPtr.p->m_request.getReceiverRef());
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ blockRef = calcLqhBlockRef(getOwnNodeId());
+ } else {
+ ndbassert(false);
+ }
+ req->setAttributeMask(triggerPtr.p->attributeMask);
+ sendSignal(blockRef, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ if (! opPtr.p->hasError()) {
+ // mark created locally
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_TC;
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_LQH;
+ } else {
+ ndbrequire(false);
+ }
+ }
+ // forward CONF or REF to master
+ alterTrigger_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(reference());
+ req->setConnectionPtr(opPtr.p->key);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ // broken trigger
+ if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_TC)) {
+ jam();
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ req->setRequestType(DropTrigReq::RT_TC);
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ // broken trigger
+ if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_LQH)) {
+ jam();
+ alterTrigger_sendReply(signal, opPtr, false);
+ return;
+ }
+ req->setRequestType(DropTrigReq::RT_LQH);
+ } else {
+ ndbassert(false);
+ }
+ req->setTableId(triggerPtr.p->tableId);
+ req->setIndexId(triggerPtr.p->indexId);
+ req->setTriggerId(triggerPtr.i);
+ req->setTriggerType(triggerPtr.p->triggerType);
+ req->setTriggerActionTime(triggerPtr.p->triggerActionTime);
+ req->setTriggerEvent(triggerPtr.p->triggerEvent);
+ req->setMonitorReplicas(triggerPtr.p->monitorReplicas);
+ req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes);
+ BlockReference blockRef = 0;
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ blockRef = calcTcBlockRef(getOwnNodeId());
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ blockRef = calcLqhBlockRef(getOwnNodeId());
+ } else {
+ ndbassert(false);
+ }
+ sendSignal(blockRef, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ if (! opPtr.p->hasError()) {
+ // mark dropped locally
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) {
+ triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_TC;
+ } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) {
+ triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_LQH;
+ } else {
+ ndbrequire(false);
+ }
+ }
+ // forward CONF or REF to master
+ alterTrigger_sendReply(signal, opPtr, false);
+}
+
+void
+Dbdict::alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+ TriggerRecordPtr triggerPtr;
+ c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId());
+ // set state
+ triggerPtr.p->triggerState = TriggerRecord::TS_ONLINE;
+}
+
+void
+Dbdict::alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ jam();
+}
+
+void
+Dbdict::alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr)
+{
+ AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend();
+ *req = opPtr.p->m_request;
+ req->setUserRef(opPtr.p->m_coordinatorRef);
+ req->setConnectionPtr(opPtr.p->key);
+ req->setRequestType(opPtr.p->m_requestType);
+ req->addRequestFlag(opPtr.p->m_requestFlag);
+ NdbNodeBitmask receiverNodes = c_aliveNodes;
+ if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) {
+ receiverNodes.clear();
+ receiverNodes.set(getOwnNodeId());
+ } else {
+ opPtr.p->m_nodes.bitAND(receiverNodes);
+ receiverNodes = opPtr.p->m_nodes;
+ }
+ opPtr.p->m_signalCounter = receiverNodes;
+ NodeReceiverGroup rg(DBDICT, receiverNodes);
+ sendSignal(rg, GSN_ALTER_TRIG_REQ,
+ signal, AlterTrigReq::SignalLength, JBB);
+}
+
+void
+Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr,
+ bool toUser)
+{
+ jam();
+ AlterTrigRef* rep = (AlterTrigRef*)signal->getDataPtrSend();
+ Uint32 gsn = GSN_ALTER_TRIG_CONF;
+ Uint32 length = AlterTrigConf::InternalLength;
+ bool sendRef = opPtr.p->hasError();
+ if (! toUser) {
+ rep->setUserRef(opPtr.p->m_coordinatorRef);
+ rep->setConnectionPtr(opPtr.p->key);
+ rep->setRequestType(opPtr.p->m_requestType);
+ if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_ABORT) {
+ jam();
+ sendRef = false;
+ } else {
+ jam();
+ }
+ } else {
+ jam();
+ rep->setUserRef(opPtr.p->m_request.getUserRef());
+ rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr());
+ rep->setRequestType(opPtr.p->m_request.getRequestType());
+ length = AlterTrigConf::SignalLength;
+ }
+ rep->setTableId(opPtr.p->m_request.getTableId());
+ rep->setTriggerId(opPtr.p->m_request.getTriggerId());
+ if (sendRef) {
+ if (opPtr.p->m_errorNode == 0) {
+ jam();
+ opPtr.p->m_errorNode = getOwnNodeId();
+ } else {
+ jam();
+ }
+ rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->setErrorLine(opPtr.p->m_errorLine);
+ rep->setErrorNode(opPtr.p->m_errorNode);
+ gsn = GSN_ALTER_TRIG_REF;
+ length = AlterTrigRef::SignalLength;
+ }
+ sendSignal(rep->getUserRef(), gsn, signal, length, JBB);
+}
+
+/**
+ * MODULE: Support routines for index and trigger.
+ */
+
+void
+Dbdict::getTableKeyList(TableRecordPtr tablePtr, AttributeList& list)
+{
+ jam();
+ list.sz = 0;
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->tupleKey)
+ list.id[list.sz++] = aRec->attributeId;
+ tAttr = aRec->nextAttrInTable;
+ }
+}
+
+// XXX should store the primary attribute id
+void
+Dbdict::getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
+ for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (iaRec->equal(*aRec)) {
+ id[0] = aRec->attributeId;
+ return;
+ }
+ tAttr = aRec->nextAttrInTable;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbdict::getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ list.sz = 0;
+ memset(list.id, 0, sizeof(list.id));
+ ndbrequire(indexPtr.p->noOfAttributes >= 2);
+ Uint32 itAttr = indexPtr.p->firstAttribute;
+ for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
+ getIndexAttr(indexPtr, itAttr, &list.id[list.sz++]);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
+ itAttr = iaRec->nextAttrInTable;
+ }
+}
+
+void
+Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
+{
+ jam();
+ TableRecordPtr tablePtr;
+ c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId);
+ mask.clear();
+ ndbrequire(indexPtr.p->noOfAttributes >= 2);
+ Uint32 itAttr = indexPtr.p->firstAttribute;
+ for (Uint32 i = 0; i < (Uint32)indexPtr.p->noOfAttributes - 1; i++) {
+ Uint32 id;
+ getIndexAttr(indexPtr, itAttr, &id);
+ mask.set(id);
+ AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr);
+ itAttr = iaRec->nextAttrInTable;
+ }
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* General module used to store the schema file on disk and */
+/* to restore it from disk. */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+void
+Dbdict::initSchemaFile(SchemaFile * sf, Uint32 fileSz){
+ memcpy(sf->Magic, "NDBSCHMA", sizeof(sf->Magic));
+ sf->ByteOrder = 0x12345678;
+ sf->NdbVersion = NDB_VERSION;
+ sf->FileSize = fileSz;
+ sf->CheckSum = 0;
+
+ Uint32 headSz = (sizeof(SchemaFile)-sizeof(SchemaFile::TableEntry));
+ Uint32 noEntries = (fileSz - headSz) / sizeof(SchemaFile::TableEntry);
+ Uint32 slack = (fileSz - headSz) - noEntries * sizeof(SchemaFile::TableEntry);
+
+ ndbrequire(noEntries > MAX_TABLES);
+
+ sf->NoOfTableEntries = noEntries;
+ memset(sf->TableEntries, 0, noEntries*sizeof(SchemaFile::TableEntry));
+ memset(&(sf->TableEntries[noEntries]), 0, slack);
+ computeChecksum(sf);
+}
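+
+/*
+ * Sizing sketch for initSchemaFile(), with illustrative numbers only
+ * (the real sizes come from SchemaFile.hpp): each TableEntry is 4
+ * words = 16 bytes; assuming headSz = 32 bytes, a fileSz of 4096
+ * bytes gives noEntries = (4096 - 32) / 16 = 254 and slack =
+ * 4064 - 254 * 16 = 0. Any trailing bytes that do not fit a whole
+ * TableEntry end up in slack and are zero-filled above.
+ */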
+
+void
+Dbdict::computeChecksum(SchemaFile * sf){
+ sf->CheckSum = 0;
+ sf->CheckSum = computeChecksum((const Uint32*)sf, sf->FileSize/4);
+}
+
+bool
+Dbdict::validateChecksum(const SchemaFile * sf){
+
+ Uint32 c = computeChecksum((const Uint32*)sf, sf->FileSize/4);
+ return c == 0;
+}
+
+Uint32
+Dbdict::computeChecksum(const Uint32 * src, Uint32 len){
+ Uint32 ret = 0;
+ for(Uint32 i = 0; i<len; i++)
+ ret ^= src[i];
+ return ret;
+}
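+
+/*
+ * The checksum is a plain XOR over all 32-bit words of the file.
+ * computeChecksum(sf) zeroes sf->CheckSum first and then stores the
+ * XOR of every word there, so XOR-ing the complete file (checksum
+ * word included) yields 0; validateChecksum() relies on exactly that
+ * invariant. A small sketch of the property:
+ *
+ *   Uint32 x = 0;
+ *   for (Uint32 i = 0; i < sf->FileSize / 4; i++)
+ *     x ^= ((const Uint32*)sf)[i];
+ *   // x == 0 after computeChecksum(sf) has run
+ */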
+
+SchemaFile::TableEntry *
+Dbdict::getTableEntry(void * p, Uint32 tableId, bool allowTooBig){
+ SchemaFile * sf = (SchemaFile*)p;
+
+ ndbrequire(allowTooBig || tableId < sf->NoOfTableEntries);
+ return &sf->TableEntries[tableId];
+}
+
+// global metadata support
+
+int
+Dbdict::getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion)
+{
+ if (tableId >= c_tableRecordPool.getSize()) {
+ return MetaData::InvalidArgument;
+ }
+ c_tableRecordPool.getPtr(tablePtr, tableId);
+ if (tablePtr.p->tabState == TableRecord::NOT_DEFINED) {
+ return MetaData::TableNotFound;
+ }
+ if (tablePtr.p->tableVersion != tableVersion) {
+ return MetaData::InvalidTableVersion;
+ }
+ // online flag is not maintained by DICT
+ tablePtr.p->online =
+ (tablePtr.p->isTable() && tablePtr.p->tabState == TableRecord::DEFINED) ||
+ (tablePtr.p->isIndex() && tablePtr.p->indexState == TableRecord::IS_ONLINE);
+ return 0;
+}
+
+int
+Dbdict::getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, tableId, tableVersion)) < 0) {
+ return ret;
+ }
+ new (&table) MetaData::Table(*tablePtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaTable(MetaData::Table& table, const char* tableName)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if (strlen(tableName) + 1 > MAX_TAB_NAME_SIZE) {
+ return MetaData::InvalidArgument;
+ }
+ TableRecord keyRecord;
+ strcpy(keyRecord.tableName, tableName);
+ c_tableRecordHash.find(tablePtr, keyRecord);
+ if (tablePtr.i == RNIL) {
+ return MetaData::TableNotFound;
+ }
+ if ((ret = getMetaTablePtr(tablePtr, tablePtr.i, tablePtr.p->tableVersion)) < 0) {
+ return ret;
+ }
+ new (&table) MetaData::Table(*tablePtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, Uint32 attributeId)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
+ return ret;
+ }
+ AttributeRecordPtr attrPtr;
+ attrPtr.i = tablePtr.p->firstAttribute;
+ while (attrPtr.i != RNIL) {
+ c_attributeRecordPool.getPtr(attrPtr);
+ if (attrPtr.p->attributeId == attributeId)
+ break;
+ attrPtr.i = attrPtr.p->nextAttrInTable;
+ }
+ if (attrPtr.i == RNIL) {
+ return MetaData::AttributeNotFound;
+ }
+ new (&attr) MetaData::Attribute(*attrPtr.p);
+ return 0;
+}
+
+int
+Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table, const char* attributeName)
+{
+ int ret;
+ TableRecordPtr tablePtr;
+ if ((ret = getMetaTablePtr(tablePtr, table.tableId, table.tableVersion)) < 0) {
+ return ret;
+ }
+ AttributeRecordPtr attrPtr;
+ attrPtr.i = tablePtr.p->firstAttribute;
+ while (attrPtr.i != RNIL) {
+ c_attributeRecordPool.getPtr(attrPtr);
+ if (strcmp(attrPtr.p->attributeName, attributeName) == 0)
+ break;
+ attrPtr.i = attrPtr.p->nextAttrInTable;
+ }
+ if (attrPtr.i == RNIL) {
+ return MetaData::AttributeNotFound;
+ }
+ new (&attr) MetaData::Attribute(*attrPtr.p);
+ return 0;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
new file mode 100644
index 00000000000..73fbdcc8e16
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -0,0 +1,1990 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBDICT_H
+#define DBDICT_H
+
+/**
+ * Dict : Dictionary Block
+ */
+
+#include <ndb_limits.h>
+#include <trigger_definitions.h>
+#include <pc.hpp>
+#include <ArrayList.hpp>
+#include <DLHashTable.hpp>
+#include <CArray.hpp>
+#include <KeyTable2.hpp>
+#include <SimulatedBlock.hpp>
+#include <SimpleProperties.hpp>
+#include <SignalCounter.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <signaldata/GetTableId.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/CreateTab.hpp>
+#include <signaldata/DropTable.hpp>
+#include <signaldata/AlterTable.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/AlterIndx.hpp>
+#include <signaldata/BuildIndx.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/CreateEvnt.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include "SchemaFile.hpp"
+#include <blocks/mutexes.hpp>
+#include <SafeCounter.hpp>
+#include <RequestTracker.hpp>
+
+#ifdef DBDICT_C
+// Debug Macros
+
+/*--------------------------------------------------------------*/
+// Constants for CONTINUEB
+/*--------------------------------------------------------------*/
+#define ZPACK_TABLE_INTO_PAGES 0
+#define ZSEND_GET_TAB_RESPONSE 3
+
+
+/*--------------------------------------------------------------*/
+// Other constants in alphabetical order
+/*--------------------------------------------------------------*/
+#define ZNOMOREPHASES 255
+
+/*--------------------------------------------------------------*/
+// Schema file defines
+/*--------------------------------------------------------------*/
+#define ZSCHEMA_WORDS 4
+
+/*--------------------------------------------------------------*/
+// Page constants
+/*--------------------------------------------------------------*/
+#define ZALLOCATE 1 //Variable number of pages for NDBFS
+#define ZPAGE_HEADER_SIZE 32
+#define ZPOS_PAGE_SIZE 16
+#define ZPOS_CHECKSUM 17
+#define ZPOS_VERSION 18
+#define ZPOS_PAGE_HEADER_SIZE 19
+
+/*--------------------------------------------------------------*/
+// Size constants
+/*--------------------------------------------------------------*/
+#define ZFS_CONNECT_SIZE 4
+#define ZSIZE_OF_PAGES_IN_WORDS 8192
+#define ZLOG_SIZE_OF_PAGES_IN_WORDS 13
+#define ZMAX_PAGES_OF_TABLE_DEFINITION 8
+#define ZNUMBER_OF_PAGES (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2)
+#define ZNO_OF_FRAGRECORD 5
+
+/*--------------------------------------------------------------*/
+// Error codes
+/*--------------------------------------------------------------*/
+#define ZNODE_FAILURE_ERROR 704
+#endif
+
+/**
+ * Systable NDB$EVENTS_0
+ */
+
+#define EVENT_SYSTEM_TABLE_NAME "sys/def/NDB$EVENTS_0"
+#define EVENT_SYSTEM_TABLE_LENGTH 6
+
+struct sysTab_NDBEVENTS_0 {
+ char NAME[MAX_TAB_NAME_SIZE];
+ Uint32 EVENT_TYPE;
+ char TABLE_NAME[MAX_TAB_NAME_SIZE];
+ Uint32 ATTRIBUTE_MASK[MAXNROFATTRIBUTESINWORDS];
+ Uint32 SUBID;
+ Uint32 SUBKEY;
+};
+
+/**
+ * DICT - This block handles all metadata
+ */
+class Dbdict: public SimulatedBlock {
+public:
+ /*
+ * 2.3 RECORD AND FILESIZES
+ */
+ /**
+ * Shared table / index record. Most of this is permanent data stored
+ * on disk. Index trigger ids are volatile.
+ */
+ struct TableRecord : public MetaData::Table {
+ /****************************************************
+ * Support variables for table handling
+ ****************************************************/
+
+ /* Active page which is sent to disk */
+ Uint32 activePage;
+
+ /** File pointer received from disk */
+ Uint32 filePtr[2];
+
+ /** Pointer to first attribute in table */
+ Uint32 firstAttribute;
+
+ /* Pointer to first page of table description */
+ Uint32 firstPage;
+
+ /** Pointer to last attribute in table */
+ Uint32 lastAttribute;
+
+ /* Temporary record used during add/drop table */
+ Uint32 myConnect;
+#ifdef HAVE_TABLE_REORG
+ /* Second table used by this table (for table reorg) */
+ Uint32 secondTable;
+#endif
+ /* Next record in Pool */
+ Uint32 nextPool;
+
+ /* Next record in hash table */
+ Uint32 nextHash;
+
+ /* Previous record in Pool */
+ Uint32 prevPool;
+
+ /* Previous record in hash table */
+ Uint32 prevHash;
+
+ enum TabState {
+ NOT_DEFINED = 0,
+ REORG_TABLE_PREPARED = 1,
+ DEFINING = 2,
+ CHECKED = 3,
+ DEFINED = 4,
+ PREPARE_DROPPING = 5,
+ DROPPING = 6
+ };
+ TabState tabState;
+
+ /* State when returning from TC_SCHVERREQ */
+ enum TabReturnState {
+ TRS_IDLE = 0,
+ ADD_TABLE = 1,
+ SLAVE_SYSTEM_RESTART = 2,
+ MASTER_SYSTEM_RESTART = 3
+ };
+ TabReturnState tabReturnState;
+
+ /** Number of words */
+ Uint32 packedSize;
+
+ /** Index state (volatile data) */
+ enum IndexState {
+ IS_UNDEFINED = 0, // initial
+ IS_OFFLINE = 1, // index table created
+ IS_BUILDING = 2, // building (local state)
+ IS_DROPPING = 3, // dropping (local state)
+ IS_ONLINE = 4, // online
+ IS_BROKEN = 9 // build or drop aborted
+ };
+ IndexState indexState;
+
+ /** Trigger ids of index (volatile data) */
+ Uint32 insertTriggerId;
+ Uint32 updateTriggerId;
+ Uint32 deleteTriggerId;
+ Uint32 customTriggerId; // ordered index
+ Uint32 buildTriggerId; // temp during build
+
+ /** Index state in other blocks on this node */
+ enum IndexLocal {
+ IL_CREATED_TC = 1 << 0 // created in TC
+ };
+ Uint32 indexLocal;
+
+ Uint32 noOfNullBits;
+
+ inline bool equal(TableRecord & rec) const {
+ return strcmp(tableName, rec.tableName) == 0;
+ }
+
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = tableName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+
+ /** frm data for this table */
+ /** TODO Could preferably be made dynamic size */
+ Uint32 frmLen;
+ char frmData[MAX_FRM_DATA_SIZE];
+
+ Uint32 fragmentCount;
+ };
+
+ typedef Ptr<TableRecord> TableRecordPtr;
+ ArrayPool<TableRecord> c_tableRecordPool;
+ DLHashTable<TableRecord> c_tableRecordHash;
+
+ /**
+ * Table attributes. Permanent data.
+ *
+ * Indexes have an attribute list which duplicates primary table
+ * attributes. This is wrong but convenient.
+ */
+ struct AttributeRecord : public MetaData::Attribute {
+ union {
+ /** Pointer to the next attribute used by ArrayPool */
+ Uint32 nextPool;
+
+ /** Pointer to the next attribute used by DLHash */
+ Uint32 nextHash;
+ };
+
+ /** Pointer to the previous attribute used by DLHash */
+ Uint32 prevHash;
+
+ /** Pointer to the next attribute in table */
+ Uint32 nextAttrInTable;
+
+ inline bool equal(AttributeRecord & rec) const {
+ return strcmp(attributeName, rec.attributeName) == 0;
+ }
+
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = attributeName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+ };
+
+ typedef Ptr<AttributeRecord> AttributeRecordPtr;
+ ArrayPool<AttributeRecord> c_attributeRecordPool;
+ DLHashTable<AttributeRecord> c_attributeRecordHash;
+
+ /**
+ * Triggers. This is volatile data not saved on disk. Setting a
+ * trigger online creates the trigger in TC (if index) and LQH-TUP.
+ */
+ struct TriggerRecord {
+
+ /** Trigger state */
+ enum TriggerState {
+ TS_NOT_DEFINED = 0,
+ TS_DEFINING = 1,
+ TS_OFFLINE = 2, // created globally in DICT
+ TS_BUILDING = 3,
+ TS_DROPPING = 4,
+ TS_ONLINE = 5 // activated globally
+ };
+ TriggerState triggerState;
+
+ /** Trigger state in other blocks on this node */
+ enum IndexLocal {
+ TL_CREATED_TC = 1 << 0, // created in TC
+ TL_CREATED_LQH = 1 << 1 // created in LQH-TUP
+ };
+ Uint32 triggerLocal;
+
+ /** Trigger name, used by DICT to identify the trigger */
+ char triggerName[MAX_TAB_NAME_SIZE];
+
+ /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */
+ Uint32 triggerId;
+
+ /** Table id, the table the trigger is defined on */
+ Uint32 tableId;
+
+ /** Trigger type, defines what the trigger is used for */
+ TriggerType::Value triggerType;
+
+ /** Trigger action time, defines when the trigger should fire */
+ TriggerActionTime::Value triggerActionTime;
+
+ /** Trigger event, defines what events the trigger should monitor */
+ TriggerEvent::Value triggerEvent;
+
+ /** Monitor all replicas */
+ bool monitorReplicas;
+
+ /** Monitor all, the trigger monitors changes of all attributes in table */
+ bool monitorAllAttributes;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored.
+ * Can be seen as a compact representation of an SQL column name list.
+ */
+ AttributeMask attributeMask;
+
+ /** Index id, only used by secondary_index triggers */
+ Uint32 indexId;
+
+ union {
+ /** Pointer to the next attribute used by ArrayPool */
+ Uint32 nextPool;
+
+ /** Next record in hash table */
+ Uint32 nextHash;
+ };
+
+ /** Previous record in hash table */
+ Uint32 prevHash;
+
+ /** Equal function, used by DLHashTable */
+ inline bool equal(TriggerRecord & rec) const {
+ return strcmp(triggerName, rec.triggerName) == 0;
+ }
+
+ /** Hash value function, used by DLHashTable */
+ inline Uint32 hashValue() const {
+ Uint32 h = 0;
+ for (const char* p = triggerName; *p != 0; p++)
+ h = (h << 5) + h + (*p);
+ return h;
+ }
+ };
+
+ Uint32 c_maxNoOfTriggers;
+ typedef Ptr<TriggerRecord> TriggerRecordPtr;
+ ArrayPool<TriggerRecord> c_triggerRecordPool;
+ DLHashTable<TriggerRecord> c_triggerRecordHash;
+
+ /**
+ * Information for each FS connection.
+ ****************************************************************************/
+ struct FsConnectRecord {
+ enum FsState {
+ IDLE = 0,
+ OPEN_WRITE_SCHEMA = 1,
+ WRITE_SCHEMA = 2,
+ CLOSE_WRITE_SCHEMA = 3,
+ OPEN_READ_SCHEMA1 = 4,
+ OPEN_READ_SCHEMA2 = 5,
+ READ_SCHEMA1 = 6,
+ READ_SCHEMA2 = 7,
+ CLOSE_READ_SCHEMA = 8,
+ OPEN_READ_TAB_FILE1 = 9,
+ OPEN_READ_TAB_FILE2 = 10,
+ READ_TAB_FILE1 = 11,
+ READ_TAB_FILE2 = 12,
+ CLOSE_READ_TAB_FILE = 13,
+ OPEN_WRITE_TAB_FILE = 14,
+ WRITE_TAB_FILE = 15,
+ CLOSE_WRITE_TAB_FILE = 16
+ };
+ /** File Pointer for this file system connection */
+ Uint32 filePtr;
+
+ /** Reference of owner record */
+ Uint32 ownerPtr;
+
+ /** State of file system connection */
+ FsState fsState;
+
+ /** Used by Array Pool for free list handling */
+ Uint32 nextPool;
+ };
+
+ typedef Ptr<FsConnectRecord> FsConnectRecordPtr;
+ ArrayPool<FsConnectRecord> c_fsConnectRecordPool;
+
+ /**
+ * This record stores all the information about a node
+ ****************************************************************************/
+ struct NodeRecord {
+ enum NodeState {
+ API_NODE = 0,
+ NDB_NODE_ALIVE = 1,
+ NDB_NODE_DEAD = 2
+ };
+ bool hotSpare;
+ NodeState nodeState;
+ };
+
+ typedef Ptr<NodeRecord> NodeRecordPtr;
+ CArray<NodeRecord> c_nodes;
+ NdbNodeBitmask c_aliveNodes;
+
+ /**
+ * This record stores one 8192-word page used for packed table definitions and schema data
+ ****************************************************************************/
+ struct PageRecord {
+ Uint32 word[8192];
+ };
+
+ typedef Ptr<PageRecord> PageRecordPtr;
+ CArray<PageRecord> c_pageRecordArray;
+
+ /**
+ * A page for create index table signal.
+ */
+ PageRecord c_indexPage;
+
+public:
+ Dbdict(const class Configuration &);
+ virtual ~Dbdict();
+
+private:
+ BLOCK_DEFINES(Dbdict);
+
+ // Signal receivers
+ void execDICTSTARTREQ(Signal* signal);
+
+ void execGET_TABINFOREQ(Signal* signal);
+ void execGET_TABLEDID_REQ(Signal* signal);
+ void execGET_TABINFO_REF(Signal* signal);
+ void execGET_TABINFO_CONF(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execHOT_SPAREREP(Signal* signal);
+ void execDIADDTABCONF(Signal* signal);
+ void execDIADDTABREF(Signal* signal);
+ void execTAB_COMMITCONF(Signal* signal);
+ void execTAB_COMMITREF(Signal* signal);
+ void execGET_SCHEMA_INFOREQ(Signal* signal);
+ void execSCHEMA_INFO(Signal* signal);
+ void execSCHEMA_INFOCONF(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTC_SCHVERCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal);
+
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+
+ void execLIST_TABLES_REQ(Signal* signal);
+
+ // Index signals
+ void execCREATE_INDX_REQ(Signal* signal);
+ void execCREATE_INDX_CONF(Signal* signal);
+ void execCREATE_INDX_REF(Signal* signal);
+
+ void execALTER_INDX_REQ(Signal* signal);
+ void execALTER_INDX_CONF(Signal* signal);
+ void execALTER_INDX_REF(Signal* signal);
+
+ void execCREATE_TABLE_CONF(Signal* signal);
+ void execCREATE_TABLE_REF(Signal* signal);
+
+ void execDROP_INDX_REQ(Signal* signal);
+ void execDROP_INDX_CONF(Signal* signal);
+ void execDROP_INDX_REF(Signal* signal);
+
+ void execDROP_TABLE_CONF(Signal* signal);
+ void execDROP_TABLE_REF(Signal* signal);
+
+ void execBUILDINDXREQ(Signal* signal);
+ void execBUILDINDXCONF(Signal* signal);
+ void execBUILDINDXREF(Signal* signal);
+
+ // Util signals used by Event code
+ void execUTIL_PREPARE_CONF(Signal* signal);
+ void execUTIL_PREPARE_REF (Signal* signal);
+ void execUTIL_EXECUTE_CONF(Signal* signal);
+ void execUTIL_EXECUTE_REF (Signal* signal);
+ void execUTIL_RELEASE_CONF(Signal* signal);
+ void execUTIL_RELEASE_REF (Signal* signal);
+
+
+ // Event signals from API
+ void execCREATE_EVNT_REQ (Signal* signal);
+ void execCREATE_EVNT_CONF(Signal* signal);
+ void execCREATE_EVNT_REF (Signal* signal);
+
+ void execDROP_EVNT_REQ (Signal* signal);
+
+ void execSUB_START_REQ (Signal* signal);
+ void execSUB_START_CONF (Signal* signal);
+ void execSUB_START_REF (Signal* signal);
+
+ void execSUB_STOP_REQ (Signal* signal);
+ void execSUB_STOP_CONF (Signal* signal);
+ void execSUB_STOP_REF (Signal* signal);
+
+ // Event signals from SUMA
+
+ void execCREATE_SUBID_CONF(Signal* signal);
+ void execCREATE_SUBID_REF (Signal* signal);
+
+ void execSUB_CREATE_CONF(Signal* signal);
+ void execSUB_CREATE_REF (Signal* signal);
+
+ void execSUB_SYNC_CONF(Signal* signal);
+ void execSUB_SYNC_REF (Signal* signal);
+
+ void execSUB_REMOVE_REQ(Signal* signal);
+ void execSUB_REMOVE_CONF(Signal* signal);
+ void execSUB_REMOVE_REF(Signal* signal);
+
+ // Trigger signals
+ void execCREATE_TRIG_REQ(Signal* signal);
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execALTER_TRIG_REQ(Signal* signal);
+ void execALTER_TRIG_CONF(Signal* signal);
+ void execALTER_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+ void execDROP_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+
+ void execDROP_TABLE_REQ(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execPREP_DROP_TAB_REF(Signal* signal);
+ void execPREP_DROP_TAB_CONF(Signal* signal);
+
+ void execDROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REF(Signal* signal);
+ void execDROP_TAB_CONF(Signal* signal);
+
+ void execCREATE_TABLE_REQ(Signal* signal);
+ void execALTER_TABLE_REQ(Signal* signal);
+ void execCREATE_FRAGMENTATION_REF(Signal*);
+ void execCREATE_FRAGMENTATION_CONF(Signal*);
+ void execCREATE_TAB_REQ(Signal* signal);
+ void execADD_FRAGREQ(Signal* signal);
+ void execLQHFRAGREF(Signal* signal);
+ void execLQHFRAGCONF(Signal* signal);
+ void execLQHADDATTREF(Signal* signal);
+ void execLQHADDATTCONF(Signal* signal);
+ void execCREATE_TAB_REF(Signal* signal);
+ void execCREATE_TAB_CONF(Signal* signal);
+ void execALTER_TAB_REQ(Signal* signal);
+ void execALTER_TAB_REF(Signal* signal);
+ void execALTER_TAB_CONF(Signal* signal);
+
+ /*
+ * 2.4 COMMON STORED VARIABLES
+ */
+
+ /**
+ * This record stores all the state needed
+ * when the schema page is being sent to other nodes
+ ***************************************************************************/
+ struct SendSchemaRecord {
+ /** Number of words of schema data */
+ Uint32 noOfWords;
+ /** Page Id of schema data */
+ Uint32 pageId;
+
+ Uint32 nodeId;
+ SignalCounter m_SCHEMAINFO_Counter;
+
+ Uint32 noOfWordsCurrentlySent;
+ Uint32 noOfSignalsSentSinceDelay;
+
+ bool inUse;
+ };
+ SendSchemaRecord c_sendSchemaRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a table file is being read from disk
+ ****************************************************************************/
+ struct ReadTableRecord {
+ /** Number of Pages */
+ Uint32 noOfPages;
+ /** Page Id*/
+ Uint32 pageId;
+ /** Table Id of read table */
+ Uint32 tableId;
+
+ bool inUse;
+ Callback m_callback;
+ };
+ ReadTableRecord c_readTableRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a table file is being written to disk
+ ****************************************************************************/
+ struct WriteTableRecord {
+ /** Number of Pages */
+ Uint32 noOfPages;
+ /** Page Id*/
+ Uint32 pageId;
+ /** Table Files Handled, local state variable */
+ Uint32 noOfTableFilesHandled;
+ /** Table Id of written table */
+ Uint32 tableId;
+ /** State, indicates from where it was called */
+ enum TableWriteState {
+ IDLE = 0,
+ WRITE_ADD_TABLE_MASTER = 1,
+ WRITE_ADD_TABLE_SLAVE = 2,
+ WRITE_RESTART_FROM_MASTER = 3,
+ WRITE_RESTART_FROM_OWN = 4,
+ TWR_CALLBACK = 5
+ };
+ TableWriteState tableWriteState;
+ Callback m_callback;
+ };
+ WriteTableRecord c_writeTableRecord;
+
+ /**
+ * This record stores all the state needed
+ * when a schema file is being read from disk
+ ****************************************************************************/
+ struct ReadSchemaRecord {
+ /** Page Id of schema page */
+ Uint32 pageId;
+ /** State, indicates from where it was called */
+ enum SchemaReadState {
+ IDLE = 0,
+ INITIAL_READ = 1
+ };
+ SchemaReadState schemaReadState;
+ };
+ ReadSchemaRecord c_readSchemaRecord;
+
+private:
+ /**
+ * This record stores all the state needed
+ * when a schema file is being written to disk
+ ****************************************************************************/
+ struct WriteSchemaRecord {
+ /** Page Id of schema page */
+ Uint32 pageId;
+ /** Schema Files Handled, local state variable */
+ Uint32 noOfSchemaFilesHandled;
+
+ bool inUse;
+ Callback m_callback;
+ };
+ WriteSchemaRecord c_writeSchemaRecord;
+
+ /**
+ * This record stores all the information needed
+ * during the restart process
+ ****************************************************************************/
+ struct RestartRecord {
+ /** Global check point identity */
+ Uint32 gciToRestart;
+
+ /** The active table at restart process */
+ Uint32 activeTable;
+
+ /** The block reference to return to after the restart process */
+ BlockReference returnBlockRef;
+ };
+ RestartRecord c_restartRecord;
+
+ /**
+ * This record stores all the information needed
+ * when a table definition is being retrieved
+ ****************************************************************************/
+ struct RetrieveRecord {
+ RetrieveRecord(){ noOfWaiters = 0;}
+
+ /** Only one retrieve table definition at a time */
+ bool busyState;
+
+ /**
+ * No of waiting in time queue
+ */
+ Uint32 noOfWaiters;
+
+ /** Block Reference of retriever */
+ BlockReference blockRef;
+
+ /** Id of retriever */
+ Uint32 m_senderData;
+
+ /** Table id of retrieved table */
+ Uint32 tableId;
+
+ /** Starting page to retrieve data from */
+ Uint32 retrievePage;
+
+ /** Number of pages retrieved */
+ Uint32 retrievedNoOfPages;
+
+ /** Number of words retrieved */
+ Uint32 retrievedNoOfWords;
+
+ /** Number of words sent currently */
+ Uint32 currentSent;
+
+ /**
+ * Long signal stuff
+ */
+ bool m_useLongSig;
+ };
+ RetrieveRecord c_retrieveRecord;
+
+ /**
+ * This record keeps track of the schema pages used to
+ * store and restore the schema file
+ *
+ * This is the info stored in one entry of the schema
+ * page. Each table has 4 words of info.
+ * Word 1: Schema version (upper 16 bits)
+ * Table State (lower 16 bits)
+ * Word 2: Number of pages of table description
+ * Word 3: Global checkpoint id table was created
+ * Word 4: Currently zero
+ ****************************************************************************/
+ struct SchemaRecord {
+ /** Schema page */
+ Uint32 schemaPage;
+
+ /** Old Schema page (used at node restart) */
+ Uint32 oldSchemaPage;
+
+ Callback m_callback;
+ };
+ SchemaRecord c_schemaRecord;
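+
+ /*
+ * A small sketch of how word 1 of a schema page entry is meant to be
+ * read, following the layout described above. Given the first word
+ * w1 of an entry (w1, schemaVersion and tableState are illustrative
+ * names, not existing members):
+ *
+ *   Uint32 schemaVersion = w1 >> 16;     // upper 16 bits
+ *   Uint32 tableState    = w1 & 0xFFFF;  // lower 16 bits
+ */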
+
+ void initSchemaFile(SchemaFile *, Uint32 sz);
+ void computeChecksum(SchemaFile *);
+ bool validateChecksum(const SchemaFile *);
+ SchemaFile::TableEntry * getTableEntry(void * buf, Uint32 tableId,
+ bool allowTooBig = false);
+
+ Uint32 computeChecksum(const Uint32 * src, Uint32 len);
+
+
+ /* ----------------------------------------------------------------------- */
+ // Node References
+ /* ----------------------------------------------------------------------- */
+ Uint16 c_masterNodeId;
+
+ /* ----------------------------------------------------------------------- */
+ // Various current system properties
+ /* ----------------------------------------------------------------------- */
+ Uint16 c_numberNode;
+ Uint16 c_noHotSpareNodes;
+ Uint16 c_noNodesFailed;
+ Uint32 c_failureNr;
+
+ /* ----------------------------------------------------------------------- */
+ // State variables
+ /* ----------------------------------------------------------------------- */
+
+ enum BlockState {
+ BS_IDLE = 0,
+ BS_CREATE_TAB = 1,
+ BS_BUSY = 2,
+ BS_NODE_FAILURE = 3
+ };
+ BlockState c_blockState;
+
+ struct PackTable {
+
+ enum PackTableState {
+ PTS_IDLE = 0,
+ PTS_ADD_TABLE_MASTER = 1,
+ PTS_ADD_TABLE_SLAVE = 2,
+ PTS_GET_TAB = 3,
+ PTS_RESTART = 4
+ } m_state;
+
+ } c_packTable;
+
+ Uint32 c_startPhase;
+ Uint32 c_restartType;
+ bool c_initialStart;
+ bool c_systemRestart;
+ bool c_nodeRestart;
+ bool c_initialNodeRestart;
+ Uint32 c_tabinfoReceived;
+
+ /**
+ * Temporary structure used when parsing table info
+ */
+ struct ParseDictTabInfoRecord {
+ DictTabInfo::RequestType requestType;
+ Uint32 errorCode;
+ Uint32 errorLine;
+
+ SimpleProperties::UnpackStatus status;
+ Uint32 errorKey;
+ TableRecordPtr tablePtr;
+ };
+
+ // Operation records
+
+ /**
+ * Common part of operation records. Uses KeyTable2. Note that each
+ * seize/release invokes ctor/dtor automatically.
+ */
+ struct OpRecordCommon {
+ Uint32 key; // key shared between master and slaves
+ Uint32 nextHash;
+ Uint32 prevHash;
+ Uint32 hashValue() const {
+ return key;
+ }
+ bool equal(const OpRecordCommon& rec) const {
+ return key == rec.key;
+ }
+ };
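+
+ /*
+ * Typical life cycle of an operation record built on this common
+ * part, following the trigger code in Dbdict.cpp above (a sketch;
+ * c_opDropTrigger is assumed to be one of the KeyTable2 collections
+ * keyed on OpRecordCommon::key):
+ *
+ *   OpDropTriggerPtr opPtr;
+ *   if (c_opDropTrigger.seize(opPtr)) {   // ctor runs on seize
+ *     opPtr.p->key = req->getOpKey();     // key shared master/slaves
+ *     c_opDropTrigger.add(opPtr);
+ *   }
+ *   ...
+ *   c_opDropTrigger.find(opPtr, key);     // look up by shared key
+ *   c_opDropTrigger.release(opPtr);       // dtor runs on release
+ */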
+
+ /**
+ * Create table record
+ */
+ struct CreateTableRecord : OpRecordCommon {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_coordinatorRef;
+
+ Uint32 m_errorCode;
+ void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
+
+ // For alter table
+ Uint32 m_changeMask;
+ bool m_alterTableFailed;
+ AlterTableRef m_alterTableRef;
+ Uint32 m_alterTableId;
+
+ /* Previous table name (used for reverting failed table rename) */
+ char previousTableName[MAX_TAB_NAME_SIZE];
+
+ Uint32 m_tablePtrI;
+ Uint32 m_tabInfoPtrI;
+ Uint32 m_fragmentsPtrI;
+
+ Uint32 m_dihAddFragPtr; // Connect ptr towards DIH
+ Uint32 m_lqhFragPtr; // Connect ptr towards LQH
+
+ Callback m_callback; // Who's using local create tab
+ MutexHandle2<DIH_START_LCP_MUTEX> m_startLcpMutex;
+
+ struct CoordinatorData {
+ Uint32 m_gsn;
+ SafeCounterHandle m_counter;
+ CreateTabReq::RequestType m_requestType;
+ } m_coordinatorData;
+ };
+ typedef Ptr<CreateTableRecord> CreateTableRecordPtr;
+
+ /**
+ * Drop table record
+ */
+ struct DropTableRecord : OpRecordCommon {
+ DropTableReq m_request;
+
+ Uint32 m_requestType;
+ Uint32 m_coordinatorRef;
+
+ Uint32 m_errorCode;
+ void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
+
+ /**
+ * When sending stuff around
+ */
+ struct CoordinatorData {
+ Uint32 m_gsn;
+ Uint32 m_block;
+ SignalCounter m_signalCounter;
+ } m_coordinatorData;
+
+ struct ParticipantData {
+ Uint32 m_gsn;
+ Uint32 m_block;
+ SignalCounter m_signalCounter;
+
+ Callback m_callback;
+ } m_participantData;
+ };
+ typedef Ptr<DropTableRecord> DropTableRecordPtr;
+
+ /**
+ * Request flags passed in signals along with request type and
+ * propagated across operations.
+ */
+ struct RequestFlag {
+ enum {
+ RF_LOCAL = 1 << 0, // create on local node only
+ RF_NOBUILD = 1 << 1, // no need to build index
+ RF_NOTCTRIGGER = 1 << 2 // alter trigger: no trigger in TC
+ };
+ };
+
+ /**
+ * Operation record for create index.
+ */
+ struct OpCreateIndex : OpRecordCommon {
+ // original request (index id will be added)
+ CreateIndxReq m_request;
+ AttributeList m_attrList;
+ char m_indexName[MAX_TAB_NAME_SIZE];
+ bool m_storedIndex;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ CreateIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpCreateIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = CreateIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const CreateIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateIndxRef::NoError;
+ }
+ void setError(const CreateIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTableRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ switch (ref->getErrorCode()) {
+ case CreateTableRef::TableAlreadyExist:
+ m_errorCode = CreateIndxRef::IndexExists;
+ break;
+ default:
+ m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
+ break;
+ }
+ m_errorLine = ref->getErrorLine();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (CreateIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpCreateIndex> OpCreateIndexPtr;
+
+ /**
+ * Operation record for drop index.
+ */
+ struct OpDropIndex : OpRecordCommon {
+ // original request
+ DropIndxReq m_request;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ DropIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ DropIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpDropIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = DropIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = DropIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const DropIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != DropIndxRef::NoError;
+ }
+ void setError(const DropIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (DropIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTableRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ switch(ref->errorCode) {
+ case(DropTableRef::Busy):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ case(DropTableRef::NoSuchTable):
+ m_errorCode = DropIndxRef::IndexNotFound;
+ break;
+ case(DropTableRef::DropInProgress):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ case(DropTableRef::NoDropTableRecordAvailable):
+ m_errorCode = DropIndxRef::Busy;
+ break;
+ default:
+ m_errorCode = (DropIndxRef::ErrorCode)ref->errorCode;
+ break;
+ }
+ //m_errorLine = ref->getErrorLine();
+ //m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropIndex> OpDropIndexPtr;
+
+ /**
+ * Operation record for alter index.
+ */
+ struct OpAlterIndex : OpRecordCommon {
+ // original request plus buffer for attribute lists
+ AlterIndxReq m_request;
+ AttributeList m_attrList;
+ AttributeList m_tableKeyList;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ AlterIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ AlterIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ Uint32 m_triggerCounter;
+ // ctor
+ OpAlterIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = AlterIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = AlterIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ m_triggerCounter = 0;
+ }
+ void save(const AlterIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != AlterIndxRef::NoError;
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const BuildIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpAlterIndex> OpAlterIndexPtr;
+
+ /**
+ * Operation record for build index.
+ */
+ struct OpBuildIndex : OpRecordCommon {
+ // original request plus buffer for attribute lists
+ BuildIndxReq m_request;
+ AttributeList m_attrList;
+ AttributeList m_tableKeyList;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ BuildIndxReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ Uint32 m_constrTriggerId;
+ // error info
+ BuildIndxRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpBuildIndex() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = BuildIndxReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_constrTriggerId = RNIL;
+ m_errorCode = BuildIndxRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const BuildIndxReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != BuildIndxRef::NoError;
+ }
+ void setError(const BuildIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ }
+ }
+ void setError(const AlterIndxRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (BuildIndxRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpBuildIndex> OpBuildIndexPtr;
+
+ /**
+ * Operation record for Util Signals.
+ */
+ struct OpSignalUtil : OpRecordCommon{
+ Callback m_callback;
+ Uint32 m_userData;
+ };
+ typedef Ptr<OpSignalUtil> OpSignalUtilPtr;
+
+ /**
+ * Operation record for subscribe-start-stop
+ */
+ struct OpSubEvent : OpRecordCommon {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_errorCode;
+ RequestTracker m_reqTracker;
+ };
+ typedef Ptr<OpSubEvent> OpSubEventPtr;
+
+ static const Uint32 sysTab_NDBEVENTS_0_szs[];
+
+ /**
+ * Operation record for create event.
+ */
+ struct OpCreateEvent : OpRecordCommon {
+ // original request (event id will be added)
+ CreateEvntReq m_request;
+ //AttributeMask m_attrListBitmask;
+ // AttributeList m_attrList;
+ sysTab_NDBEVENTS_0 m_eventRec;
+ // char m_eventName[MAX_TAB_NAME_SIZE];
+ // char m_tableName[MAX_TAB_NAME_SIZE];
+
+ // coordinator DICT
+ RequestTracker m_reqTracker;
+ // state info
+ CreateEvntReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // ctor
+ OpCreateEvent() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_requestType = CreateEvntReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void init(const CreateEvntReq* req, Dbdict* dp) {
+ m_request = *req;
+ m_errorCode = CreateEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateEvntRef::NoError;
+ }
+ void setError(const CreateEvntRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+
+ };
+ typedef Ptr<OpCreateEvent> OpCreateEventPtr;
+
+ /**
+ * Operation record for drop event.
+ */
+ struct OpDropEvent : OpRecordCommon {
+ // original request
+ DropEvntReq m_request;
+ // char m_eventName[MAX_TAB_NAME_SIZE];
+ sysTab_NDBEVENTS_0 m_eventRec;
+ RequestTracker m_reqTracker;
+ // error info
+ DropEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // ctor
+ OpDropEvent() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_errorCode = DropEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void init(const DropEvntReq* req) {
+ m_request = *req;
+ m_errorCode = DropEvntRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ bool hasError() {
+ return m_errorCode != DropEvntRef::NoError;
+ }
+ void setError(const DropEvntRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropEvent> OpDropEventPtr;
+
+ /**
+ * Operation record for create trigger.
+ */
+ struct OpCreateTrigger : OpRecordCommon {
+ // original request (trigger id will be added)
+ CreateTrigReq m_request;
+ char m_triggerName[MAX_TAB_NAME_SIZE];
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ CreateTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ CreateTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpCreateTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = CreateTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = CreateTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const CreateTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != CreateTrigRef::NoError;
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (CreateTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpCreateTrigger> OpCreateTriggerPtr;
+
+ /**
+ * Operation record for drop trigger.
+ */
+ struct OpDropTrigger : OpRecordCommon {
+ // original request
+ DropTrigReq m_request;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ DropTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ DropTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpDropTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = DropTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = DropTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const DropTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != DropTrigRef::NoError;
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (DropTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpDropTrigger> OpDropTriggerPtr;
+
+ /**
+ * Operation record for alter trigger.
+ */
+ struct OpAlterTrigger : OpRecordCommon {
+ // original request
+ AlterTrigReq m_request;
+ // nodes participating in operation
+ NdbNodeBitmask m_nodes;
+ // coordinator DICT
+ Uint32 m_coordinatorRef;
+ bool m_isMaster;
+ // state info
+ AlterTrigReq::RequestType m_requestType;
+ Uint32 m_requestFlag;
+ // error info
+ AlterTrigRef::ErrorCode m_errorCode;
+ Uint32 m_errorLine;
+ Uint32 m_errorNode;
+ // counters
+ SignalCounter m_signalCounter;
+ // ctor
+ OpAlterTrigger() {
+ memset(&m_request, 0, sizeof(m_request));
+ m_coordinatorRef = 0;
+ m_requestType = AlterTrigReq::RT_UNDEFINED;
+ m_requestFlag = 0;
+ m_errorCode = AlterTrigRef::NoError;
+ m_errorLine = 0;
+ m_errorNode = 0;
+ }
+ void save(const AlterTrigReq* req) {
+ m_request = *req;
+ m_requestType = req->getRequestType();
+ m_requestFlag = req->getRequestFlag();
+ }
+ bool hasError() {
+ return m_errorCode != AlterTrigRef::NoError;
+ }
+ void setError(const AlterTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const CreateTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ void setError(const DropTrigRef* ref) {
+ if (ref != 0 && ! hasError()) {
+ m_errorCode = (AlterTrigRef::ErrorCode)ref->getErrorCode();
+ m_errorLine = ref->getErrorLine();
+ m_errorNode = ref->getErrorNode();
+ }
+ }
+ };
+ typedef Ptr<OpAlterTrigger> OpAlterTriggerPtr;
+
+ // Common operation record pool
+public:
+ STATIC_CONST( opCreateTableSize = sizeof(CreateTableRecord) );
+ STATIC_CONST( opDropTableSize = sizeof(DropTableRecord) );
+ STATIC_CONST( opCreateIndexSize = sizeof(OpCreateIndex) );
+ STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) );
+ STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) );
+ STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) );
+ STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) );
+ STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) );
+ STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) );
+ STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) );
+ STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) );
+ STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) );
+ STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) );
+private:
+#define PTR_ALIGN(n) ((((n)+sizeof(void*)-1)>>2)&~((sizeof(void*)-1)>>2))
+ union OpRecordUnion {
+ Uint32 u_opCreateTable [PTR_ALIGN(opCreateTableSize)];
+ Uint32 u_opDropTable [PTR_ALIGN(opDropTableSize)];
+ Uint32 u_opCreateIndex [PTR_ALIGN(opCreateIndexSize)];
+ Uint32 u_opDropIndex [PTR_ALIGN(opDropIndexSize)];
+ Uint32 u_opCreateEvent [PTR_ALIGN(opCreateEventSize)];
+ Uint32 u_opSubEvent [PTR_ALIGN(opSubEventSize)];
+ Uint32 u_opDropEvent [PTR_ALIGN(opDropEventSize)];
+ Uint32 u_opSignalUtil [PTR_ALIGN(opSignalUtilSize)];
+ Uint32 u_opAlterIndex [PTR_ALIGN(opAlterIndexSize)];
+ Uint32 u_opBuildIndex [PTR_ALIGN(opBuildIndexSize)];
+ Uint32 u_opCreateTrigger[PTR_ALIGN(opCreateTriggerSize)];
+ Uint32 u_opDropTrigger [PTR_ALIGN(opDropTriggerSize)];
+ Uint32 u_opAlterTrigger [PTR_ALIGN(opAlterTriggerSize)];
+ Uint32 nextPool;
+ };
+ ArrayPool<OpRecordUnion> c_opRecordPool;
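+
+ /**
+ * PTR_ALIGN(n) converts a size of n bytes into a number of Uint32 words,
+ * rounded up so that the byte size of each OpRecordUnion member is a
+ * multiple of sizeof(void*).  A worked example, assuming a 64-bit build
+ * where sizeof(void*) == 8:
+ *
+ *   n = 13:  ((13 + 7) >> 2) & ~1  =  5 & ~1  =  4 words  (16 bytes)
+ *   n = 24:  ((24 + 7) >> 2) & ~1  =  7 & ~1  =  6 words  (24 bytes)
+ *   n = 25:  ((25 + 7) >> 2) & ~1  =  8 & ~1  =  8 words  (32 bytes)
+ *
+ * On a 32-bit build the mask term becomes ~0, so the result is simply
+ * the size rounded up to whole Uint32 words.
+ */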
+
+ // Operation records
+ KeyTable2<CreateTableRecord, OpRecordUnion> c_opCreateTable;
+ KeyTable2<DropTableRecord, OpRecordUnion> c_opDropTable;
+ KeyTable2<OpCreateIndex, OpRecordUnion> c_opCreateIndex;
+ KeyTable2<OpDropIndex, OpRecordUnion> c_opDropIndex;
+ KeyTable2<OpAlterIndex, OpRecordUnion> c_opAlterIndex;
+ KeyTable2<OpBuildIndex, OpRecordUnion> c_opBuildIndex;
+ KeyTable2<OpCreateEvent, OpRecordUnion> c_opCreateEvent;
+ KeyTable2<OpSubEvent, OpRecordUnion> c_opSubEvent;
+ KeyTable2<OpDropEvent, OpRecordUnion> c_opDropEvent;
+ KeyTable2<OpSignalUtil, OpRecordUnion> c_opSignalUtil;
+ KeyTable2<OpCreateTrigger, OpRecordUnion> c_opCreateTrigger;
+ KeyTable2<OpDropTrigger, OpRecordUnion> c_opDropTrigger;
+ KeyTable2<OpAlterTrigger, OpRecordUnion> c_opAlterTrigger;
+
+ // Unique key for operation XXX move to some system table
+ Uint32 c_opRecordSequence;
+
+ // Statement blocks
+
+ /* ------------------------------------------------------------ */
+ // Start/Restart Handling
+ /* ------------------------------------------------------------ */
+ void sendSTTORRY(Signal* signal);
+ void sendNDB_STTORRY(Signal* signal);
+ void initSchemaFile(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Drop Table Handling
+ /* ------------------------------------------------------------ */
+ void releaseTableObject(Uint32 tableId, bool removeFromHash = true);
+
+ /* ------------------------------------------------------------ */
+ // General Stuff
+ /* ------------------------------------------------------------ */
+ Uint32 getFreeTableRecord(Uint32 primaryTableId);
+ Uint32 getFreeTriggerRecord();
+ bool getNewAttributeRecord(TableRecordPtr tablePtr,
+ AttributeRecordPtr & attrPtr);
+ void packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId);
+ void packTableIntoPagesImpl(SimpleProperties::Writer &, TableRecordPtr,
+ Signal* signal= 0);
+
+ void sendGET_TABINFOREQ(Signal* signal,
+ Uint32 tableId);
+ void sendTC_SCHVERREQ(Signal* signal,
+ Uint32 tableId,
+ BlockReference tcRef);
+
+ /* ------------------------------------------------------------ */
+ // System Restart Handling
+ /* ------------------------------------------------------------ */
+ void initSendSchemaData(Signal* signal);
+ void sendSchemaData(Signal* signal);
+ Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer);
+ void checkSchemaStatus(Signal* signal);
+ void sendDIHSTARTTAB_REQ(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Receive Table Handling
+ /* ------------------------------------------------------------ */
+ void handleTabInfoInit(SimpleProperties::Reader &,
+ ParseDictTabInfoRecord *,
+ bool checkExist = true);
+ void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *);
+
+ void handleAddTableFailure(Signal* signal,
+ Uint32 failureLine,
+ Uint32 tableId);
+ bool verifyTableCorrect(Signal* signal, Uint32 tableId);
+
+ /* ------------------------------------------------------------ */
+ // Add Table Handling
+ /* ------------------------------------------------------------ */
+
+ /* ------------------------------------------------------------ */
+ // Add Fragment Handling
+ /* ------------------------------------------------------------ */
+ void sendLQHADDATTRREQ(Signal*, CreateTableRecordPtr, Uint32 attributePtrI);
+
+ /* ------------------------------------------------------------ */
+ // Read/Write Schema and Table files
+ /* ------------------------------------------------------------ */
+ void updateSchemaState(Signal* signal, Uint32 tableId,
+ SchemaFile::TableEntry*, Callback*);
+ void startWriteSchemaFile(Signal* signal);
+ void openSchemaFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsPtr,
+ bool writeFlag);
+ void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void writeSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void closeWriteSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode);
+
+ void writeTableFile(Signal* signal, Uint32 tableId,
+ SegmentedSectionPtr tabInfo, Callback*);
+ void startWriteTableFile(Signal* signal, Uint32 tableId);
+ void openTableFile(Signal* signal,
+ Uint32 fileNo,
+ Uint32 fsPtr,
+ Uint32 tableId,
+ bool writeFlag);
+ void writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void writeTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeWriteTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+
+ void startReadTableFile(Signal* signal, Uint32 tableId);
+ void openReadTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void readTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readTableRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void closeReadTableConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+
+ void startReadSchemaFile(Signal* signal);
+ void openReadSchemaRef(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+ void readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
+ void readSchemaConf(Signal* signal, FsConnectRecordPtr fsPtr);
+ void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr);
+ void closeReadSchemaConf(Signal* signal,
+ FsConnectRecordPtr fsPtr);
+
+ /* ------------------------------------------------------------ */
+ // Get table definitions
+ /* ------------------------------------------------------------ */
+ void sendGET_TABINFOREF(Signal* signal,
+ GetTabInfoReq*,
+ GetTabInfoRef::ErrorCode errorCode);
+
+ void sendGET_TABLEID_REF(Signal* signal,
+ GetTableIdReq * req,
+ GetTableIdRef::ErrorCode errorCode);
+
+ void sendGetTabResponse(Signal* signal);
+
+ /* ------------------------------------------------------------ */
+ // Indexes and triggers
+ /* ------------------------------------------------------------ */
+
+ // reactivate and rebuild indexes on start up
+ void activateIndexes(Signal* signal, Uint32 i);
+ void rebuildIndexes(Signal* signal, Uint32 i);
+
+ // create index
+ void createIndex_recvReply(Signal* signal, const CreateIndxConf* conf,
+ const CreateIndxRef* ref);
+ void createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr);
+ void createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr, bool);
+ // drop index
+ void dropIndex_recvReply(Signal* signal, const DropIndxConf* conf,
+ const DropIndxRef* ref);
+ void dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr);
+ void dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, bool);
+ // alter index
+ void alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf,
+ const AlterIndxRef* ref);
+ void alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr);
+ void alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr, bool);
+ // build index
+ void buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf,
+ const BuildIndxRef* ref);
+ void buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr);
+ void buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, bool);
+
+ // Events
+ void
+ createEventUTIL_PREPARE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ createEventUTIL_EXECUTE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_PREPARE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_EXECUTE_READ(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_PREPARE_DELETE(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUTIL_EXECUTE_DELETE(Signal *signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUtilPrepareRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void
+ dropEventUtilExecuteRef(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ int
+ sendSignalUtilReq(Callback *c,
+ BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections);
+ int
+ recvSignalUtilReq(Signal* signal, Uint32 returnCode);
+
+ void completeSubStartReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+ void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+ void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode);
+
+ void dropEvent_sendReply(Signal* signal,
+ OpDropEventPtr evntRecPtr);
+
+ void createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr);
+ void createEventComplete_RT_USER_CREATE(Signal* signal,
+ OpCreateEventPtr evntRecPtr);
+ void createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+ void createEventComplete_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+
+ void createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr);
+
+ void createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI,
+ Uint32 returnCode);
+ void createEvent_sendReply(Signal* signal, OpCreateEventPtr evntRecPtr,
+ LinearSectionPtr *ptr = NULL, int noLSP = 0);
+
+ void prepareTransactionEventSysTable (Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ UtilPrepareReq::OperationTypeValue prepReq);
+ void prepareUtilTransaction(Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 tableId,
+ const char *tableName,
+ UtilPrepareReq::OperationTypeValue prepReq,
+ Uint32 noAttr,
+ Uint32 attrIds[],
+ const char *attrNames[]);
+
+ void executeTransEventSysTable(Callback *c,
+ Signal *signal,
+ const Uint32 ptrI,
+ sysTab_NDBEVENTS_0& m_eventRec,
+ const Uint32 prepareId,
+ UtilPrepareReq::OperationTypeValue prepReq);
+ void executeTransaction(Callback *c,
+ Signal* signal,
+ Uint32 senderData,
+ Uint32 prepareId,
+ Uint32 noAttr,
+ LinearSectionPtr headerPtr,
+ LinearSectionPtr dataPtr);
+
+ void parseReadEventSys(Signal *signal, sysTab_NDBEVENTS_0& m_eventRec);
+
+ // create trigger
+ void createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf,
+ const CreateTrigRef* ref);
+ void createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr);
+ void createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr, bool);
+ // drop trigger
+ void dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf,
+ const DropTrigRef* ref);
+ void dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr);
+ void dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr, bool);
+ // alter trigger
+ void alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf,
+ const AlterTrigRef* ref);
+ void alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr);
+ void alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr, bool);
+ // support
+ void getTableKeyList(TableRecordPtr tablePtr, AttributeList& list);
+ void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id);
+ void getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list);
+ void getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask);
+
+ /* ------------------------------------------------------------ */
+ // Initialisation
+ /* ------------------------------------------------------------ */
+ void initCommonData();
+ void initRecords();
+ void initConnectRecord();
+ void initRetrieveRecord(Signal*, Uint32, Uint32 returnCode);
+ void initSchemaRecord();
+ void initRestartRecord();
+ void initSendSchemaRecord();
+ void initReadTableRecord();
+ void initWriteTableRecord();
+ void initReadSchemaRecord();
+ void initWriteSchemaRecord();
+
+ void initNodeRecords();
+ void initTableRecords();
+ void initialiseTableRecord(TableRecordPtr tablePtr);
+ void initTriggerRecords();
+ void initialiseTriggerRecord(TriggerRecordPtr triggerPtr);
+ void initPageRecords();
+
+ Uint32 getFsConnRecord();
+
+ bool getIsFailed(Uint32 nodeId) const;
+
+ void dropTableRef(Signal * signal, DropTableReq *, DropTableRef::ErrorCode);
+ void printTables(); // For debugging only
+ int handleAlterTab(AlterTabReq * req,
+ CreateTableRecord * regAlterTabPtr,
+ TableRecordPtr origTablePtr,
+ TableRecordPtr newTablePtr);
+ void revertAlterTable(Signal * signal,
+ Uint32 changeMask,
+ Uint32 tableId,
+ CreateTableRecord * regAlterTabPtr);
+ void alterTableRef(Signal * signal,
+ AlterTableReq *, AlterTableRef::ErrorCode,
+ ParseDictTabInfoRecord* parseRecord = NULL);
+ void alterTabRef(Signal * signal,
+ AlterTabReq *, AlterTableRef::ErrorCode,
+ ParseDictTabInfoRecord* parseRecord = NULL);
+ void alterTab_writeSchemaConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+ void alterTab_writeTableConf(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode);
+
+ void prepDropTab_nextStep(Signal* signal, DropTableRecordPtr);
+ void prepDropTab_complete(Signal* signal, DropTableRecordPtr);
+ void prepDropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
+
+ void dropTab_localDROP_TAB_CONF(Signal* signal);
+ void dropTab_nextStep(Signal* signal, DropTableRecordPtr);
+ void dropTab_complete(Signal* signal, Uint32 dropTabPtrI, Uint32);
+ void dropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32);
+
+ void createTab_prepare(Signal* signal, CreateTabReq * req);
+ void createTab_writeSchemaConf1(Signal* signal, Uint32 callback, Uint32);
+ void createTab_writeTableConf(Signal* signal, Uint32 callbackData, Uint32);
+ void createTab_dih(Signal*, CreateTableRecordPtr,
+ SegmentedSectionPtr, Callback*);
+ void createTab_dihComplete(Signal* signal, Uint32 callbackData, Uint32);
+
+ void createTab_startLcpMutex_locked(Signal* signal, Uint32, Uint32);
+ void createTab_startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
+
+ void createTab_commit(Signal* signal, CreateTabReq * req);
+ void createTab_writeSchemaConf2(Signal* signal, Uint32 callbackData, Uint32);
+ void createTab_alterComplete(Signal*, Uint32 callbackData, Uint32);
+
+ void createTab_drop(Signal* signal, CreateTabReq * req);
+ void createTab_dropComplete(Signal* signal, Uint32 callbackData, Uint32);
+
+ void createTab_reply(Signal* signal, CreateTableRecordPtr, Uint32 nodeId);
+ void alterTab_activate(Signal*, CreateTableRecordPtr, Callback*);
+
+ void restartCreateTab(Signal*, Uint32, const SchemaFile::TableEntry *, bool);
+ void restartCreateTab_readTableConf(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_writeTableConf(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_dihComplete(Signal* signal, Uint32 callback, Uint32);
+ void restartCreateTab_activateComplete(Signal*, Uint32 callback, Uint32);
+
+ void restartDropTab(Signal* signal, Uint32 tableId);
+ void restartDropTab_complete(Signal*, Uint32 callback, Uint32);
+
+ void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32);
+ void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32);
+ void masterRestart_checkSchemaStatusComplete(Signal*, Uint32, Uint32);
+
+ void sendSchemaComplete(Signal*, Uint32 callbackData, Uint32);
+
+ // global metadata support
+ friend class MetaData;
+ int getMetaTablePtr(TableRecordPtr& tablePtr, Uint32 tableId, Uint32 tableVersion);
+ int getMetaTable(MetaData::Table& table, Uint32 tableId, Uint32 tableVersion);
+ int getMetaTable(MetaData::Table& table, const char* tableName);
+ int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, Uint32 attributeId);
+ int getMetaAttribute(MetaData::Attribute& attribute, const MetaData::Table& table, const char* attributeName);
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt
new file mode 100644
index 00000000000..8d4267a1c42
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt
@@ -0,0 +1,88 @@
+
+Event creation
+
+USER DICT(Master) UTIL SUMA
+================================================================================
+CREATE_EVENT_REQ::create
+-------------------------->
+ - Get ID
+ CREATE_SUBID
+ ----------------------------------------------->
+ <-----------------------------------------------
+ - insert into system table
+ UTIL_PREPARE::insert
+ ------------------------>
+ <------------------------
+ UTIL_EXECUTE
+ ------------------------>
+ <------------------------
+CREATE_EVENT_CONF
+<--------------------------
+
+
+Event dropping
+
+USER DICT(Master) UTIL SUMA
+================================================================================
+DROP_EVENT_REQ
+-------------------------->
+ - remove from system table
+ UTIL_PREPARE::delete
+ ------------------------>
+ <------------------------
+ UTIL_EXECUTE
+ ------------------------>
+ <------------------------
+DROP_EVENT_CONF
+<--------------------------
+
+
+
+create NdbEventOperation
+
+USER DICT(Master) (Slaves) UTIL
+=======================================================================
+CREATE_EVENT_REQ::get
+-------------------------->
+ - read from system table
+ UTIL_PREPARE::read
+ ---------------------------------------->
+ <----------------------------------------
+ UTIL_EXECUTE
+ ---------------------------------------->
+ <----------------------------------------
+ SUMA
+ CREATE_EVENT_REQ::after_get ======
+ ---------------------->
+ SUB_CREATE
+ ------------------>
+ <------------------
+ SUB_SYNC
+ ------------------>
+ <------------------
+ CREATE_EVENT_CONF
+ <----------------------
+CREATE_EVENT_CONF
+<-------------------------
+
+
+
+USER DICT(Master) (Slaves) SUMA
+=======================================================================
+SUB_START_REQ
+-------------------------->
+ SUB_START_REQ
+ ---------------------->
+ SUB_START
+ ------------------>
+ <------------------
+ SUB_START_CONF
+ <----------------------
+SUB_START_CONF
+<-------------------------
+
+
+SUB_STOP is analogous to SUB_START
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt b/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt
new file mode 100644
index 00000000000..8d364d15c57
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt
@@ -0,0 +1,140 @@
+DROP TABLE DESCRIPTION
+----------------------
+
+Drop table is controlled by DICT.
+
+Drop table is used, in some form, in the following cases:
+ - Drop Table
+ - Abort Add Table
+ - Drop table in node restart
+ - Drop table in system restart
+
+Sequence of Drop Table:
+-----------------------
+
+1) PREP_DROP_TAB_REQ -> all DICT
+ Update schema files on disk
+ Table status = DROPPING
+
+2) Controlling DICT only
+ Report that the table drop is secured but not yet completed.
+
+------ PREP DROP
+
+4) PREP_DROP_TAB_REQ -> all LQHs
+
+5) PREP_DROP_TAB_REQ -> all TCs
+
+6) PREP_DROP_TAB_REQ -> all DIHs
+
+
+--- LQH::PREP_DROP_TAB_REQ
+
+*) Mark the table so that no new operations will start
+*) Mark all fragments so that new LCP_FRAG_ORD gets replied directly
+ without actually checkpointing the fragment
+2) Start waiting for completion
+3) Reply PREP_DROP_TAB_CONF
+
+- After this LQH accepts WAIT_DROP_TAB_REQ
+
+--- TC::PREP_DROP_TAB_REQ
+
+1) Mark the table so that no new transactions will start on the table
+2) Send WAIT_DROP_TAB_REQ -> all connected LQHs
+3) Wait for CONF (including node-failure handling) from the LQHs
+4) Reply PREP_DROP_TAB_CONF
+
+--- DIH::PREP_DROP_TAB_REQ
+
+1) Mark the table so that no new LCP will start on the table
+2) If master (unlink any queued LCP_FRAG_ORD)
+3) Send WAIT_DROP_TAB_REQ -> all connected LQHs
+4) Wait for CONF (including node-failure handling) from the LQHs
+5) Reply PREP_DROP_TAB_CONF
+
+--- LQH::WAIT_DROP_TAB_REQ
+
+1) Wait for running operations
+ Wait for running LCP
+
+2) Reply
+
+------ PREP_DROP
+
+7) DROP_TAB_REQ -> all DICTs
+ *) DROP_TAB_REQ -> TC
+ *) DROP_TAB_REQ -> ACC
+ *) DROP_TAB_REQ -> TUP
+ *) DROP_TAB_REQ -> DIH
+ *) DROP_TAB_REQ -> LQH
+ *) Update schema files on disk DROPPED
+
+8) DICT_SCHEMAREQ -> all DICT
+ Table status = DROPPED
+
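+On the participant side (LQH/TC/DIH above) the prepare phase boils down to
+"stop admitting new work on the table, wait for work already running to
+drain, then confirm".  A minimal standalone sketch of that pattern (the
+names below are invented for the illustration and are not the actual
+block data structures):
+
+  struct DemoTable {
+    bool dropping;          // set when PREP_DROP_TAB_REQ arrives
+    unsigned runningOps;    // operations already started on the table
+  };
+
+  // New operations are refused once the drop has been prepared.
+  bool demoStartOperation(DemoTable& tab) {
+    if (tab.dropping)
+      return false;         // "no new operations will start"
+    tab.runningOps++;
+    return true;
+  }
+
+  // WAIT_DROP_TAB_REQ can be confirmed once the last running op completes.
+  bool demoReadyToConfirm(const DemoTable& tab) {
+    return tab.dropping && tab.runningOps == 0;
+  }
+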
+---------------------------------
+
+Sequence of Drop table in node/system restart
+---------------------------------------------
+
+In both node and system restart the node receives the schema information from
+the master. If the table is in a state where it needs to complete the drop
+table activity then DBACC, DBTUP, DBDIH, DBDICT is contacted to drop all files
+related to the table. After this the schema information is updated with the new
+state. Since all nodes receive the same schema information there is no risk of
+different behaviour in the various NDB nodes.
+
+API Requirements for Drop Table
+-------------------------------
+Definition:
+
+ Two tables are NOT the same if they were created with two create
+ tables at different points in time, even if the two create tables
+ had exactly the same definition.
+
+Requirements:
+
+1. Each operation in a transaction referring to a table (by name or by id)
+ should operate on the same table. (This is probably necessary.)
+
+2. Each operation in a transaction referring to a table (by name or by
+ id) should operate on the same table as was defined at the
+ startTransaction time point. (This is not strictly necessary for
+ API consistency.)
+
+ Example 1:
+
+ startTransaction()
+
+ drop("TableName1")
+ create("TableName1")
+
+ getNdbOperation("TableName1")
+
+ execute(commit)
+
+ - If both requirements 1 and 2 are fulfilled, then this should lead
+ to "Error: Invalid Schema Version" or similar error
+
+ - If only requirement 1 is fulfilled, then this may be executed
+ without any errors.
+
+
+ Example 2:
+
+ startTransaction()
+
+ getNdbOperation("TableName1")
+ execute(NoCommit)
+
+ drop("TableName1")
+ create("TableName1")
+
+ getNdbOperation("TableName1")
+
+ execute(commit)
+
+ - This should always lead to "Error: Invalid Schema Version" or
+ similar error.
+
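+A minimal sketch of the version check that gives this behaviour
+(illustrative only; the names are invented for the example and this is
+not the NDB API):
+
+  struct DemoTableDef {
+    unsigned tableId;
+    unsigned schemaVersion;   // bumped each time the table is (re)created
+  };
+
+  struct DemoTransaction {
+    DemoTableDef cached;      // definition captured at startTransaction()
+  };
+
+  // Requirement 2: every operation must see the definition that existed
+  // when the transaction started, otherwise fail with a schema error.
+  bool demoGetOperation(const DemoTransaction& trans,
+                        const DemoTableDef& current) {
+    if (current.tableId != trans.cached.tableId ||
+        current.schemaVersion != trans.cached.schemaVersion)
+      return false;           // "Error: Invalid Schema Version"
+    return true;
+  }
+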
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Event.txt b/storage/ndb/src/kernel/blocks/dbdict/Event.txt
new file mode 100644
index 00000000000..553c915d9c5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Event.txt
@@ -0,0 +1,102 @@
+
+Event creation
+
+USER DICT(Master) UTIL SUMA
+================================================================================
+CREATE_EVENT_REQ::create
+-------------------------->
+ - Get ID
+ CREATE_SUBID
+ ----------------------------------------------->
+ <-----------------------------------------------
+ - insert into system table
+ UTIL_PREPARE::insert
+ ------------------------>
+ <------------------------
+ UTIL_EXECUTE
+ ------------------------>
+ <------------------------
+CREATE_EVENT_CONF
+<--------------------------
+
+
+Event dropping
+
+USER DICT(Master) (Slaves) UTIL SUMA
+================================================================================
+DROP_EVENT_REQ
+-------------------------->
+ - read from system table
+ UTIL_PREPARE::read
+ ------------------------------------>
+ <------------------------------------
+ UTIL_EXECUTE
+ ------------------------------------>
+ <------------------------------------
+ SUB_REMOVE_REQ
+ -------------------->
+ SUB_REMOVE
+ ------------------------------>
+ <------------------------------
+ SUB_REMOVE_CONF
+ <--------------------
+ - remove from system table
+ UTIL_PREPARE::delete
+ ------------------------------------>
+ <------------------------------------
+ UTIL_EXECUTE
+ ------------------------------------>
+ <------------------------------------
+DROP_EVENT_CONF
+<--------------------------
+
+
+
+create NdbEventOperation
+
+USER DICT(Master) (Slaves) UTIL
+=======================================================================
+CREATE_EVENT_REQ::get
+-------------------------->
+ - read from system table
+ UTIL_PREPARE::read
+ ---------------------------------------->
+ <----------------------------------------
+ UTIL_EXECUTE
+ ---------------------------------------->
+ <----------------------------------------
+ SUMA
+ CREATE_EVENT_REQ::after_get ======
+ ---------------------->
+ SUB_CREATE
+ ------------------>
+ <------------------
+ SUB_SYNC
+ ------------------>
+ <------------------
+ CREATE_EVENT_CONF
+ <----------------------
+CREATE_EVENT_CONF
+<-------------------------
+
+
+
+USER DICT(Master) (Slaves) SUMA
+=======================================================================
+SUB_START_REQ
+-------------------------->
+ SUB_START_REQ
+ ---------------------->
+ SUB_START
+ ------------------>
+ <------------------
+ SUB_START_CONF
+ <----------------------
+SUB_START_CONF
+<-------------------------
+
+
+SUB_STOP is analogous to SUB_START
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Makefile.am b/storage/ndb/src/kernel/blocks/dbdict/Makefile.am
new file mode 100644
index 00000000000..9a0d68f8148
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Makefile.am
@@ -0,0 +1,25 @@
+#SUBDIRS = printSchemafile
+
+noinst_LIBRARIES = libdbdict.a
+
+libdbdict_a_SOURCES = Dbdict.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbdict.dsp
+
+libdbdict.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdict_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl
new file mode 100644
index 00000000000..1bcec156ef7
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl
@@ -0,0 +1,751 @@
+// ---------------------------------------------------------------------------
+// This file contains a signal log trace for DBDICT at the master for a
+// create table. Another file contains the signal log for the participant
+// node. Master node is 2, participant node 4 and api node is 3.
+//
+
+// ---------------------------------------------------------------------------
+// First arrives the table description in a number of DICTTABINFO signals.
+// These have a header of 5 words (see DictTabInfo.hpp for details) and
+// up to 20 words of property data per signal. The property data is packed
+// by the SimpleProperties class.
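+//
+// A worked example from the four signals that follow: the fourth header
+// word, H'00000042 = 66, gives the total number of property-data words,
+// and the fifth word is the running word offset of the fragment.  The
+// 66 words arrive as 20 + 20 + 20 + 6, so the offsets are H'00000000,
+// H'00000014 (20), H'00000028 (40) and H'0000003c (60), and the signal
+// lengths are 5 + 20 = 25, 25, 25 and 5 + 6 = 11 words.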
+// ---------------------------------------------------------------------------
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0
+ H'00010003 H'00047700 H'00000001 H'00000042 H'00000000 H'4e444250 H'524f5053
+ H'00010000 H'00000000 H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f
+ H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f
+ H'49440000 H'000103ee H'00000001 H'000203e8
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0
+ H'00010003 H'00047700 H'00000001 H'00000042 H'00000014 H'00000007 H'56504e5f
+ H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52
+ H'595f4e42 H'00000000 H'000103eb H'00000003 H'000103ed H'0000000a H'000103ec
+ H'00000002 H'000203e8 H'00000010 H'4c415354
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0
+ H'00010003 H'00047700 H'00000001 H'00000042 H'00000028 H'5f43414c H'4c5f5041
+ H'52545900 H'000103eb H'00000003 H'000103ed H'0000000a H'000103ec H'00000002
+ H'000203e8 H'00000006 H'44455343 H'52000000 H'000103eb H'00000003 H'000103ed
+ H'00000064 H'000103ec H'00000002 H'00010005
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 11 trace: 0
+ H'00010003 H'00047700 H'00000001 H'00000042 H'0000003c H'00000002 H'00010006
+ H'00000005 H'0001000c H'00000002 H'0000ffff
+
+// ---------------------------------------------------------------------------
+// Send DICT_SCHEMAREQ to all nodes including ourselves to write the state
+// ADD_STARTED in the schema file for the new table.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57077 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001
+
+// ---------------------------------------------------------------------------
+// Write both schema files with new state of table added.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57081 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57082 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 99
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57081 length: 8 trace: 0
+ FilePointer: 99
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57090 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57091 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57090 length: 4 trace: 0
+ FilePointer: 99
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57099 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57100 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57099 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57111 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57112 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 100
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57111 length: 8 trace: 0
+ FilePointer: 100
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57123 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57124 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57123 length: 4 trace: 0
+ FilePointer: 100
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57133 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57135 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46718 length: 1 trace: 0
+ H'00000004
+
+// ---------------------------------------------------------------------------
+// Pack Table description into pages in DICT using SimpleProperties class.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 3 trace: 0
+ H'00000001 H'00000002 H'00000000
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57140 length: 3 trace: 0
+ H'00000001 H'00000002 H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57141 length: 2 trace: 0
+ H'00000002 H'00000002
+
+// ---------------------------------------------------------------------------
+// Send the table description over to the other NDB nodes.
+// A CONTINUEB is sent for each signal sent to avoid overloading the
+// transporters.
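+//
+// The pattern is: send one DICTTABINFO fragment, then send CONTINUEB to
+// ourselves, and send the next fragment only when that CONTINUEB comes
+// back.  A minimal standalone sketch of the idea (the names are invented
+// for the illustration and this is not the DBDICT code):
+//
+//   #include <queue>
+//   #include <cstdio>
+//
+//   struct SendState { unsigned offset; unsigned totalWords; };
+//   static std::queue<SendState> jobBuffer;   // stands in for the job queue
+//
+//   void sendFragmentJob(SendState st) {
+//     const unsigned FRAGMENT_WORDS = 20;
+//     std::printf("send DICTTABINFO, offset %u\n", st.offset);
+//     st.offset += FRAGMENT_WORDS;
+//     if (st.offset < st.totalWords)
+//       jobBuffer.push(st);                   // "CONTINUEB" to ourselves
+//   }
+//
+//   int main() {
+//     SendState st = { 0, 110 };              // 110 property words, as in this trace
+//     jobBuffer.push(st);
+//     while (!jobBuffer.empty()) {            // other jobs could interleave here
+//       SendState next = jobBuffer.front();
+//       jobBuffer.pop();
+//       sendFragmentJob(next);
+//     }
+//     return 0;
+//   }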
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000000 H'4e444250 H'524f5053
+ H'00002000 H'0000001c H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f
+ H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f
+ H'49440000 H'1cc03924 H'00000001 H'000203e8
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57142 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000014 H'00000007 H'56504e5f
+ H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52
+ H'595f4e42 H'00000000 H'000103eb H'00000003 H'524f4c46 H'00020001 H'0000000a
+ H'56504e5f H'55534552 H'53000010 H'00010002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57143 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000028 H'00000002 H'00010011
+ H'00000003 H'00010003 H'00000001 H'00010005 H'00000002 H'00010006 H'00000005
+ H'0001000a H'0000004b H'0001000c H'00000002 H'000203e8 H'00000007 H'56504e5f
+ H'49440064 H'000103e9 H'00000000 H'000103ee
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57144 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'0000003c H'00000001 H'000203e8
+ H'00000007 H'56504e5f H'4e420002 H'000103e9 H'00000001 H'000103ee H'00000001
+ H'000203e8 H'0000000d H'44495245 H'43544f52 H'595f4e42 H'00000000 H'000103e9
+ H'00000002 H'000103eb H'00000003 H'000103ec
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57145 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000050 H'00000002 H'000103ed
+ H'0000000a H'000203e8 H'00000010 H'4c415354 H'5f43414c H'4c5f5041 H'52545900
+ H'000103e9 H'00000003 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed
+ H'0000000a H'000203e8 H'00000006 H'44455343
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57146 length: 2 trace: 0
+ H'00000002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 15 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000064 H'52000000 H'000103e9
+ H'00000004 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed H'00000064
+ H'0000ffff
+
+// ---------------------------------------------------------------------------
+// In parallel with sending the table description to other nodes we will also
+// write the table description to our local file system.
+// ---------------------------------------------------------------------------
+
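The file-system interaction that follows is a strict open, write, close sequence, done once per file copy. A minimal standalone sketch of the same idea, assuming hypothetical file names and using plain fstream in place of the NDBFS signal interface:

    #include <cstdio>
    #include <fstream>
    #include <string>
    #include <vector>

    // Writes one copy of the packed description pages; flush() stands in for
    // the Sync flag seen in the FSWRITEREQ signals above.
    static bool writePages(const std::string& fileName, const std::vector<char>& pages)
    {
      std::ofstream f(fileName.c_str(), std::ios::binary | std::ios::trunc);
      if (!f)
        return false;
      f.write(&pages[0], static_cast<std::streamsize>(pages.size()));
      f.flush();
      return f.good();
    }

    int main()
    {
      std::vector<char> tablePages(8192, 0);   // packed table description (contents omitted)
      // Two redundant copies, mirroring the two FSOPENREQ/FSWRITEREQ/FSCLOSEREQ
      // rounds in the trace; the file names are hypothetical.
      const char* copies[] = { "T2.TableDescriptor.1", "T2.TableDescriptor.2" };
      for (int i = 0; i < 2; i++) {
        if (!writePages(copies[i], tablePages))
          std::fprintf(stderr, "failed to write %s\n", copies[i]);
      }
      return 0;
    }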
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010401ff
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57165 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57166 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 101
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57165 length: 8 trace: 0
+ FilePointer: 101
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000000, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57177 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57178 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57177 length: 4 trace: 0
+ FilePointer: 101
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57186 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57187 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57186 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010402ff
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57195 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57196 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 102
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57195 length: 8 trace: 0
+ FilePointer: 102
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000000, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57204 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57205 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57204 length: 4 trace: 0
+ FilePointer: 102
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57218 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57219 length: 1 trace: 0
+ UserPointer: H'00000000
+
+// ---------------------------------------------------------------------------
+// Completed writing the table description to our file system.
+// ---------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57229 gsn: 24 "DICTTABCONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46803 length: 2 trace: 0
+ H'00000002 H'00000004
+
+// ---------------------------------------------------------------------------
+// Also the participant has completed writing the table description to file.
+// ---------------------------------------------------------------------------
+
+// ---------------------------------------------------------------------------
+// Write the state UPDATE_PAGE_COUNT to schema file for the new table.
+// This also contains the number of pages used for the table description.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57229 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57234 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002
+
+// ---------------------------------------------------------------------------
+// Write schema file to disk
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57238 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57239 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 103
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57238 length: 8 trace: 0
+ FilePointer: 103
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57247 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57248 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57247 length: 4 trace: 0
+ FilePointer: 103
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57257 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57258 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57257 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57267 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57268 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 104
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57267 length: 8 trace: 0
+ FilePointer: 104
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57279 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57283 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57279 length: 4 trace: 0
+ FilePointer: 104
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57290 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57291 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57290 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57290 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57293 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57299 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46860 length: 1 trace: 0
+ H'00000004
+
+// ---------------------------------------------------------------------------
+// All schema files in the system have been updated.
+// ---------------------------------------------------------------------------
+
+// ---------------------------------------------------------------------------
+// Now control is given to DIH for adding the fragments needed by this table.
+// We first seize a record in DIH and then we send the add table request with
+// the needed table parameters.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 238 "DISEIZEREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57299 length: 2 trace: 0
+ H'00000000 H'00fa0002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57299 gsn: 236 "DISEIZECONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57304 length: 2 trace: 0
+ H'00000000 H'00000210
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 187 "DIADDTABREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57299 length: 6 trace: 0
+ H'00000210 H'00000002 H'00000000 H'00000006 H'00000000 H'00000001
+
+// ---------------------------------------------------------------------------
+// DIH requests us to add a certain fragment replica.
+// ---------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 195 "DICTFRAGSREQ" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57418 length: 7 trace: 0
+ H'00000000 H'00000000 H'00000000 H'00000002 H'00150040 H'00000001 H'00000002
+
+// ---------------------------------------------------------------------------
+// We add the fragment by contacting LQH, sending a LQHFRAGREQ and a number
+// of LQHADDATTREQ signals (in this case only one since there are no more
+// than 8 attributes).
+// ---------------------------------------------------------------------------
+
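A rough sketch of the batching rule mentioned above (illustrative only; the AttrDef layout and the helper are hypothetical, and the limit of 8 attribute definitions per LQHADDATTREQ is taken from the comment, not from the signal definition):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical attribute definition as it might be handed to LQH.
    struct AttrDef {
      unsigned attrId;
      unsigned attrDescriptor;
    };

    // Hypothetical stand-in for sending one LQHADDATTREQ signal.
    static void sendLqhAddAttrReq(const AttrDef* attrs, std::size_t count)
    {
      std::printf("LQHADDATTREQ carrying %zu attribute definition(s)\n", count);
      (void)attrs;
    }

    int main()
    {
      std::vector<AttrDef> attrs(5);           // five attributes in this example trace
      const std::size_t MAX_PER_SIGNAL = 8;    // limit quoted in the comment above
      for (std::size_t i = 0; i < attrs.size(); i += MAX_PER_SIGNAL) {
        std::size_t n = std::min(MAX_PER_SIGNAL, attrs.size() - i);
        sendLqhAddAttrReq(&attrs[i], n);       // one signal per batch of at most 8
      }
      return 0;
    }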
+---- Send ----- Signal ----------------
+r.bn: 247 "DBLQH", r.proc: 2, gsn: 313 "LQHFRAGREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 17 trace: 0
+ H'00000000 H'00fa0002 H'00000000 H'00000000 H'00000002 H'00000001 H'00000050
+ H'0000004b H'00000006 H'00000001 H'00000000 H'00000005 H'00000000 H'00000000
+ H'00000001 H'00000002 H'00000000
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 311 "LQHFRAGCONF" prio: 1
+s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 57428 length: 2 trace: 0
+ H'00000000 H'00000000
+---- Send ----- Signal ----------------
+r.bn: 247 "DBLQH", r.proc: 2, gsn: 310 "LQHADDATTREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 12 trace: 0
+ H'00000000 H'00000005 H'00000000 H'00012255 H'00000001 H'00012255 H'00000002
+ H'000a2236 H'00000003 H'000a2236 H'00000004 H'00642236
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 308 "LQHADDATTCONF" prio: 1
+s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 57450 length: 1 trace: 0
+ H'00000000
+
+// ---------------------------------------------------------------------------
+// When we have completed adding the fragment we send DINEXTNODEREQ (should
+// be renamed to DICTFRAGSCONF) to DIH to indicate that we have completed the
+// task.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 231 "DINEXTNODEREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 4 trace: 0
+ H'00000210 H'00000000 H'00000001 H'00000000
+
+// ---------------------------------------------------------------------------
+// We continue by performing the same task again for the next fragment replica.
+// These signals are omitted from this log since they contain nothing new.
+// ---------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 57618 gsn: 185 "DIADDTABCONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57655 length: 2 trace: 0
+ H'00000000 H'00000002
+
+// ---------------------------------------------------------------------------
+// Now that we have added all fragments DIH gives back control to DICT by
+// sending DIADDTABCONF.
+// ---------------------------------------------------------------------------
+
+// ---------------------------------------------------------------------------
+// It is now time to decide in which global checkpoint this table will be born.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 499 "WAIT_GCP_REQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 57618 length: 3 trace: 0
+ H'00fa0002 H'00000000 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58288 gsn: 501 "WAIT_GCP_CONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58296 length: 2 trace: 0
+ H'00000000 H'0000000c
+
+// ---------------------------------------------------------------------------
+// We can update all schema files in the system with this global checkpoint
+// number. We are certain that no transaction will be performed on the table
+// before this global checkpoint.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58288 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58298 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003
+
+// ---------------------------------------------------------------------------
+// Write schema files as usual when updating schema file state.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58304 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58305 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 117
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58304 length: 8 trace: 0
+ FilePointer: 117
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58315 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58316 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58315 length: 4 trace: 0
+ FilePointer: 117
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58326 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58327 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58326 length: 7 trace: 0
+ UserReference: H'00fa0002, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58339 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58340 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 118
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58339 length: 8 trace: 0
+ FilePointer: 118
+ UserReference: H'00fa0002, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58348 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58349 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58348 length: 4 trace: 0
+ FilePointer: 118
+ UserReference: H'00fa0002, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58360 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58364 length: 1 trace: 0
+ H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 47846 length: 1 trace: 0
+ H'00000004
+
+// ---------------------------------------------------------------------------
+// Commit the table for usage in DIH and LQH in all nodes.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 247 "DBLQH", r.proc: 2, gsn: 398 "TAB_COMMITREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00000000 H'00fa0002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 398 "TAB_COMMITREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00000001 H'00fa0002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 247 "DBLQH", r.proc: 4, gsn: 398 "TAB_COMMITREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00000000 H'00fa0002 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 4, gsn: 398 "TAB_COMMITREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00000001 H'00fa0002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1
+s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 58370 length: 3 trace: 0
+ H'00000000 H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58371 length: 3 trace: 0
+ H'00000001 H'00000002 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1
+s.bn: 247 "DBLQH", s.proc: 4, s.sigId: 47846 length: 3 trace: 0
+ H'00000000 H'00000004 H'00000002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 4, s.sigId: 47846 length: 3 trace: 0
+ H'00000001 H'00000004 H'00000002
+
+// ---------------------------------------------------------------------------
+// Finally also open the table for usage from TC in all nodes.
+// After this signal is received in TC it is ok to execute transactions on
+// this new empty table.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 245 "DBTC", r.proc: 2, gsn: 404 "TC_SCHVERREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 5 trace: 0
+ H'00000002 H'00000001 H'00000001 H'00fa0002 H'00000000
+---- Send ----- Signal ----------------
+r.bn: 245 "DBTC", r.proc: 4, gsn: 404 "TC_SCHVERREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 5 trace: 0
+ H'00000002 H'00000001 H'00000001 H'00fa0002 H'00000000
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 403 "TC_SCHVERCONF" prio: 1
+s.bn: 245 "DBTC", s.proc: 2, s.sigId: 58376 length: 2 trace: 0
+ H'00000002 H'00000000
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 403 "TC_SCHVERCONF" prio: 1
+s.bn: 245 "DBTC", s.proc: 4, s.sigId: 47846 length: 2 trace: 0
+ H'00000002 H'00000001
+
+// ---------------------------------------------------------------------------
+// Unblock dictionary to allow for another add table.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 444 "UNBLO_DICTREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0
+ H'00fa0002
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 444 "UNBLO_DICTREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0
+ H'00fa0002
+
+// ---------------------------------------------------------------------------
+// Send the confirmation to the requesting application process.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 1 "API", r.proc: 3, gsn: 24 "DICTTABCONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00047700 H'00000002 H'00000001
+
+// ---------------------------------------------------------------------------
+// Also release the connection in DIH that was previously established.
+// ---------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 246 "DBDIH", r.proc: 2, gsn: 234 "DIRELEASEREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0
+ H'00000210 H'00000000 H'00fa0002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 444 "UNBLO_DICTREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58378 length: 1 trace: 0
+ H'00fa0002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 232 "DIRELEASECONF" prio: 1
+s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58380 length: 1 trace: 0
+ H'00000000
+
+// ---------------------------------------------------------------------------
+// Now all actions regarding this add table have completed.
+// ---------------------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
new file mode 100644
index 00000000000..7c3223d3d14
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
@@ -0,0 +1,57 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBDICT_SCHEMA_FILE_HPP
+#define DBDICT_SCHEMA_FILE_HPP
+
+#include <ndb_types.h>
+#include <string.h>
+
+struct SchemaFile {
+ char Magic[8];
+ Uint32 ByteOrder;
+ Uint32 NdbVersion;
+ Uint32 FileSize; // In bytes
+ Uint32 Unused;
+
+ Uint32 CheckSum;
+
+ enum TableState {
+ INIT = 0,
+ ADD_STARTED = 1,
+ TABLE_ADD_COMMITTED = 2,
+ DROP_TABLE_STARTED = 3,
+ DROP_TABLE_COMMITTED = 4,
+ ALTER_TABLE_COMMITTED = 5
+ };
+
+ struct TableEntry {
+ Uint32 m_tableState;
+ Uint32 m_tableVersion;
+ Uint32 m_tableType;
+ Uint32 m_noOfPages;
+ Uint32 m_gcp;
+
+ bool operator==(const TableEntry& o) const {
+ return memcmp(this, &o, sizeof(* this))== 0;
+ }
+ };
+
+ Uint32 NoOfTableEntries;
+ TableEntry TableEntries[1];
+};
+
+#endif
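The printSchemaFile.cpp tool added further below verifies a schema file by XOR-ing all of its 32-bit words and expecting the result to be zero. The following is a minimal sketch of how a writer could produce a file that satisfies that invariant using the struct above; building against this header in an NDB source tree, the magic string, the sizing helper and the entry count are all assumptions made for illustration:

    #include <cstring>
    #include <vector>

    #include "SchemaFile.hpp"   // the header above

    // Hypothetical helper: size in 32-bit words of a schema file with n entries.
    static Uint32 schemaFileWords(Uint32 n)
    {
      return (sizeof(SchemaFile) + (n - 1) * sizeof(SchemaFile::TableEntry)) / 4;
    }

    int main()
    {
      const Uint32 entries = 4;
      std::vector<Uint32> buf(schemaFileWords(entries), 0);
      SchemaFile* sf = reinterpret_cast<SchemaFile*>(&buf[0]);

      std::memcpy(sf->Magic, "NDBSCHMA", sizeof(sf->Magic));   // assumed magic value
      sf->FileSize = buf.size() * 4;                           // in bytes, per the comment above
      sf->NoOfTableEntries = entries;

      // Choose CheckSum so that the XOR over every word of the file is zero,
      // which is exactly what printSchemaFile.cpp checks when reading it back.
      sf->CheckSum = 0;
      Uint32 chk = 0;
      for (Uint32 i = 0; i < buf.size(); i++)
        chk ^= buf[i];
      sf->CheckSum = chk;
      return 0;
    }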
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl
new file mode 100644
index 00000000000..8740be9595d
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl
@@ -0,0 +1,416 @@
+// ---------------------------------------------------------------------------
+// This file contains a signal log trace for DBDICT at the participant for an
+// add table operation. Another file contains the signal log for the master
+// node. The master node is 2, the participant node is 4 and the API node is 3.
+//
+
+// ---------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+// Master requests us to save a new state of the table in the schema file
+// == ADD_STARTED
+//--------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46661 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57069 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001
+
+//--------------------------------------------------------------------------
+// Write the new state to the schema files.
+//--------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46661 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46669 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46670 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 99
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46669 length: 8 trace: 0
+ FilePointer: 99
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46679 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46680 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46679 length: 4 trace: 0
+ FilePointer: 99
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46690 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46691 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46690 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46700 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46701 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 100
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46700 length: 8 trace: 0
+ FilePointer: 100
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46709 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46710 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46709 length: 4 trace: 0
+ FilePointer: 100
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46719 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 1 trace: 0
+ H'00000004
+
+//--------------------------------------------------------------------------
+// We receive the table description from the master node.
+// We store the data in the DICT block (table and attribute records).
+//--------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000000 H'4e444250 H'524f5053
+ H'00002000 H'0000001c H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f
+ H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f
+ H'49440000 H'1cc03924 H'00000001 H'000203e8
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000014 H'00000007 H'56504e5f
+ H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52
+ H'595f4e42 H'00000000 H'000103eb H'00000003 H'524f4c46 H'00020001 H'0000000a
+ H'56504e5f H'55534552 H'53000010 H'00010002
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000028 H'00000002 H'00010011
+ H'00000003 H'00010003 H'00000001 H'00010005 H'00000002 H'00010006 H'00000005
+ H'0001000a H'0000004b H'0001000c H'00000002 H'000203e8 H'00000007 H'56504e5f
+ H'49440064 H'000103e9 H'00000000 H'000103ee
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'0000003c H'00000001 H'000203e8
+ H'00000007 H'56504e5f H'4e420002 H'000103e9 H'00000001 H'000103ee H'00000001
+ H'000203e8 H'0000000d H'44495245 H'43544f52 H'595f4e42 H'00000000 H'000103e9
+ H'00000002 H'000103eb H'00000003 H'000103ec
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000050 H'00000002 H'000103ed
+ H'0000000a H'000203e8 H'00000010 H'4c415354 H'5f43414c H'4c5f5041 H'52545900
+ H'000103e9 H'00000003 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed
+ H'0000000a H'000203e8 H'00000006 H'44455343
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 15 trace: 0
+ H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000064 H'52000000 H'000103e9
+ H'00000004 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed H'00000064
+ H'0000ffff
+
+//--------------------------------------------------------------------------
+// Pack the table description into pages.
+//--------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 3 trace: 0
+ H'00000001 H'00000002 H'00000000
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 164 "CONTINUEB" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46730 length: 3 trace: 0
+ H'00000001 H'00000002 H'00000000
+
+//--------------------------------------------------------------------------
+// Write the pages of the table description to disk.
+//--------------------------------------------------------------------------
+
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010401ff
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46748 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46749 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 101
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46748 length: 8 trace: 0
+ FilePointer: 101
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000000, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46757 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46758 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46757 length: 4 trace: 0
+ FilePointer: 101
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46766 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46767 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46766 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010402ff
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46783 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46784 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 102
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46783 length: 8 trace: 0
+ FilePointer: 102
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000000, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46794 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46795 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46794 length: 4 trace: 0
+ FilePointer: 102
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46803 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46804 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 24 "DICTTABCONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46803 length: 2 trace: 0
+ H'00000002 H'00000004
+
+//--------------------------------------------------------------------------
+// Update schema file in memory and on disk to UPDATE_PAGE_COUNT.
+//--------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46803 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57229 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46803 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46813 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46814 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 103
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46813 length: 8 trace: 0
+ FilePointer: 103
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46823 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46824 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46823 length: 4 trace: 0
+ FilePointer: 103
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46833 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46834 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46833 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46842 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46843 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 104
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46842 length: 8 trace: 0
+ FilePointer: 104
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46851 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46852 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46851 length: 4 trace: 0
+ FilePointer: 104
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 46860 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46861 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 46860 length: 1 trace: 0
+ H'00000004
+
+//--------------------------------------------------------------------------
+// Update schema file with information about the starting global checkpoint
+// identity.
+//--------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47782 gsn: 132 "DICT_SCHEMAREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58288 length: 7 trace: 0
+ H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47782 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47793 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47794 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 117
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47793 length: 8 trace: 0
+ FilePointer: 117
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47804 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47805 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47804 length: 4 trace: 0
+ FilePointer: 117
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47817 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47818 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47817 length: 7 trace: 0
+ UserReference: H'00fa0004, userPointer: H'00000000
+ FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200
+ FileFlags: H'00000311 Open write only, Create new file, Truncate existing file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47826 gsn: 259 "FSOPENCONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47827 length: 3 trace: 0
+ UserPointer: H'00000000
+ FilePointer: 118
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47826 length: 8 trace: 0
+ FilePointer: 118
+ UserReference: H'00fa0004, UserPointer: H'00000000
+ Operation flag: H'00000011, Sync, Format=Array of pages
+ varIndex: 1
+ numberOfPages: 1
+ pageData: H'00000008, H'00000000
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47836 gsn: 270 "FSWRITECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47837 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47836 length: 4 trace: 0
+ FilePointer: 118
+ UserReference: H'00fa0004, userPointer: H'00000000
+ Flags: H'00000000, Don't remove file
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47846 gsn: 255 "FSCLOSECONF" prio: 1
+s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47847 length: 1 trace: 0
+ UserPointer: H'00000000
+---- Send ----- Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1
+s.bn: 250 "DBDICT", s.proc: 4, sigId: 47846 length: 1 trace: 0
+ H'00000004
+
+//--------------------------------------------------------------------------
+// Finally unblock the DICT block so that it can handle add table as master
+// if it becomes master in the future.
+//--------------------------------------------------------------------------
+
+---- Received - Signal ----------------
+r.bn: 250 "DBDICT", r.proc: 4, sigId: 47846 gsn: 444 "UNBLO_DICTREQ" prio: 1
+s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58359 length: 1 trace: 0
+ H'00fa0002
+
+//--------------------------------------------------------------------------
+// We completed the add table operation.
+//--------------------------------------------------------------------------
+
diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
new file mode 100644
index 00000000000..0ba52878b7c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
@@ -0,0 +1,112 @@
+#if 0
+make -f Makefile -f - printSchemaFile <<'_eof_'
+printSchemaFile: printSchemaFile.cpp
+ $(CXXCOMPILE) -o $@ $@.cpp -L../../../common/util/.libs -lgeneral
+_eof_
+exit $?
+#endif
+
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <ndb_global.h>
+
+#include <NdbMain.h>
+#include <NdbOut.hpp>
+#include <SchemaFile.hpp>
+
+void
+usage(const char * prg){
+ ndbout << "Usage " << prg
+ << " P0.SchemaLog" << endl;
+}
+
+void
+fill(const char * buf, int mod){
+ int len = strlen(buf)+1;
+ ndbout << buf << " ";
+ while((len % mod) != 0){
+ ndbout << " ";
+ len++;
+ }
+}
+
+void
+print(const char * filename, const SchemaFile * file){
+ ndbout << "----- Schemafile: " << filename << " -----" << endl;
+ ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %d FileSize: %d",
+	   (int)sizeof(file->Magic), file->Magic,
+ file->ByteOrder,
+ file->NdbVersion,
+ file->FileSize);
+
+ for(Uint32 i = 0; i<file->NoOfTableEntries; i++){
+ SchemaFile::TableEntry te = file->TableEntries[i];
+ if(te.m_tableState != SchemaFile::INIT){
+ ndbout << "Table " << i << ": State = " << te.m_tableState
+ << " version = " << te.m_tableVersion
+ << " type = " << te.m_tableType
+ << " noOfPages = " << te.m_noOfPages
+ << " gcp: " << te.m_gcp << endl;
+ }
+ }
+}
+
+NDB_COMMAND(printSchemafile,
+ "printSchemafile", "printSchemafile", "Prints a schemafile", 16384){
+ if(argc < 2){
+ usage(argv[0]);
+ return 0;
+ }
+
+ const char * filename = argv[1];
+
+ struct stat sbuf;
+ const int res = stat(filename, &sbuf);
+ if(res != 0){
+ ndbout << "Could not find file: \"" << filename << "\"" << endl;
+ return 0;
+ }
+ const Uint32 bytes = sbuf.st_size;
+
+ Uint32 * buf = new Uint32[bytes/4+1];
+
+ FILE * f = fopen(filename, "rb");
+ if(f == 0){
+ ndbout << "Failed to open file" << endl;
+ delete [] buf;
+ return 0;
+ }
+ Uint32 sz = fread(buf, 1, bytes, f);
+ fclose(f);
+ if(sz != bytes){
+ ndbout << "Failure while reading file" << endl;
+ delete [] buf;
+ return 0;
+ }
+
+ print(filename, (SchemaFile *)&buf[0]);
+
+ Uint32 chk = 0, i;
+ for (i = 0; i < bytes/4; i++)
+ chk ^= buf[i];
+ if (chk != 0)
+ ndbout << "Invalid checksum!" << endl;
+
+ delete [] buf;
+ return 0;
+}
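As the usage() function above indicates, the tool takes the schema file name as its single argument (for example: printSchemaFile P0.SchemaLog). It prints the file header and every table entry whose state is not INIT, and finally reports "Invalid checksum!" if the XOR of all 32-bit words in the file is non-zero.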
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
new file mode 100644
index 00000000000..ee67bf47d7b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -0,0 +1,1603 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBDIH_H
+#define DBDIH_H
+
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include "Sysfile.hpp"
+#include <ArrayList.hpp>
+#include <SignalCounter.hpp>
+
+#include <signaldata/MasterLCP.hpp>
+#include <signaldata/CopyGCIReq.hpp>
+#include <blocks/mutexes.hpp>
+
+#ifdef DBDIH_C
+
+/*###################*/
+/* FILE SYSTEM FLAGS */
+/*###################*/
+#define ZLIST_OF_PAIRS 0
+#define ZLIST_OF_PAIRS_SYNCH 16
+#define ZOPEN_READ_WRITE 2
+#define ZCREATE_READ_WRITE 0x302
+#define ZCLOSE_NO_DELETE 0
+#define ZCLOSE_DELETE 1
+
+/*###############*/
+/* NODE STATES */
+/*###############*/
+#define ZIDLE 0
+#define ZACTIVE 1
+
+/*#########*/
+/* GENERAL */
+/*#########*/
+#define ZVAR_NO_WORD 1
+#define ZVAR_NO_CRESTART_INFO 20
+#define ZVAR_NO_CRESTART_INFO_TO_FILE 21
+#define ZVALID 1
+#define ZINVALID 2
+
+/*###############*/
+/* ERROR CODES */
+/*###############*/
+// ------------------------------------------
+// Error Codes for Transactions (None so far)
+// ------------------------------------------
+
+// --------------------------------------
+// Error Codes for Add Table
+// --------------------------------------
+#define ZREPLERROR1 306
+#define ZNOTIMPLEMENTED 307
+#define ZTABLEINSTALLED 310
+// --------------------------------------
+// Error Codes for Scan Table
+// --------------------------------------
+#define ZERRONOUSSTATE 308
+
+// --------------------------------------
+// Crash Codes
+// --------------------------------------
+#define ZCOULD_NOT_OCCUR_ERROR 300
+#define ZNOT_MASTER_ERROR 301
+#define ZWRONG_FAILURE_NUMBER_ERROR 302
+#define ZWRONG_START_NODE_ERROR 303
+#define ZNO_REPLICA_FOUND_ERROR 304
+#define ZNODE_ALREADY_STARTING_ERROR 305
+#define ZNODE_START_DISALLOWED_ERROR 309
+
+// --------------------------------------
+// Codes from LQH
+// --------------------------------------
+#define ZNODE_FAILURE_ERROR 400
+
+
+/*#########*/
+/* PHASES */
+/*#########*/
+#define ZNDB_SPH1 1
+#define ZNDB_SPH2 2
+#define ZNDB_SPH3 3
+#define ZNDB_SPH4 4
+#define ZNDB_SPH5 5
+#define ZNDB_SPH6 6
+#define ZNDB_SPH7 7
+#define ZNDB_SPH8 8
+/*#########*/
+/* SIZES */
+/*#########*/
+#define ZPAGEREC 100
+#define ZCREATE_REPLICA_FILE_SIZE 4
+#define ZPROXY_MASTER_FILE_SIZE 10
+#define ZPROXY_FILE_SIZE 10
+#endif
+
+class Dbdih: public SimulatedBlock {
+public:
+
+ // Records
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
+ * THE API CONNECT RECORD IS THE SAME RECORD POINTER AS USED IN THE TC BLOCK
+ *
+ * IT KEEPS TRACK OF ALL THE OPERATIONS CONNECTED TO THIS TRANSACTION.
+ * IT IS LINKED INTO A QUEUE IN CASE THE GLOBAL CHECKPOINT IS CURRENTLY
+ * ONGOING */
+ struct ApiConnectRecord {
+ Uint32 apiGci;
+ Uint32 nextApi;
+ };
+ typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
+
+ /*############## CONNECT_RECORD ##############*/
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THE CONNECT RECORD IS CREATED WHEN A TRANSACTION HAS TO START. IT KEEPS
+ ALL INTERMEDIATE INFORMATION NECESSARY FOR THE TRANSACTION FROM THE
+ DISTRIBUTED MANAGER. THE RECORD KEEPS INFORMATION ABOUT THE
+ OPERATIONS THAT HAVE TO BE CARRIED OUT BY THE TRANSACTION AND
+ ALSO THE TRAIL OF NODES FOR EACH OPERATION IN THE
+ TRANSACTION.
+ */
+ struct ConnectRecord {
+ enum ConnectState {
+ INUSE = 0,
+ FREE = 1,
+ STARTED = 2
+ };
+ Uint32 nodes[MAX_REPLICAS];
+ ConnectState connectState;
+ Uint32 nfConnect;
+ Uint32 table;
+ Uint32 userpointer;
+ BlockReference userblockref;
+ };
+ typedef Ptr<ConnectRecord> ConnectRecordPtr;
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THESE RECORDS ARE USED WHEN CREATING REPLICAS DURING SYSTEM */
+ /* RESTART. I NEED A COMPLEX DATA STRUCTURE DESCRIBING THE REPLICAS */
+ /* I WILL TRY TO CREATE FOR EACH FRAGMENT. */
+ /* */
+ /* I STORE A REFERENCE TO THE FOUR POSSIBLE CREATE REPLICA RECORDS */
+ /* IN A COMMON STORED VARIABLE. I ALLOW A MAXIMUM OF 4 REPLICAS TO */
+ /* BE RESTARTED PER FRAGMENT. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct CreateReplicaRecord {
+ Uint32 logStartGci[MAX_LOG_EXEC];
+ Uint32 logStopGci[MAX_LOG_EXEC];
+ Uint16 logNodeId[MAX_LOG_EXEC];
+ Uint32 createLcpId;
+
+ bool hotSpareUse;
+ Uint32 replicaRec;
+ Uint16 dataNodeId;
+ Uint16 lcpNo;
+ Uint16 noLogNodes;
+ };
+ typedef Ptr<CreateReplicaRecord> CreateReplicaRecordPtr;
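+ // The pool of create replica records is sized by ZCREATE_REPLICA_FILE_SIZE
+ // (4, defined above), matching the stated maximum of four replicas
+ // restarted per fragment.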
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD CONTAINS A FILE DESCRIPTION. THERE ARE TWO */
+ /* FILES PER TABLE TO IMPROVE PROTECTION AGAINST DISK CRASHES. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct FileRecord {
+ enum FileStatus {
+ CLOSED = 0,
+ CRASHED = 1,
+ OPEN = 2
+ };
+ enum FileType {
+ TABLE_FILE = 0,
+ GCP_FILE = 1
+ };
+ enum ReqStatus {
+ IDLE = 0,
+ CREATING_GCP = 1,
+ OPENING_GCP = 2,
+ OPENING_COPY_GCI = 3,
+ WRITING_COPY_GCI = 4,
+ CREATING_COPY_GCI = 5,
+ OPENING_TABLE = 6,
+ READING_GCP = 7,
+ READING_TABLE = 8,
+ WRITE_INIT_GCP = 9,
+ TABLE_CREATE = 10,
+ TABLE_WRITE = 11,
+ TABLE_CLOSE = 12,
+ CLOSING_GCP = 13,
+ CLOSING_TABLE_CRASH = 14,
+ CLOSING_TABLE_SR = 15,
+ CLOSING_GCP_CRASH = 16,
+ TABLE_OPEN_FOR_DELETE = 17,
+ TABLE_CLOSE_DELETE = 18
+ };
+ Uint32 fileName[4];
+ Uint32 fileRef;
+ FileStatus fileStatus;
+ FileType fileType;
+ Uint32 nextFile;
+ ReqStatus reqStatus;
+ Uint32 tabRef;
+ };
+ typedef Ptr<FileRecord> FileRecordPtr;
+
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD KEEPS THE STORAGE AND DECISION INFORMATION OF A FRAGMENT */
+ /* AND ITS REPLICAS. IF THE FRAGMENT HAS MORE THAN ONE BACKUP */
+ /* REPLICA, A LIST OF ADDITIONAL NODES IS ATTACHED TO THIS RECORD. */
+ /* EACH RECORD IN THAT LIST HOLDS INFORMATION ABOUT ONE BACKUP. THIS */
+ /* RECORD ALSO HOLDS THE STATUS OF THE FRAGMENT. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* */
+ /* FRAGMENTSTORE RECORD ALIGNED TO BE 64 BYTES */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct Fragmentstore {
+ Uint16 activeNodes[MAX_REPLICAS];
+ Uint32 preferredPrimary;
+
+ Uint32 oldStoredReplicas; /* "DEAD" STORED REPLICAS */
+ Uint32 storedReplicas; /* "ALIVE" STORED REPLICAS */
+ Uint32 nextFragmentChunk;
+
+ Uint8 distributionKey;
+ Uint8 fragReplicas;
+ Uint8 noOldStoredReplicas; /* NUMBER OF "DEAD" STORED REPLICAS */
+ Uint8 noStoredReplicas; /* NUMBER OF "ALIVE" STORED REPLICAS*/
+ Uint8 noLcpReplicas; ///< No of replicas remaining to be LCP:ed
+ };
+ typedef Ptr<Fragmentstore> FragmentstorePtr;
+
+ /*########### NODE GROUP RECORD ############*/
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD KEEPS INFORMATION ABOUT NODE GROUPS. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ struct NodeGroupRecord {
+ Uint32 nodesInGroup[MAX_REPLICAS + 1];
+ Uint32 nextReplicaNode;
+ Uint32 nodeCount;
+ bool activeTakeOver;
+ };
+ typedef Ptr<NodeGroupRecord> NodeGroupRecordPtr;
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* THIS RECORD KEEPS INFORMATION ABOUT NODES. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ /* RECORD ALIGNED TO BE 64 BYTES. */
+ /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/
+ enum NodefailHandlingStep {
+ NF_REMOVE_NODE_FROM_TABLE = 1,
+ NF_GCP_TAKE_OVER = 2,
+ NF_LCP_TAKE_OVER = 4
+ };
+
+ struct NodeRecord {
+ NodeRecord();
+
+ enum NodeStatus {
+ NOT_IN_CLUSTER = 0,
+ ALIVE = 1,
+ STARTING = 2,
+ DIED_NOW = 3,
+ DYING = 4,
+ DEAD = 5
+ };
+
+ struct FragmentCheckpointInfo {
+ Uint32 tableId;
+ Uint32 fragId;
+ Uint32 replicaPtr;
+ };
+
+ enum GcpState {
+ READY = 0,
+ PREPARE_SENT = 1,
+ PREPARE_RECEIVED = 2,
+ COMMIT_SENT = 3,
+ NODE_FINISHED = 4,
+ SAVE_REQ_SENT = 5,
+ SAVE_RECEIVED = 6,
+ COPY_GCI_SENT = 7
+ };
+
+ GcpState gcpstate;
+ Sysfile::ActiveStatus activeStatus;
+
+ NodeStatus nodeStatus;
+ bool useInTransactions;
+ bool allowNodeStart;
+ bool copyCompleted;
+ bool m_inclDihLcp;
+
+ FragmentCheckpointInfo startedChkpt[2];
+ FragmentCheckpointInfo queuedChkpt[2];
+
+ Bitmask<1> m_nodefailSteps;
+ Uint32 activeTabptr;
+ Uint32 nextNode;
+ Uint32 nodeGroup;
+
+ SignalCounter m_NF_COMPLETE_REP;
+
+ Uint8 dbtcFailCompleted;
+ Uint8 dblqhFailCompleted;
+ Uint8 dbdihFailCompleted;
+ Uint8 dbdictFailCompleted;
+ Uint8 recNODE_FAILREP;
+
+ Uint8 noOfQueuedChkpt;
+ Uint8 noOfStartedChkpt;
+
+ MasterLCPConf::State lcpStateAtTakeOver;
+ };
+ typedef Ptr<NodeRecord> NodeRecordPtr;
+ /**********************************************************************/
+ /* THIS RECORD IS A PAGE USED TO STORE THE PACKED INFORMATION ABOUT */
+ /* A TABLE AND ITS FRAGMENTS. */
+ /**********************************************************************/
+ struct PageRecord {
+ Uint32 word[2048];
+ /* 8 KBYTE PAGE*/
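+ /* 2048 32-BIT WORDS * 4 BYTES PER WORD = 8192 BYTES */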
+ Uint32 nextfreepage;
+ };
+ typedef Ptr<PageRecord> PageRecordPtr;
+
+ /************ REPLICA RECORD *************/
+ /**********************************************************************/
+ /* THIS RECORD KEEPS THE INFORMATION ABOUT A REPLICA OF A FRAGMENT */
+ /**********************************************************************/
+ struct ReplicaRecord {
+ /* -------------------------------------------------------------------- */
+ /* THE GLOBAL CHECKPOINT IDENTITY WHEN THIS REPLICA WAS CREATED. */
+ /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN A NODE */
+ /* CRASH OCCURS. */
+ /* -------------------------------------------------------------------- */
+ Uint32 createGci[8];
+ /* -------------------------------------------------------------------- */
+ /* THE LAST GLOBAL CHECKPOINT IDENTITY WHICH HAS BEEN SAVED ON DISK. */
+ /* THIS VARIABLE IS ONLY VALID FOR REPLICAS WHICH HAVE "DIED". A REPLICA*/
+ /* "DIES" EITHER WHEN THE NODE CRASHES THAT KEPT THE REPLICA OR BY BEING*/
+ /* STOPPED IN A CONTROLLED MANNER. */
+ /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN A NODE */
+ /* CRASH OCCURS. */
+ /* -------------------------------------------------------------------- */
+ Uint32 replicaLastGci[8];
+ /* -------------------------------------------------------------------- */
+ /* THE LOCAL CHECKPOINT IDENTITY OF A LOCAL CHECKPOINT. */
+ /* -------------------------------------------------------------------- */
+ Uint32 lcpId[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+ /* THIS VARIABLE KEEPS TRACK OF THE MAXIMUM GLOBAL CHECKPOINT COMPLETED */
+ /* FOR EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint32 maxGciCompleted[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+ /* THIS VARIABLE KEEPS TRACK OF THE MINIMUM GLOBAL CHECKPOINT STARTED FOR */
+ /* EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint32 maxGciStarted[MAX_LCP_STORED];
+ /* -------------------------------------------------------------------- */
+ /* THE GLOBAL CHECKPOINT IDENTITY WHEN THE TABLE WAS CREATED. */
+ /* -------------------------------------------------------------------- */
+ Uint32 initialGci;
+
+ /* -------------------------------------------------------------------- */
+ /* THE REFERENCE TO THE NEXT REPLICA. EITHER IT REFERS TO THE NEXT IN */
+ /* THE FREE LIST OR IT REFERS TO THE NEXT IN A LIST OF REPLICAS ON A */
+ /* FRAGMENT. */
+ /* -------------------------------------------------------------------- */
+ Uint32 nextReplica;
+
+ /* -------------------------------------------------------------------- */
+ /* THE NODE ID WHERE THIS REPLICA IS STORED. */
+ /* -------------------------------------------------------------------- */
+ Uint16 procNode;
+
+ /* -------------------------------------------------------------------- */
+ /* The last local checkpoint id started or queued on this replica. */
+ /* -------------------------------------------------------------------- */
+ Uint32 lcpIdStarted; // Started or queued
+
+ /* -------------------------------------------------------------------- */
+ /* THIS VARIABLE SPECIFIES THE STATUS OF THE LOCAL CHECKPOINT. IT */
+ /* CAN EITHER BE VALID OR INVALID. AT CREATION OF A FRAGMENT REPLICA ALL */
+ /* LCP'S ARE INVALID. ALSO, IF INDEX >= NO_LCP THEN THE LOCAL CHECKPOINT */
+ /* IS ALWAYS INVALID. IF THE LCP BEFORE THE NEXT_LCP HAS AN LCP_ID THAT */
+ /* DIFFERS FROM THE LATEST LCP_ID STARTED, THEN THE NEXT_LCP IS ALSO */
+ /* INVALID. */
+ /* -------------------------------------------------------------------- */
+ Uint8 lcpStatus[MAX_LCP_STORED];
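+ // A minimal validity test, as a sketch (assuming lcpStatus[] holds the
+ // ZVALID/ZINVALID codes defined above):
+ //   bool lcpValid(Uint32 i) const
+ //   { return (i < MAX_LCP_STORED) && (lcpStatus[i] == ZVALID); }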
+
+ /* -------------------------------------------------------------------- */
+ /* THE NEXT LOCAL CHECKPOINT TO EXECUTE IN THIS FRAGMENT REPLICA. */
+ /* -------------------------------------------------------------------- */
+ Uint8 nextLcp;
+
+ /* -------------------------------------------------------------------- */
+ /* THE NUMBER OF CRASHED INSTANCES OF THIS REPLICA SO FAR. */
+ /* -------------------------------------------------------------------- */
+ Uint8 noCrashedReplicas;
+
+ /**
+ * Is an LCP currently ongoing on this fragment
+ */
+ Uint8 lcpOngoingFlag;
+ };
+ typedef Ptr<ReplicaRecord> ReplicaRecordPtr;
+
+ /*************************************************************************
+ * TAB_DESCRIPTOR IS A DESCRIPTOR OF THE LOCATION OF THE FRAGMENTS BELONGING
+ * TO THE TABLE. THE INFORMATION ABOUT THE FRAGMENTS OF A TABLE IS STORED IN
+ * CHUNKS OF FRAGMENTSTORE RECORDS.
+ * THIS RECORD ALSO HAS THE NECESSARY INFORMATION TO LOCATE A FRAGMENT AND
+ * TO TRANSLATE A KEY OF A TUPLE TO THE FRAGMENT IT BELONGS TO.
+ */
+ struct TabRecord {
+ /**
+ * State for copying table description into pages
+ */
+ enum CopyStatus {
+ CS_IDLE,
+ CS_SR_PHASE1_READ_PAGES,
+ CS_SR_PHASE2_READ_TABLE,
+ CS_SR_PHASE3_COPY_TABLE,
+ CS_REMOVE_NODE,
+ CS_LCP_READ_TABLE,
+ CS_COPY_TAB_REQ,
+ CS_COPY_NODE_STATE,
+ CS_ADD_TABLE_MASTER,
+ CS_ADD_TABLE_SLAVE,
+ CS_INVALIDATE_NODE_LCP
+ };
+ /**
+ * State for copying pages to disk
+ */
+ enum UpdateState {
+ US_IDLE,
+ US_LOCAL_CHECKPOINT,
+ US_REMOVE_NODE,
+ US_COPY_TAB_REQ,
+ US_ADD_TABLE_MASTER,
+ US_ADD_TABLE_SLAVE,
+ US_INVALIDATE_NODE_LCP
+ };
+ enum TabLcpStatus {
+ TLS_ACTIVE = 1,
+ TLS_WRITING_TO_FILE = 2,
+ TLS_COMPLETED = 3
+ };
+ enum TabStatus {
+ TS_IDLE = 0,
+ TS_ACTIVE = 1,
+ TS_CREATING = 2,
+ TS_DROPPING = 3
+ };
+ enum Method {
+ HASH = 0,
+ NOTDEFINED = 1
+ };
+ CopyStatus tabCopyStatus;
+ UpdateState tabUpdateState;
+ TabLcpStatus tabLcpStatus;
+ TabStatus tabStatus;
+ Method method;
+
+ Uint32 pageRef[8];
+//-----------------------------------------------------------------------------
+// Each entry in this array contains a reference to 16 fragment records in a
+// row. Thus, given the fragment id, finding the correct record is very quick.
+//-----------------------------------------------------------------------------
+ Uint32 startFid[MAX_NDB_NODES];
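+//-----------------------------------------------------------------------------
+// Illustrative lookup (a sketch; the shift and mask constants are assumptions
+// based on the 16-records-per-chunk layout described above):
+//   Uint32 chunkNo    = fragId >> 4;   // which chunk of 16 fragment records
+//   Uint32 chunkIndex = fragId & 15;   // offset inside that chunk
+//   Uint32 fragRecI   = startFid[chunkNo] + chunkIndex;
+//-----------------------------------------------------------------------------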
+
+ Uint32 tabFile[2];
+ Uint32 connectrec;
+ Uint32 hashpointer;
+ Uint32 mask;
+ Uint32 noOfWords;
+ Uint32 schemaVersion;
+ Uint32 tabRemoveNode;
+ Uint32 totalfragments;
+ Uint32 noOfFragChunks;
+ Uint32 tabErrorCode;
+ struct {
+ Uint32 tabUserRef;
+ Uint32 tabUserPtr;
+ } m_dropTab;
+
+ struct DropTable {
+ Uint32 senderRef;
+ Uint32 senderData;
+ SignalCounter waitDropTabCount;
+ } m_prepDropTab;
+
+ Uint8 kvalue;
+ Uint8 noOfBackups;
+ Uint8 noPages;
+ Uint8 storedTable; /* 0 IF THE TABLE IS A TEMPORARY TABLE */
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ };
+ typedef Ptr<TabRecord> TabRecordPtr;
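+ // With Method::HASH, a tuple's hash value is expected to map to a fragment
+ // through the mask/hashpointer pair above, roughly along these lines
+ // (illustrative sketch of linear hashing, not a quote of the block's code):
+ //   Uint32 fragId = hashValue & tab.mask;
+ //   if (fragId < tab.hashpointer)
+ //     fragId = hashValue & ((tab.mask << 1) + 1);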
+
+ /***************************************************************************/
+ /* THIS RECORD IS USED TO KEEP TRACK OF TAKING OVER AND STARTING A NODE. */
+ /* WE KEEP IT IN A RECORD TO ENABLE IT TO BE PARALLELISED IN THE FUTURE. */
+ /**************************************************************************/
+ struct TakeOverRecord {
+ enum ToMasterStatus {
+ IDLE = 0,
+ TO_WAIT_START_TAKE_OVER = 1,
+ TO_START_COPY = 2,
+ TO_START_COPY_ONGOING = 3,
+ TO_WAIT_START = 4,
+ STARTING = 5,
+ SELECTING_NEXT = 6,
+ TO_WAIT_PREPARE_CREATE = 9,
+ PREPARE_CREATE = 10,
+ COPY_FRAG = 11,
+ TO_WAIT_UPDATE_TO = 12,
+ TO_UPDATE_TO = 13,
+ COPY_ACTIVE = 14,
+ TO_WAIT_COMMIT_CREATE = 15,
+ LOCK_MUTEX = 23,
+ COMMIT_CREATE = 16,
+ TO_COPY_COMPLETED = 17,
+ WAIT_LCP = 18,
+ TO_END_COPY = 19,
+ TO_END_COPY_ONGOING = 20,
+ TO_WAIT_ENDING = 21,
+ ENDING = 22
+ };
+ enum ToSlaveStatus {
+ TO_SLAVE_IDLE = 0,
+ TO_SLAVE_STARTED = 1,
+ TO_SLAVE_CREATE_PREPARE = 2,
+ TO_SLAVE_COPY_FRAG_COMPLETED = 3,
+ TO_SLAVE_CREATE_COMMIT = 4,
+ TO_SLAVE_COPY_COMPLETED = 5
+ };
+ Uint32 startGci;
+ Uint32 toCopyNode;
+ Uint32 toCurrentFragid;
+ Uint32 toCurrentReplica;
+ Uint32 toCurrentTabref;
+ Uint32 toFailedNode;
+ Uint32 toStartingNode;
+ Uint32 nextTakeOver;
+ Uint32 prevTakeOver;
+ bool toNodeRestart;
+ ToMasterStatus toMasterStatus;
+ ToSlaveStatus toSlaveStatus;
+ MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> m_switchPrimaryMutexHandle;
+ };
+ typedef Ptr<TakeOverRecord> TakeOverRecordPtr;
+
+public:
+ Dbdih(const class Configuration &);
+ virtual ~Dbdih();
+
+ struct RWFragment {
+ Uint32 pageIndex;
+ Uint32 wordIndex;
+ Uint32 fragId;
+ TabRecordPtr rwfTabPtr;
+ PageRecordPtr rwfPageptr;
+ };
+ struct CopyTableNode {
+ Uint32 pageIndex;
+ Uint32 wordIndex;
+ Uint32 noOfWords;
+ TabRecordPtr ctnTabPtr;
+ PageRecordPtr ctnPageptr;
+ };
+
+private:
+ BLOCK_DEFINES(Dbdih);
+
+ void execDUMP_STATE_ORD(Signal *);
+ void execNDB_TAMPER(Signal *);
+ void execDEBUG_SIG(Signal *);
+ void execEMPTY_LCP_CONF(Signal *);
+ void execMASTER_GCPREF(Signal *);
+ void execMASTER_GCPREQ(Signal *);
+ void execMASTER_GCPCONF(Signal *);
+ void execMASTER_LCPREF(Signal *);
+ void execMASTER_LCPREQ(Signal *);
+ void execMASTER_LCPCONF(Signal *);
+ void execNF_COMPLETEREP(Signal *);
+ void execSTART_PERMREQ(Signal *);
+ void execSTART_PERMCONF(Signal *);
+ void execSTART_PERMREF(Signal *);
+ void execINCL_NODEREQ(Signal *);
+ void execINCL_NODECONF(Signal *);
+ void execEND_TOREQ(Signal *);
+ void execEND_TOCONF(Signal *);
+ void execSTART_TOREQ(Signal *);
+ void execSTART_TOCONF(Signal *);
+ void execSTART_MEREQ(Signal *);
+ void execSTART_MECONF(Signal *);
+ void execSTART_MEREF(Signal *);
+ void execSTART_COPYREQ(Signal *);
+ void execSTART_COPYCONF(Signal *);
+ void execSTART_COPYREF(Signal *);
+ void execCREATE_FRAGREQ(Signal *);
+ void execCREATE_FRAGCONF(Signal *);
+ void execDIVERIFYREQ(Signal *);
+ void execGCP_SAVECONF(Signal *);
+ void execGCP_PREPARECONF(Signal *);
+ void execGCP_PREPARE(Signal *);
+ void execGCP_NODEFINISH(Signal *);
+ void execGCP_COMMIT(Signal *);
+ void execDIHNDBTAMPER(Signal *);
+ void execCONTINUEB(Signal *);
+ void execCOPY_GCIREQ(Signal *);
+ void execCOPY_GCICONF(Signal *);
+ void execCOPY_TABREQ(Signal *);
+ void execCOPY_TABCONF(Signal *);
+ void execTCGETOPSIZECONF(Signal *);
+ void execTC_CLOPSIZECONF(Signal *);
+
+ void execLCP_FRAG_REP(Signal *);
+ void execLCP_COMPLETE_REP(Signal *);
+ void execSTART_LCP_REQ(Signal *);
+ void execSTART_LCP_CONF(Signal *);
+ MutexHandle2<DIH_START_LCP_MUTEX> c_startLcpMutexHandle;
+ void startLcpMutex_locked(Signal* signal, Uint32, Uint32);
+ void startLcpMutex_unlocked(Signal* signal, Uint32, Uint32);
+
+ MutexHandle2<DIH_SWITCH_PRIMARY_MUTEX> c_switchPrimaryMutexHandle;
+ void switchPrimaryMutex_locked(Signal* signal, Uint32, Uint32);
+ void switchPrimaryMutex_unlocked(Signal* signal, Uint32, Uint32);
+ void switch_primary_stop_node(Signal* signal, Uint32, Uint32);
+
+ void execBLOCK_COMMIT_ORD(Signal *);
+ void execUNBLOCK_COMMIT_ORD(Signal *);
+
+ void execDIH_SWITCH_REPLICA_REQ(Signal *);
+ void execDIH_SWITCH_REPLICA_REF(Signal *);
+ void execDIH_SWITCH_REPLICA_CONF(Signal *);
+
+ void execSTOP_PERM_REQ(Signal *);
+ void execSTOP_PERM_REF(Signal *);
+ void execSTOP_PERM_CONF(Signal *);
+
+ void execSTOP_ME_REQ(Signal *);
+ void execSTOP_ME_REF(Signal *);
+ void execSTOP_ME_CONF(Signal *);
+
+ void execREAD_CONFIG_REQ(Signal *);
+ void execUNBLO_DICTCONF(Signal *);
+ void execCOPY_ACTIVECONF(Signal *);
+ void execTAB_COMMITREQ(Signal *);
+ void execNODE_FAILREP(Signal *);
+ void execCOPY_FRAGCONF(Signal *);
+ void execCOPY_FRAGREF(Signal *);
+ void execDIADDTABREQ(Signal *);
+ void execDIGETNODESREQ(Signal *);
+ void execDIRELEASEREQ(Signal *);
+ void execDISEIZEREQ(Signal *);
+ void execSTTOR(Signal *);
+ void execDI_FCOUNTREQ(Signal *);
+ void execDIGETPRIMREQ(Signal *);
+ void execGCP_SAVEREF(Signal *);
+ void execGCP_TCFINISHED(Signal *);
+ void execREAD_NODESCONF(Signal *);
+ void execNDB_STTOR(Signal *);
+ void execDICTSTARTCONF(Signal *);
+ void execNDB_STARTREQ(Signal *);
+ void execGETGCIREQ(Signal *);
+ void execDIH_RESTARTREQ(Signal *);
+ void execSTART_RECCONF(Signal *);
+ void execSTART_FRAGCONF(Signal *);
+ void execADD_FRAGCONF(Signal *);
+ void execADD_FRAGREF(Signal *);
+ void execFSOPENCONF(Signal *);
+ void execFSOPENREF(Signal *);
+ void execFSCLOSECONF(Signal *);
+ void execFSCLOSEREF(Signal *);
+ void execFSREADCONF(Signal *);
+ void execFSREADREF(Signal *);
+ void execFSWRITECONF(Signal *);
+ void execFSWRITEREF(Signal *);
+ void execSET_VAR_REQ(Signal *);
+ void execCHECKNODEGROUPSREQ(Signal *);
+ void execSTART_INFOREQ(Signal*);
+ void execSTART_INFOREF(Signal*);
+ void execSTART_INFOCONF(Signal*);
+ void execWAIT_GCP_REQ(Signal* signal);
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+ void execUPDATE_TOREQ(Signal* signal);
+ void execUPDATE_TOCONF(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REF(Signal* signal);
+ void execWAIT_DROP_TAB_CONF(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+
+ void execALTER_TAB_REQ(Signal* signal);
+
+ void execCREATE_FRAGMENTATION_REQ(Signal*);
+
+ void waitDropTabWritingToFile(Signal *, TabRecordPtr tabPtr);
+ void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr);
+ void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId);
+
+ // Statement blocks
+//------------------------------------
+// Methods that send signals
+//------------------------------------
+ void nullRoutine(Signal *, Uint32 nodeId);
+ void sendCOPY_GCIREQ(Signal *, Uint32 nodeId);
+ void sendDIH_SWITCH_REPLICA_REQ(Signal *, Uint32 nodeId);
+ void sendEMPTY_LCP_REQ(Signal *, Uint32 nodeId);
+ void sendEND_TOREQ(Signal *, Uint32 nodeId);
+ void sendGCP_COMMIT(Signal *, Uint32 nodeId);
+ void sendGCP_PREPARE(Signal *, Uint32 nodeId);
+ void sendGCP_SAVEREQ(Signal *, Uint32 nodeId);
+ void sendINCL_NODEREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_GCPREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_LCPREQ(Signal *, Uint32 nodeId);
+ void sendMASTER_LCPCONF(Signal * signal);
+ void sendSTART_RECREQ(Signal *, Uint32 nodeId);
+ void sendSTART_INFOREQ(Signal *, Uint32 nodeId);
+ void sendSTART_TOREQ(Signal *, Uint32 nodeId);
+ void sendSTOP_ME_REQ(Signal *, Uint32 nodeId);
+ void sendTC_CLOPSIZEREQ(Signal *, Uint32 nodeId);
+ void sendTCGETOPSIZEREQ(Signal *, Uint32 nodeId);
+ void sendUPDATE_TOREQ(Signal *, Uint32 nodeId);
+ void sendSTART_LCP_REQ(Signal *, Uint32 nodeId);
+
+ void sendLCP_FRAG_ORD(Signal*, NodeRecord::FragmentCheckpointInfo info);
+ void sendLastLCP_FRAG_ORD(Signal *);
+
+ void sendCopyTable(Signal *, CopyTableNode* ctn,
+ BlockReference ref, Uint32 reqinfo);
+ void sendCreateFragReq(Signal *,
+ Uint32 startGci,
+ Uint32 storedType,
+ Uint32 takeOverPtr);
+ void sendDihfragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void sendStartFragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void sendHOT_SPAREREP(Signal *);
+ void sendAddFragreq(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId,
+ Uint32 lcpNo,
+ Uint32 param);
+
+ void sendAddFragreq(Signal*, ConnectRecordPtr, TabRecordPtr, Uint32 fragId);
+ void addTable_closeConf(Signal* signal, Uint32 tabPtrI);
+ void resetReplicaSr(TabRecordPtr tabPtr);
+ void resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci);
+
+//------------------------------------
+// Methods for LCP functionality
+//------------------------------------
+ void checkKeepGci(Uint32 replicaStartIndex);
+ void checkLcpStart(Signal *, Uint32 lineNo);
+ void checkStartMoreLcp(Signal *, Uint32 nodeId);
+ bool reportLcpCompletion(const class LcpFragRep *);
+ void sendLCP_COMPLETE_REP(Signal *);
+
+//------------------------------------
+// Methods for Delete Table Files
+//------------------------------------
+ void startDeleteFile(Signal* signal, TabRecordPtr tabPtr);
+ void openTableFileForDelete(Signal* signal, Uint32 fileIndex);
+ void tableOpenLab(Signal* signal, FileRecordPtr regFilePtr);
+ void tableDeleteLab(Signal* signal, FileRecordPtr regFilePtr);
+
+//------------------------------------
+// File Record specific methods
+//------------------------------------
+ void closeFile(Signal *, FileRecordPtr regFilePtr);
+ void closeFileDelete(Signal *, FileRecordPtr regFilePtr);
+ void createFileRw(Signal *, FileRecordPtr regFilePtr);
+ void openFileRw(Signal *, FileRecordPtr regFilePtr);
+ void openFileRo(Signal *, FileRecordPtr regFilePtr);
+ void seizeFile(FileRecordPtr& regFilePtr);
+ void releaseFile(Uint32 fileIndex);
+
+//------------------------------------
+// Methods called when completing file
+// operation.
+//------------------------------------
+ void creatingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void openingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void openingTableLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCreateLab(Signal *, FileRecordPtr regFilePtr);
+ void creatingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void creatingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void openingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCreateErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void closingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void closingGcpCrashLab(Signal *, FileRecordPtr regFilePtr);
+ void closingTableCrashLab(Signal *, FileRecordPtr regFilePtr);
+ void closingTableSrLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCloseLab(Signal *, FileRecordPtr regFilePtr);
+ void tableCloseErrorLab(FileRecordPtr regFilePtr);
+ void readingGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void readingTableLab(Signal *, FileRecordPtr regFilePtr);
+ void readingGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void readingTableErrorLab(Signal *, FileRecordPtr regFilePtr);
+ void writingCopyGciLab(Signal *, FileRecordPtr regFilePtr);
+ void writeInitGcpLab(Signal *, FileRecordPtr regFilePtr);
+ void tableWriteLab(Signal *, FileRecordPtr regFilePtr);
+ void writeInitGcpErrorLab(Signal *, FileRecordPtr regFilePtr);
+
+
+ void calculateHotSpare();
+ void checkEscalation();
+ void clearRestartInfoBits(Signal *);
+ void invalidateLcpInfoAfterSr();
+
+ bool isMaster();
+ bool isActiveMaster();
+
+ void emptyverificbuffer(Signal *, bool aContintueB);
+ Uint32 findHotSpare();
+ void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr);
+ void initRestartInfo();
+ void initRestorableGciFiles();
+ void makeNodeGroups(Uint32 nodeArray[]);
+ void makePrnList(class ReadNodesConf * readNodes, Uint32 nodeArray[]);
+ void nodeResetStart();
+ void releaseTabPages(Uint32 tableId);
+ void replication(Uint32 noOfReplicas,
+ NodeGroupRecordPtr NGPtr,
+ FragmentstorePtr regFragptr);
+ void selectMasterCandidateAndSend(Signal *);
+ void setInitialActiveStatus();
+ void setLcpActiveStatusEnd();
+ void setLcpActiveStatusStart(Signal *);
+ void setNodeActiveStatus();
+ void setNodeGroups();
+ void setNodeInfo(Signal *);
+ void setNodeLcpActiveStatus();
+ void setNodeRestartInfoBits();
+ void startGcp(Signal *);
+
+ void readFragment(RWFragment* rf, FragmentstorePtr regFragptr);
+ Uint32 readPageWord(RWFragment* rf);
+ void readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr);
+ void readReplicas(RWFragment* rf, FragmentstorePtr regFragptr);
+ void readRestorableGci(Signal *, FileRecordPtr regFilePtr);
+ void readTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
+ void writeFragment(RWFragment* wf, FragmentstorePtr regFragptr);
+ void writePageWord(RWFragment* wf, Uint32 dataWord);
+ void writeReplicas(RWFragment* wf, Uint32 replicaStartIndex);
+ void writeRestorableGci(Signal *, FileRecordPtr regFilePtr);
+ void writeTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr);
+ void copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr);
+
+ void gcpcommitreqLab(Signal *);
+ void gcpsavereqLab(Signal *);
+ void copyGciLab(Signal *, CopyGCIReq::CopyReason reason);
+ void storeNewLcpIdLab(Signal *);
+ void startLcpRoundLoopLab(Signal *, Uint32 startTableId, Uint32 startFragId);
+
+ void nodeFailCompletedCheckLab(Signal*, NodeRecordPtr failedNodePtr);
+
+ /**
+ * Book-keeping of the node failure handling steps (NodefailHandlingStep)
+ * still outstanding for a failed node.
+ */
+ void setLocalNodefailHandling(Signal*, Uint32 failedNodeId,
+ NodefailHandlingStep step);
+ void checkLocalNodefailComplete(Signal*, Uint32 failedNodeId,
+ NodefailHandlingStep step);
+
+ void ndbsttorry10Lab(Signal *, Uint32 _line);
+ void createMutexes(Signal* signal, Uint32 no);
+ void createMutex_done(Signal* signal, Uint32 no, Uint32 retVal);
+ void crashSystemAtGcpStop(Signal *);
+ void sendFirstDictfragsreq(Signal *, TabRecordPtr regTabPtr);
+ void addtabrefuseLab(Signal *, ConnectRecordPtr regConnectPtr, Uint32 errorCode);
+ void GCP_SAVEhandling(Signal *, Uint32 nodeId);
+ void packTableIntoPagesLab(Signal *, Uint32 tableId);
+ void readPagesIntoTableLab(Signal *, Uint32 tableId);
+ void readPagesIntoFragLab(Signal *, RWFragment* rf);
+ void readTabDescriptionLab(Signal *, Uint32 tableId);
+ void copyTableLab(Signal *, Uint32 tableId);
+ void breakCopyTableLab(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 nodeId);
+ void checkAddfragCompletedLab(Signal *,
+ TabRecordPtr regTabPtr,
+ Uint32 fragId);
+ void completeRestartLab(Signal *);
+ void readTableFromPagesLab(Signal *, TabRecordPtr regTabPtr);
+ void srPhase2ReadTableLab(Signal *, TabRecordPtr regTabPtr);
+ void checkTcCounterLab(Signal *);
+ void calculateKeepGciLab(Signal *, Uint32 tableId, Uint32 fragId);
+ void tableUpdateLab(Signal *, TabRecordPtr regTabPtr);
+ void checkLcpCompletedLab(Signal *);
+ void initLcpLab(Signal *, Uint32 masterRef, Uint32 tableId);
+ void startGcpLab(Signal *, Uint32 aWaitTime);
+ void checkGcpStopLab(Signal *);
+ void MASTER_GCPhandling(Signal *, Uint32 failedNodeId);
+ void MASTER_LCPhandling(Signal *, Uint32 failedNodeId);
+ void rnfTableNotReadyLab(Signal *, TabRecordPtr regTabPtr, Uint32 removeNodeId);
+ void startLcpTakeOverLab(Signal *, Uint32 failedNodeId);
+
+ void startLcpMasterTakeOver(Signal *, Uint32 failedNodeId);
+ void startGcpMasterTakeOver(Signal *, Uint32 failedNodeId);
+ void checkGcpOutstanding(Signal*, Uint32 failedNodeId);
+
+ void checkEmptyLcpComplete(Signal *);
+ void lcpBlockedLab(Signal *);
+ void breakCheckTabCompletedLab(Signal *, TabRecordPtr regTabptr);
+ void readGciFileLab(Signal *);
+ void openingCopyGciSkipInitLab(Signal *, FileRecordPtr regFilePtr);
+ void startLcpRoundLab(Signal *);
+ void gcpBlockedLab(Signal *);
+ void initialStartCompletedLab(Signal *);
+ void allNodesLcpCompletedLab(Signal *);
+ void nodeRestartPh2Lab(Signal *);
+ void initGciFilesLab(Signal *);
+ void dictStartConfLab(Signal *);
+ void nodeDictStartConfLab(Signal *);
+ void ndbStartReqLab(Signal *, BlockReference ref);
+ void nodeRestartStartRecConfLab(Signal *);
+ void dihCopyCompletedLab(Signal *);
+ void release_connect(ConnectRecordPtr ptr);
+ void copyTableNode(Signal *,
+ CopyTableNode* ctn,
+ NodeRecordPtr regNodePtr);
+ void startFragment(Signal *, Uint32 tableId, Uint32 fragId);
+ bool checkLcpAllTablesDoneInLqh();
+
+ void lcpStateAtNodeFailureLab(Signal *, Uint32 nodeId);
+ void copyNodeLab(Signal *, Uint32 tableId);
+ void copyGciReqLab(Signal *);
+ void allLab(Signal *,
+ ConnectRecordPtr regConnectPtr,
+ TabRecordPtr regTabPtr);
+ void tableCopyNodeLab(Signal *, TabRecordPtr regTabPtr);
+
+ void removeNodeFromTables(Signal *, Uint32 tableId, Uint32 nodeId);
+ void removeNodeFromTable(Signal *, Uint32 tableId, TabRecordPtr tabPtr);
+ void removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId);
+
+ void packFragIntoPagesLab(Signal *, RWFragment* wf);
+ void startNextChkpt(Signal *);
+ void failedNodeLcpHandling(Signal*, NodeRecordPtr failedNodePtr);
+ void failedNodeSynchHandling(Signal *, NodeRecordPtr failedNodePtr);
+ void checkCopyTab(NodeRecordPtr failedNodePtr);
+
+ void initCommonData();
+ void initialiseRecordsLab(Signal *, Uint32 stepNo, Uint32, Uint32);
+
+ void findReplica(ReplicaRecordPtr& regReplicaPtr,
+ Fragmentstore* fragPtrP, Uint32 nodeId);
+//------------------------------------
+// Node failure handling methods
+//------------------------------------
+ void startRemoveFailedNode(Signal *, NodeRecordPtr failedNodePtr);
+ void handleGcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
+ void handleLcpTakeOver(Signal *, NodeRecordPtr failedNodePtr);
+ void handleNewMaster(Signal *, NodeRecordPtr failedNodePtr);
+ void checkTakeOverInMasterAllNodeFailure(Signal*, NodeRecordPtr failedNode);
+ void checkTakeOverInMasterCopyNodeFailure(Signal*, Uint32 failedNodeId);
+ void checkTakeOverInMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
+ void checkTakeOverInNonMasterStartNodeFailure(Signal*, Uint32 takeOverPtr);
+ void handleLcpMasterTakeOver(Signal *, Uint32 nodeId);
+
+//------------------------------------
+// Replica record specific methods
+//------------------------------------
+ Uint32 findLogInterval(ConstPtr<ReplicaRecord> regReplicaPtr,
+ Uint32 startGci);
+ void findMinGci(ReplicaRecordPtr fmgReplicaPtr,
+ Uint32& keeGci,
+ Uint32& oldestRestorableGci);
+ bool findStartGci(ConstPtr<ReplicaRecord> fstReplicaPtr,
+ Uint32 tfstStopGci,
+ Uint32& tfstStartGci,
+ Uint32& tfstLcp);
+ void newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr);
+ void packCrashedReplicas(ReplicaRecordPtr pcrReplicaPtr);
+ void releaseReplicas(Uint32 replicaPtr);
+ void removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr);
+ void removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr);
+ void seizeReplicaRec(ReplicaRecordPtr& replicaPtr);
+
+//------------------------------------
+// Methods operating on a fragment and
+// its connected replicas and nodes.
+//------------------------------------
+ void allocStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr& newReplicaPtr,
+ Uint32 nodeId);
+ Uint32 extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]);
+ bool findBestLogNode(CreateReplicaRecord* createReplica,
+ FragmentstorePtr regFragptr,
+ Uint32 startGci,
+ Uint32 stopGci,
+ Uint32 logNode,
+ Uint32& fblStopGci);
+ bool findLogNodes(CreateReplicaRecord* createReplica,
+ FragmentstorePtr regFragptr,
+ Uint32 startGci,
+ Uint32 stopGci);
+ void findToReplica(TakeOverRecord* regTakeOver,
+ Uint32 replicaType,
+ FragmentstorePtr regFragptr,
+ ReplicaRecordPtr& ftrReplicaPtr);
+ void initFragstore(FragmentstorePtr regFragptr);
+ void insertBackup(FragmentstorePtr regFragptr, Uint32 nodeId);
+ void insertfraginfo(FragmentstorePtr regFragptr,
+ Uint32 noOfBackups,
+ Uint32* nodeArray);
+ void linkOldStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void linkStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void prepareReplicas(FragmentstorePtr regFragptr);
+ void removeNodeFromStored(Uint32 nodeId,
+ FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void removeOldStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void removeStoredReplica(FragmentstorePtr regFragptr,
+ ReplicaRecordPtr replicaPtr);
+ void searchStoredReplicas(FragmentstorePtr regFragptr);
+ void updateNodeInfo(FragmentstorePtr regFragptr);
+
+//------------------------------------
+// Fragment allocation, deallocation and
+// find methods
+//------------------------------------
+ void allocFragments(Uint32 noOfFragments, TabRecordPtr regTabPtr);
+ void releaseFragments(TabRecordPtr regTabPtr);
+ void getFragstore(TabRecord *, Uint32 fragNo, FragmentstorePtr & ptr);
+ void initialiseFragstore();
+
+//------------------------------------
+// Page Record specific methods
+//------------------------------------
+ void allocpage(PageRecordPtr& regPagePtr);
+ void releasePage(Uint32 pageIndex);
+
+//------------------------------------
+// Table Record specific methods
+//------------------------------------
+ void initTable(TabRecordPtr regTabPtr);
+ void initTableFile(TabRecordPtr regTabPtr);
+ void releaseTable(TabRecordPtr tabPtr);
+ Uint32 findTakeOver(Uint32 failedNodeId);
+ void handleTakeOverMaster(Signal *, Uint32 takeOverPtr);
+ void handleTakeOverNewMaster(Signal *, Uint32 takeOverPtr);
+
+//------------------------------------
+// TakeOver Record specific methods
+//------------------------------------
+ void initTakeOver(TakeOverRecordPtr regTakeOverptr);
+ void seizeTakeOver(TakeOverRecordPtr& regTakeOverptr);
+ void allocateTakeOver(TakeOverRecordPtr& regTakeOverptr);
+ void releaseTakeOver(Uint32 takeOverPtr);
+ bool anyActiveTakeOver();
+ void checkToCopy();
+ void checkToCopyCompleted(Signal *);
+ bool checkToInterrupted(TakeOverRecordPtr& regTakeOverptr);
+ Uint32 getStartNode(Uint32 takeOverPtr);
+
+//------------------------------------
+// Methods for take over functionality
+//------------------------------------
+ void changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver);
+ void endTakeOver(Uint32 takeOverPtr);
+ void initStartTakeOver(const class StartToReq *,
+ TakeOverRecordPtr regTakeOverPtr);
+
+ void nodeRestartTakeOver(Signal *, Uint32 startNodeId);
+ void systemRestartTakeOverLab(Signal *);
+ void startTakeOver(Signal *,
+ Uint32 takeOverPtr,
+ Uint32 startNode,
+ Uint32 toNode);
+ void sendStartTo(Signal *, Uint32 takeOverPtr);
+ void startNextCopyFragment(Signal *, Uint32 takeOverPtr);
+ void toCopyFragLab(Signal *, Uint32 takeOverPtr);
+ void startHsAddFragConfLab(Signal *);
+ void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr);
+ void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState);
+ void toCopyCompletedLab(Signal *, TakeOverRecordPtr regTakeOverptr);
+ void takeOverCompleted(Uint32 aNodeId);
+ void sendEndTo(Signal *, Uint32 takeOverPtr);
+
+//------------------------------------
+// Node Record specific methods
+//------------------------------------
+ void checkStartTakeOver(Signal *);
+ void insertAlive(NodeRecordPtr newNodePtr);
+ void insertDeadNode(NodeRecordPtr removeNodePtr);
+ void removeAlive(NodeRecordPtr removeNodePtr);
+ void removeDeadNode(NodeRecordPtr removeNodePtr);
+
+ NodeRecord::NodeStatus getNodeStatus(Uint32 nodeId);
+ void setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus);
+ Sysfile::ActiveStatus getNodeActiveStatus(Uint32 nodeId);
+ void setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus);
+ void setNodeLcpActiveStatus(Uint32 nodeId, bool newState);
+ bool getNodeLcpActiveStatus(Uint32 nodeId);
+ bool getAllowNodeStart(Uint32 nodeId);
+ void setAllowNodeStart(Uint32 nodeId, bool newState);
+ bool getNodeCopyCompleted(Uint32 nodeId);
+ void setNodeCopyCompleted(Uint32 nodeId, bool newState);
+ bool checkNodeAlive(Uint32 nodeId);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Variables to support record structures and their free lists
+
+ ApiConnectRecord *apiConnectRecord;
+ Uint32 capiConnectFileSize;
+
+ ConnectRecord *connectRecord;
+ Uint32 cfirstconnect;
+ Uint32 cconnectFileSize;
+
+ CreateReplicaRecord *createReplicaRecord;
+ Uint32 cnoOfCreateReplicas;
+
+ FileRecord *fileRecord;
+ Uint32 cfirstfreeFile;
+ Uint32 cfileFileSize;
+
+ Fragmentstore *fragmentstore;
+ Uint32 cfirstfragstore;
+ Uint32 cfragstoreFileSize;
+
+ Uint32 c_nextNodeGroup;
+ NodeGroupRecord *nodeGroupRecord;
+
+ NodeRecord *nodeRecord;
+
+ PageRecord *pageRecord;
+ Uint32 cfirstfreepage;
+ Uint32 cpageFileSize;
+
+ ReplicaRecord *replicaRecord;
+ Uint32 cfirstfreeReplica;
+ Uint32 cnoFreeReplicaRec;
+ Uint32 creplicaFileSize;
+
+ TabRecord *tabRecord;
+ Uint32 ctabFileSize;
+
+ TakeOverRecord *takeOverRecord;
+ Uint32 cfirstfreeTakeOver;
+
+ /*
+ 2.4 C O M M O N S T O R E D V A R I A B L E S
+ ----------------------------------------------------
+ */
+ Uint32 cfirstVerifyQueue;
+ Uint32 clastVerifyQueue;
+ Uint32 cverifyQueueCounter;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS THE REFERENCES TO FILE RECORDS THAT DESCRIBE */
+ /* THE TWO FILES THAT ARE USED TO STORE THE VARIABLE CRESTART_INFO */
+ /* ON DISK. */
+ /*------------------------------------------------------------------------*/
+ Uint32 crestartInfoFile[2];
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF THE STATUS OF A GLOBAL CHECKPOINT */
+ /* PARTICIPANT. THIS IS NEEDED TO HANDLE A NODE FAILURE. WHEN A NODE*/
+ /* FAILURE OCCURS THE PROTOCOL CAN EASILY STALL IF NO ACTION IS */
+ /* TAKEN TO PREVENT THIS. THIS VARIABLE ENSURES SUCH ACTION CAN BE */
+ /* TAKEN. */
+ /*------------------------------------------------------------------------*/
+ enum GcpParticipantState {
+ GCP_PARTICIPANT_READY = 0,
+ GCP_PARTICIPANT_PREPARE_RECEIVED = 1,
+ GCP_PARTICIPANT_COMMIT_RECEIVED = 2,
+ GCP_PARTICIPANT_TC_FINISHED = 3,
+ GCP_PARTICIPANT_COPY_GCI_RECEIVED = 4
+ };
+ GcpParticipantState cgcpParticipantState;
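+ // The participant state presumably advances as the GCP_PREPARE, GCP_COMMIT,
+ // GCP_TCFINISHED and COPY_GCIREQ signals arrive (see the exec* handlers
+ // declared above).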
+ /*------------------------------------------------------------------------*/
+ /* THESE VARIABLES ARE USED TO ENSURE THAT GCP PROCESSING DOES NOT */
+ /* STOP FOR SOME REASON. */
+ /*------------------------------------------------------------------------*/
+ enum GcpStatus {
+ GCP_READY = 0,
+ GCP_PREPARE_SENT = 1,
+ GCP_COMMIT_SENT = 2,
+ GCP_NODE_FINISHED = 3,
+ GCP_SAVE_LQH_FINISHED = 4
+ };
+ GcpStatus cgcpStatus;
+ Uint32 cgcpStartCounter;
+ Uint32 coldGcpStatus;
+ Uint32 coldGcpId;
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF THE STATE OF THIS NODE AS MASTER. */
+ /*------------------------------------------------------------------------*/
+ enum MasterState {
+ MASTER_IDLE = 0, /* NODE IS NOT MASTER */
+ MASTER_ACTIVE = 1, /* NODE IS ACTIVE AS MASTER */
+ MASTER_TAKE_OVER_GCP = 2 /* NODE IS TAKING OVER AS MASTER */
+ };
+ MasterState cmasterState;
+ Uint16 cmasterTakeOverNode;
+
+ struct CopyGCIMaster {
+ CopyGCIMaster(){ m_copyReason = m_waiting = CopyGCIReq::IDLE;}
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
+ /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
+ /* RESTART. */
+ /*------------------------------------------------------------------------*/
+ CopyGCIReq::CopyReason m_copyReason;
+
+ /*------------------------------------------------------------------------*/
+ /* COPYING RESTART INFO CAN BE STARTED BY LOCAL CHECKPOINTS AND BY */
+ /* GLOBAL CHECKPOINTS. WE CAN HOWEVER ONLY HANDLE ONE SUCH COPY AT */
+ /* A TIME. THUS WE HAVE TO KEEP WAIT INFORMATION IN THIS VARIABLE. */
+ /*------------------------------------------------------------------------*/
+ CopyGCIReq::CopyReason m_waiting;
+ } c_copyGCIMaster;
+
+ struct CopyGCISlave {
+ CopyGCISlave(){ m_copyReason = CopyGCIReq::IDLE; m_expectedNextWord = 0;}
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */
+ /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */
+ /* RESTART. THIS VARIABLE IS USED BY THE NODE THAT RECEIVES */
+ /* COPY_GCI_REQ. */
+ /*------------------------------------------------------------------------*/
+ Uint32 m_senderData;
+ BlockReference m_senderRef;
+ CopyGCIReq::CopyReason m_copyReason;
+
+ Uint32 m_expectedNextWord;
+ } c_copyGCISlave;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE IS USED TO KEEP TRACK OF THE STATE OF LOCAL */
+ /* CHECKPOINTS. */
+ /*------------------------------------------------------------------------*/
+public:
+ enum LcpStatus {
+ LCP_STATUS_IDLE = 0,
+ LCP_TCGET = 1, // Only master
+ LCP_STATUS_ACTIVE = 2,
+ LCP_CALCULATE_KEEP_GCI = 4, // Only master
+ LCP_COPY_GCI = 5,
+ LCP_INIT_TABLES = 6,
+ LCP_TC_CLOPSIZE = 7, // Only master
+ LCP_START_LCP_ROUND = 8,
+ LCP_TAB_COMPLETED = 9,
+ LCP_TAB_SAVED = 10
+ };
+private:
+
+ struct LcpState {
+ LcpStatus lcpStatus;
+ Uint32 lcpStatusUpdatedPlace;
+
+ void setLcpStatus(LcpStatus status, Uint32 line){
+ lcpStatus = status;
+ lcpStatusUpdatedPlace = line;
+ }
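+ // Callers are expected to pass __LINE__ as the line argument, e.g.
+ // setLcpStatus(LCP_STATUS_ACTIVE, __LINE__), so lcpStatusUpdatedPlace
+ // records where the last transition happened (illustrative usage).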
+
+ Uint32 lcpStart;
+ Uint32 lcpStartGcp;
+ Uint32 keepGci; /* USED TO CALCULATE THE GCI TO KEEP AFTER A LCP */
+ Uint32 oldestRestorableGci;
+
+ struct CurrentFragment {
+ Uint32 tableId;
+ Uint32 fragmentId;
+ } currentFragment;
+
+ Uint32 noOfLcpFragRepOutstanding;
+
+ /*------------------------------------------------------------------------*/
+ /* USED TO ENSURE THAT LCP'S ARE EXECUTED AT CERTAIN TIME INTERVALS */
+ /* EVEN WHEN SYSTEM IS NOT DOING ANYTHING. */
+ /*------------------------------------------------------------------------*/
+ Uint32 ctimer;
+ Uint32 ctcCounter;
+ Uint32 clcpDelay; /* MAX. 2^(CLCP_DELAY - 2) SEC BETWEEN LCP'S */
+
+ /*------------------------------------------------------------------------*/
+ /* THIS STATE IS USED TO TELL IF THE FIRST LCP AFTER START/RESTART */
+ /* HAS BEEN RUN. AFTER A NODE RESTART THE NODE DOES NOT ENTER */
+ /* STARTED STATE BEFORE THIS IS DONE. */
+ /*------------------------------------------------------------------------*/
+ bool immediateLcpStart;
+ bool m_LCP_COMPLETE_REP_From_Master_Received;
+ SignalCounter m_LCP_COMPLETE_REP_Counter_DIH;
+ SignalCounter m_LCP_COMPLETE_REP_Counter_LQH;
+ SignalCounter m_LAST_LCP_FRAG_ORD;
+ NdbNodeBitmask m_participatingLQH;
+ NdbNodeBitmask m_participatingDIH;
+
+ Uint32 m_masterLcpDihRef;
+ bool m_MASTER_LCPREQ_Received;
+ Uint32 m_MASTER_LCPREQ_FailedNodeId;
+ } c_lcpState;
+
+ /*------------------------------------------------------------------------*/
+ /* THIS VARIABLE KEEPS TRACK OF HOW MANY TABLES ARE ACTIVATED. WHEN */
+ /* STARTING A LOCAL CHECKPOINT WE SHOULD AVOID STARTING A CHECKPOINT */
+ /* IF NO TABLES ARE ACTIVATED. */
+ /*------------------------------------------------------------------------*/
+ Uint32 cnoOfActiveTables;
+ Uint32 cgcpDelay; /* Delay between global checkpoints */
+
+ BlockReference cdictblockref; /* DICTIONARY BLOCK REFERENCE */
+ Uint32 cfailurenr; /* EVERY TIME A NODE FAILURE IS REPORTED
+ THIS NUMBER IS INCREMENTED. AT THE START OF
+ THE SYSTEM THIS NUMBER MUST BE INITIALISED TO
+ ZERO */
+ bool cgckptflag; /* A FLAG WHICH IS SET WHILE A NEW GLOBAL CHECK
+ POINT IS BEING CREATED. NO VERIFICATION IS ALLOWED
+ IF THE FLAG IS SET*/
+ Uint32 cgcpOrderBlocked;
+ BlockReference clocallqhblockref;
+ BlockReference clocaltcblockref;
+ BlockReference cmasterdihref;
+ Uint16 cownNodeId;
+ Uint32 cnewgcp;
+ BlockReference cndbStartReqBlockref;
+ BlockReference cntrlblockref;
+ Uint32 cgcpSameCounter;
+ Uint32 coldgcp;
+ Uint32 con_lineNodes;
+ Uint32 creceivedfrag;
+ Uint32 cremainingfrags;
+ Uint32 cstarttype;
+ Uint32 csystemnodes;
+ Uint32 currentgcp;
+
+ enum GcpMasterTakeOverState {
+ GMTOS_IDLE = 0,
+ GMTOS_INITIAL = 1,
+ ALL_READY = 2,
+ ALL_PREPARED = 3,
+ COMMIT_STARTED_NOT_COMPLETED = 4,
+ COMMIT_COMPLETED = 5,
+ PREPARE_STARTED_NOT_COMMITTED = 6,
+ SAVE_STARTED_NOT_COMPLETED = 7
+ };
+ GcpMasterTakeOverState cgcpMasterTakeOverState;
+
+public:
+ enum LcpMasterTakeOverState {
+ LMTOS_IDLE = 0,
+ LMTOS_WAIT_EMPTY_LCP = 1, // Currently doing empty LCP
+ LMTOS_WAIT_LCP_FRAG_REP = 2,// Currently waiting for outst. LCP_FRAG_REP
+ LMTOS_INITIAL = 3,
+ LMTOS_ALL_IDLE = 4,
+ LMTOS_ALL_ACTIVE = 5,
+ LMTOS_LCP_CONCLUDING = 6,
+ LMTOS_COPY_ONGOING = 7
+ };
+private:
+ class MasterTakeOverState {
+ public:
+ void set(LcpMasterTakeOverState s, Uint32 line) {
+ state = s; updatePlace = line;
+ }
+
+ LcpMasterTakeOverState state;
+ Uint32 updatePlace;
+
+ Uint32 minTableId;
+ Uint32 minFragId;
+ Uint32 failedNodeId;
+ } c_lcpMasterTakeOverState;
+
+ Uint16 cmasterNodeId;
+ Uint8 cnoHotSpare;
+
+ struct NodeStartMasterRecord {
+ Uint32 startNode;
+ Uint32 wait;
+ Uint32 failNr;
+ bool activeState;
+ bool blockLcp;
+ bool blockGcp;
+ Uint32 startInfoErrorCode;
+ Uint32 m_outstandingGsn;
+ };
+ NodeStartMasterRecord c_nodeStartMaster;
+
+ struct NodeStartSlaveRecord {
+ NodeStartSlaveRecord() { nodeId = 0;}
+
+ Uint32 nodeId;
+ };
+ NodeStartSlaveRecord c_nodeStartSlave;
+
+ Uint32 cfirstAliveNode;
+ Uint32 cfirstDeadNode;
+ Uint32 cstartPhase;
+ Uint32 cnoReplicas;
+
+ Uint32 c_startToLock;
+ Uint32 c_endToLock;
+ Uint32 c_createFragmentLock;
+ Uint32 c_updateToLock;
+
+ bool cwaitLcpSr;
+ Uint32 cnoOfNodeGroups;
+ bool cstartGcpNow;
+
+ Uint32 crestartGci; /* VALUE OF GCI WHEN SYSTEM RESTARTED OR STARTED */
+ Uint32 cminHotSpareNodes;
+
+ /**
+ * Counter variables keeping track of the number of outstanding signals
+ * of particular types in various protocols.
+ */
+ SignalCounter c_COPY_GCIREQ_Counter;
+ SignalCounter c_COPY_TABREQ_Counter;
+ SignalCounter c_CREATE_FRAGREQ_Counter;
+ SignalCounter c_DIH_SWITCH_REPLICA_REQ_Counter;
+ SignalCounter c_EMPTY_LCP_REQ_Counter;
+ SignalCounter c_END_TOREQ_Counter;
+ SignalCounter c_GCP_COMMIT_Counter;
+ SignalCounter c_GCP_PREPARE_Counter;
+ SignalCounter c_GCP_SAVEREQ_Counter;
+ SignalCounter c_INCL_NODEREQ_Counter;
+ SignalCounter c_MASTER_GCPREQ_Counter;
+ SignalCounter c_MASTER_LCPREQ_Counter;
+ SignalCounter c_START_INFOREQ_Counter;
+ SignalCounter c_START_RECREQ_Counter;
+ SignalCounter c_START_TOREQ_Counter;
+ SignalCounter c_STOP_ME_REQ_Counter;
+ SignalCounter c_TC_CLOPSIZEREQ_Counter;
+ SignalCounter c_TCGETOPSIZEREQ_Counter;
+ SignalCounter c_UPDATE_TOREQ_Counter;
+ SignalCounter c_START_LCP_REQ_Counter;
+
+ bool c_blockCommit;
+ Uint32 c_blockCommitNo;
+
+ bool getBlockCommit() const {
+ return c_blockCommit || cgckptflag;
+ }
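+ // getBlockCommit() reports a block both when an explicit commit block has
+ // been ordered (c_blockCommit, see BLOCK_COMMIT_ORD / UNBLOCK_COMMIT_ORD)
+ // and while a new global checkpoint is being formed (cgckptflag above).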
+
+ /**
+ * SwitchReplicaRecord - Should only be used by master
+ */
+ struct SwitchReplicaRecord {
+ void clear(){}
+
+ Uint32 nodeId;
+ Uint32 tableId;
+ Uint32 fragNo;
+ };
+ SwitchReplicaRecord c_switchReplicas;
+
+ struct StopPermProxyRecord {
+ StopPermProxyRecord() { clientRef = 0; }
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ BlockReference masterRef;
+ };
+
+ struct StopPermMasterRecord {
+ StopPermMasterRecord() { clientRef = 0;}
+
+ Uint32 returnValue;
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ };
+
+ StopPermProxyRecord c_stopPermProxy;
+ StopPermMasterRecord c_stopPermMaster;
+
+ void checkStopPermProxy(Signal*, NodeId failedNodeId);
+ void checkStopPermMaster(Signal*, NodeRecordPtr failedNodePtr);
+
+ void switchReplica(Signal*,
+ Uint32 nodeId,
+ Uint32 tableId,
+ Uint32 fragNo);
+
+ void switchReplicaReply(Signal*, NodeId nodeId);
+
+ /**
+ * Wait GCP (proxy)
+ */
+ struct WaitGCPProxyRecord {
+ WaitGCPProxyRecord() { clientRef = 0;}
+
+ Uint32 clientData;
+ BlockReference clientRef;
+ BlockReference masterRef;
+
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ };
+ typedef Ptr<WaitGCPProxyRecord> WaitGCPProxyPtr;
+
+ /**
+ * Wait GCP (master)
+ */
+ struct WaitGCPMasterRecord {
+ WaitGCPMasterRecord() { clientRef = 0;}
+ Uint32 clientData;
+ BlockReference clientRef;
+
+ union { Uint32 nextPool; Uint32 nextList; };
+ Uint32 prevList;
+ };
+ typedef Ptr<WaitGCPMasterRecord> WaitGCPMasterPtr;
+
+ /**
+ * Pool/list of WaitGCPProxyRecord record
+ */
+ ArrayPool<WaitGCPProxyRecord> waitGCPProxyPool;
+ ArrayList<WaitGCPProxyRecord> c_waitGCPProxyList;
+
+ /**
+ * Pool/list of WaitGCPMasterRecord record
+ */
+ ArrayPool<WaitGCPMasterRecord> waitGCPMasterPool;
+ ArrayList<WaitGCPMasterRecord> c_waitGCPMasterList;
+
+ void checkWaitGCPProxy(Signal*, NodeId failedNodeId);
+ void checkWaitGCPMaster(Signal*, NodeId failedNodeId);
+ void emptyWaitGCPMasterQueue(Signal*);
+
+ /**
+ * Stop me
+ */
+ struct StopMeRecord {
+ StopMeRecord() { clientRef = 0;}
+
+ BlockReference clientRef;
+ Uint32 clientData;
+ };
+ StopMeRecord c_stopMe;
+
+ void checkStopMe(Signal *, NodeRecordPtr failedNodePtr);
+
+#define DIH_CDATA_SIZE 128
+ /**
+ * This variable must be at least the size of Sysfile::SYSFILE_SIZE32
+ */
+ Uint32 cdata[DIH_CDATA_SIZE]; /* TEMPORARY ARRAY VARIABLE */
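+ // DIH_CDATA_SIZE is checked against _SYSFILE_SIZE32 at compile time at the
+ // end of this file.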
+
+ /**
+ * Sys file data
+ */
+ Uint32 sysfileData[DIH_CDATA_SIZE];
+ Uint32 sysfileDataToFile[DIH_CDATA_SIZE];
+
+ /**
+ * When a node comes up without a filesystem
+ * we have to clear all LCP information for that node
+ */
+ void invalidateNodeLCP(Signal *, Uint32 nodeId, Uint32 tableId);
+ void invalidateNodeLCP(Signal *, Uint32 nodeId, TabRecordPtr);
+
+ /**
+ * Reply from nodeId
+ */
+ void startInfoReply(Signal *, Uint32 nodeId);
+};
+
+#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)
+#error "cdata is to small compared to Sysfile size"
+#endif
+
+#endif
+
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
new file mode 100644
index 00000000000..9a5efebc56e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -0,0 +1,319 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBDIH_C
+#include "Dbdih.hpp"
+#include <ndb_limits.h>
+
+#define DEBUG(x) { ndbout << "DIH::" << x << endl; }
+
+void Dbdih::initData()
+{
+ cpageFileSize = ZPAGEREC;
+
+ apiConnectRecord = 0;
+ connectRecord = 0;
+ fileRecord = 0;
+ fragmentstore = 0;
+ pageRecord = 0;
+ replicaRecord = 0;
+ tabRecord = 0;
+ takeOverRecord = 0;
+ createReplicaRecord = 0;
+ nodeGroupRecord = 0;
+ nodeRecord = 0;
+ c_nextNodeGroup = 0;
+
+ // Records with constant sizes
+ createReplicaRecord = (CreateReplicaRecord*)
+ allocRecord("CreateReplicaRecord", sizeof(CreateReplicaRecord),
+ ZCREATE_REPLICA_FILE_SIZE);
+
+ nodeGroupRecord = (NodeGroupRecord*)
+ allocRecord("NodeGroupRecord", sizeof(NodeGroupRecord), MAX_NDB_NODES);
+
+ nodeRecord = (NodeRecord*)
+ allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES);
+
+ Uint32 i;
+ for(i = 0; i<MAX_NDB_NODES; i++){
+ new (&nodeRecord[i]) NodeRecord();
+ }
+
+ takeOverRecord = (TakeOverRecord*)allocRecord("TakeOverRecord",
+ sizeof(TakeOverRecord),
+ MAX_NDB_NODES);
+ for(i = 0; i<MAX_NDB_NODES; i++)
+ new (&takeOverRecord[i]) TakeOverRecord();
+
+ waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE);
+ waitGCPMasterPool.setSize(ZPROXY_MASTER_FILE_SIZE);
+
+ cgcpOrderBlocked = 0;
+ c_lcpState.ctcCounter = 0;
+ cwaitLcpSr = false;
+ c_blockCommit = false;
+ c_blockCommitNo = 1;
+}//Dbdih::initData()
+
+void Dbdih::initRecords()
+{
+ // Records with dynamic sizes
+ apiConnectRecord = (ApiConnectRecord*)
+ allocRecord("ApiConnectRecord",
+ sizeof(ApiConnectRecord),
+ capiConnectFileSize);
+
+ connectRecord = (ConnectRecord*)allocRecord("ConnectRecord",
+ sizeof(ConnectRecord),
+ cconnectFileSize);
+
+ fileRecord = (FileRecord*)allocRecord("FileRecord",
+ sizeof(FileRecord),
+ cfileFileSize);
+
+ fragmentstore = (Fragmentstore*)allocRecord("Fragmentstore",
+ sizeof(Fragmentstore),
+ cfragstoreFileSize);
+
+ pageRecord = (PageRecord*)allocRecord("PageRecord",
+ sizeof(PageRecord),
+ cpageFileSize);
+
+ replicaRecord = (ReplicaRecord*)allocRecord("ReplicaRecord",
+ sizeof(ReplicaRecord),
+ creplicaFileSize);
+
+ tabRecord = (TabRecord*)allocRecord("TabRecord",
+ sizeof(TabRecord),
+ ctabFileSize);
+
+ // Initialize BAT for interface to file system
+ NewVARIABLE* bat = allocateBat(22);
+ bat[1].WA = &pageRecord->word[0];
+ bat[1].nrr = cpageFileSize;
+ bat[1].ClusterSize = sizeof(PageRecord);
+ bat[1].bits.q = 11;
+ bat[1].bits.v = 5;
+ bat[20].WA = &sysfileData[0];
+ bat[20].nrr = 1;
+ bat[20].ClusterSize = sizeof(sysfileData);
+ bat[20].bits.q = 7;
+ bat[20].bits.v = 5;
+ bat[21].WA = &sysfileDataToFile[0];
+ bat[21].nrr = 1;
+ bat[21].ClusterSize = sizeof(sysfileDataToFile);
+ bat[21].bits.q = 7;
+ bat[21].bits.v = 5;
+}//Dbdih::initRecords()
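+// Note: initRecords() relies on the c*FileSize variables (capiConnectFileSize,
+// cconnectFileSize, ...) having been set from the configuration beforehand,
+// presumably in execREAD_CONFIG_REQ; initData() itself only fixes
+// cpageFileSize and allocates the constant-size record arrays.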
+
+Dbdih::Dbdih(const class Configuration & config):
+ SimulatedBlock(DBDIH, config),
+ c_waitGCPProxyList(waitGCPProxyPool),
+ c_waitGCPMasterList(waitGCPMasterPool)
+{
+ BLOCK_CONSTRUCTOR(Dbdih);
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbdih::execDUMP_STATE_ORD);
+ addRecSignal(GSN_NDB_TAMPER, &Dbdih::execNDB_TAMPER, true);
+ addRecSignal(GSN_DEBUG_SIG, &Dbdih::execDEBUG_SIG);
+ addRecSignal(GSN_MASTER_GCPREQ, &Dbdih::execMASTER_GCPREQ);
+ addRecSignal(GSN_MASTER_GCPREF, &Dbdih::execMASTER_GCPREF);
+ addRecSignal(GSN_MASTER_GCPCONF, &Dbdih::execMASTER_GCPCONF);
+ addRecSignal(GSN_EMPTY_LCP_CONF, &Dbdih::execEMPTY_LCP_CONF);
+ addRecSignal(GSN_MASTER_LCPREQ, &Dbdih::execMASTER_LCPREQ);
+ addRecSignal(GSN_MASTER_LCPREF, &Dbdih::execMASTER_LCPREF);
+ addRecSignal(GSN_MASTER_LCPCONF, &Dbdih::execMASTER_LCPCONF);
+ addRecSignal(GSN_NF_COMPLETEREP, &Dbdih::execNF_COMPLETEREP);
+ addRecSignal(GSN_START_PERMREQ, &Dbdih::execSTART_PERMREQ);
+ addRecSignal(GSN_START_PERMCONF, &Dbdih::execSTART_PERMCONF);
+ addRecSignal(GSN_START_PERMREF, &Dbdih::execSTART_PERMREF);
+ addRecSignal(GSN_INCL_NODEREQ, &Dbdih::execINCL_NODEREQ);
+ addRecSignal(GSN_INCL_NODECONF, &Dbdih::execINCL_NODECONF);
+ addRecSignal(GSN_END_TOREQ, &Dbdih::execEND_TOREQ);
+ addRecSignal(GSN_END_TOCONF, &Dbdih::execEND_TOCONF);
+ addRecSignal(GSN_START_TOREQ, &Dbdih::execSTART_TOREQ);
+ addRecSignal(GSN_START_TOCONF, &Dbdih::execSTART_TOCONF);
+ addRecSignal(GSN_START_MEREQ, &Dbdih::execSTART_MEREQ);
+ addRecSignal(GSN_START_MECONF, &Dbdih::execSTART_MECONF);
+ addRecSignal(GSN_START_MEREF, &Dbdih::execSTART_MEREF);
+ addRecSignal(GSN_START_COPYREQ, &Dbdih::execSTART_COPYREQ);
+ addRecSignal(GSN_START_COPYCONF, &Dbdih::execSTART_COPYCONF);
+ addRecSignal(GSN_START_COPYREF, &Dbdih::execSTART_COPYREF);
+ addRecSignal(GSN_CREATE_FRAGREQ, &Dbdih::execCREATE_FRAGREQ);
+ addRecSignal(GSN_CREATE_FRAGCONF, &Dbdih::execCREATE_FRAGCONF);
+ addRecSignal(GSN_DIVERIFYREQ, &Dbdih::execDIVERIFYREQ);
+ addRecSignal(GSN_GCP_SAVECONF, &Dbdih::execGCP_SAVECONF);
+ addRecSignal(GSN_GCP_PREPARECONF, &Dbdih::execGCP_PREPARECONF);
+ addRecSignal(GSN_GCP_PREPARE, &Dbdih::execGCP_PREPARE);
+ addRecSignal(GSN_GCP_NODEFINISH, &Dbdih::execGCP_NODEFINISH);
+ addRecSignal(GSN_GCP_COMMIT, &Dbdih::execGCP_COMMIT);
+ addRecSignal(GSN_DIHNDBTAMPER, &Dbdih::execDIHNDBTAMPER);
+ addRecSignal(GSN_CONTINUEB, &Dbdih::execCONTINUEB);
+ addRecSignal(GSN_COPY_GCIREQ, &Dbdih::execCOPY_GCIREQ);
+ addRecSignal(GSN_COPY_GCICONF, &Dbdih::execCOPY_GCICONF);
+ addRecSignal(GSN_COPY_TABREQ, &Dbdih::execCOPY_TABREQ);
+ addRecSignal(GSN_COPY_TABCONF, &Dbdih::execCOPY_TABCONF);
+ addRecSignal(GSN_TCGETOPSIZECONF, &Dbdih::execTCGETOPSIZECONF);
+ addRecSignal(GSN_TC_CLOPSIZECONF, &Dbdih::execTC_CLOPSIZECONF);
+
+ addRecSignal(GSN_LCP_COMPLETE_REP, &Dbdih::execLCP_COMPLETE_REP);
+ addRecSignal(GSN_LCP_FRAG_REP, &Dbdih::execLCP_FRAG_REP);
+ addRecSignal(GSN_START_LCP_REQ, &Dbdih::execSTART_LCP_REQ);
+ addRecSignal(GSN_START_LCP_CONF, &Dbdih::execSTART_LCP_CONF);
+
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbdih::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_UNBLO_DICTCONF, &Dbdih::execUNBLO_DICTCONF);
+ addRecSignal(GSN_COPY_ACTIVECONF, &Dbdih::execCOPY_ACTIVECONF);
+ addRecSignal(GSN_TAB_COMMITREQ, &Dbdih::execTAB_COMMITREQ);
+ addRecSignal(GSN_NODE_FAILREP, &Dbdih::execNODE_FAILREP);
+ addRecSignal(GSN_COPY_FRAGCONF, &Dbdih::execCOPY_FRAGCONF);
+ addRecSignal(GSN_COPY_FRAGREF, &Dbdih::execCOPY_FRAGREF);
+ addRecSignal(GSN_DIADDTABREQ, &Dbdih::execDIADDTABREQ);
+ addRecSignal(GSN_DIGETNODESREQ, &Dbdih::execDIGETNODESREQ);
+ addRecSignal(GSN_DIRELEASEREQ, &Dbdih::execDIRELEASEREQ);
+ addRecSignal(GSN_DISEIZEREQ, &Dbdih::execDISEIZEREQ);
+ addRecSignal(GSN_STTOR, &Dbdih::execSTTOR);
+ addRecSignal(GSN_DI_FCOUNTREQ, &Dbdih::execDI_FCOUNTREQ);
+ addRecSignal(GSN_DIGETPRIMREQ, &Dbdih::execDIGETPRIMREQ);
+ addRecSignal(GSN_GCP_SAVEREF, &Dbdih::execGCP_SAVEREF);
+ addRecSignal(GSN_GCP_TCFINISHED, &Dbdih::execGCP_TCFINISHED);
+ addRecSignal(GSN_READ_NODESCONF, &Dbdih::execREAD_NODESCONF);
+ addRecSignal(GSN_NDB_STTOR, &Dbdih::execNDB_STTOR);
+ addRecSignal(GSN_DICTSTARTCONF, &Dbdih::execDICTSTARTCONF);
+ addRecSignal(GSN_NDB_STARTREQ, &Dbdih::execNDB_STARTREQ);
+ addRecSignal(GSN_GETGCIREQ, &Dbdih::execGETGCIREQ);
+ addRecSignal(GSN_DIH_RESTARTREQ, &Dbdih::execDIH_RESTARTREQ);
+ addRecSignal(GSN_START_RECCONF, &Dbdih::execSTART_RECCONF);
+ addRecSignal(GSN_START_FRAGCONF, &Dbdih::execSTART_FRAGCONF);
+ addRecSignal(GSN_ADD_FRAGCONF, &Dbdih::execADD_FRAGCONF);
+ addRecSignal(GSN_ADD_FRAGREF, &Dbdih::execADD_FRAGREF);
+ addRecSignal(GSN_FSOPENCONF, &Dbdih::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbdih::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbdih::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbdih::execFSCLOSEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbdih::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF);
+ addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ);
+
+ addRecSignal(GSN_START_INFOREQ,
+ &Dbdih::execSTART_INFOREQ);
+ addRecSignal(GSN_START_INFOREF,
+ &Dbdih::execSTART_INFOREF);
+ addRecSignal(GSN_START_INFOCONF,
+ &Dbdih::execSTART_INFOCONF);
+
+ addRecSignal(GSN_CHECKNODEGROUPSREQ, &Dbdih::execCHECKNODEGROUPSREQ);
+
+ addRecSignal(GSN_BLOCK_COMMIT_ORD,
+ &Dbdih::execBLOCK_COMMIT_ORD);
+ addRecSignal(GSN_UNBLOCK_COMMIT_ORD,
+ &Dbdih::execUNBLOCK_COMMIT_ORD);
+
+ addRecSignal(GSN_DIH_SWITCH_REPLICA_REQ,
+ &Dbdih::execDIH_SWITCH_REPLICA_REQ);
+
+ addRecSignal(GSN_DIH_SWITCH_REPLICA_REF,
+ &Dbdih::execDIH_SWITCH_REPLICA_REF);
+
+ addRecSignal(GSN_DIH_SWITCH_REPLICA_CONF,
+ &Dbdih::execDIH_SWITCH_REPLICA_CONF);
+
+ addRecSignal(GSN_STOP_PERM_REQ, &Dbdih::execSTOP_PERM_REQ);
+ addRecSignal(GSN_STOP_PERM_REF, &Dbdih::execSTOP_PERM_REF);
+ addRecSignal(GSN_STOP_PERM_CONF, &Dbdih::execSTOP_PERM_CONF);
+
+ addRecSignal(GSN_STOP_ME_REQ, &Dbdih::execSTOP_ME_REQ);
+ addRecSignal(GSN_STOP_ME_REF, &Dbdih::execSTOP_ME_REF);
+ addRecSignal(GSN_STOP_ME_CONF, &Dbdih::execSTOP_ME_CONF);
+
+ addRecSignal(GSN_WAIT_GCP_REQ, &Dbdih::execWAIT_GCP_REQ);
+ addRecSignal(GSN_WAIT_GCP_REF, &Dbdih::execWAIT_GCP_REF);
+ addRecSignal(GSN_WAIT_GCP_CONF, &Dbdih::execWAIT_GCP_CONF);
+
+ addRecSignal(GSN_UPDATE_TOREQ, &Dbdih::execUPDATE_TOREQ);
+ addRecSignal(GSN_UPDATE_TOCONF, &Dbdih::execUPDATE_TOCONF);
+
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdih::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbdih::execWAIT_DROP_TAB_REF);
+ addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbdih::execWAIT_DROP_TAB_CONF);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbdih::execDROP_TAB_REQ);
+
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dbdih::execALTER_TAB_REQ);
+
+ addRecSignal(GSN_CREATE_FRAGMENTATION_REQ,
+ &Dbdih::execCREATE_FRAGMENTATION_REQ);
+
+ initData();
+}//Dbdih::Dbdih()
+
+Dbdih::~Dbdih()
+{
+ deallocRecord((void **)&apiConnectRecord, "ApiConnectRecord",
+ sizeof(ApiConnectRecord),
+ capiConnectFileSize);
+
+ deallocRecord((void **)&connectRecord, "ConnectRecord",
+ sizeof(ConnectRecord),
+ cconnectFileSize);
+
+ deallocRecord((void **)&fileRecord, "FileRecord",
+ sizeof(FileRecord),
+ cfileFileSize);
+
+ deallocRecord((void **)&fragmentstore, "Fragmentstore",
+ sizeof(Fragmentstore),
+ cfragstoreFileSize);
+
+ deallocRecord((void **)&pageRecord, "PageRecord",
+ sizeof(PageRecord),
+ cpageFileSize);
+
+ deallocRecord((void **)&replicaRecord, "ReplicaRecord",
+ sizeof(ReplicaRecord),
+ creplicaFileSize);
+
+ deallocRecord((void **)&tabRecord, "TabRecord",
+ sizeof(TabRecord),
+ ctabFileSize);
+
+ // Records with constant sizes
+ deallocRecord((void **)&createReplicaRecord,
+ "CreateReplicaRecord", sizeof(CreateReplicaRecord),
+ ZCREATE_REPLICA_FILE_SIZE);
+
+ deallocRecord((void **)&nodeGroupRecord, "NodeGroupRecord",
+ sizeof(NodeGroupRecord), MAX_NDB_NODES);
+
+ deallocRecord((void **)&nodeRecord, "NodeRecord",
+ sizeof(NodeRecord), MAX_NDB_NODES);
+
+ deallocRecord((void **)&takeOverRecord, "TakeOverRecord",
+ sizeof(TakeOverRecord),
+ MAX_NDB_NODES);
+
+}//Dbdih::~Dbdih()
+
+BLOCK_FUNCTIONS(Dbdih)
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
new file mode 100644
index 00000000000..af75707560a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -0,0 +1,14272 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBDIH_C
+#include <ndb_limits.h>
+#include <ndb_version.h>
+#include <NdbOut.hpp>
+
+#include "Dbdih.hpp"
+#include "Configuration.hpp"
+
+#include <signaldata/BlockCommitOrd.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/CreateFrag.hpp>
+#include <signaldata/CopyActive.hpp>
+#include <signaldata/CopyFrag.hpp>
+#include <signaldata/CopyGCIReq.hpp>
+#include <signaldata/DiAddTab.hpp>
+#include <signaldata/DictStart.hpp>
+#include <signaldata/DiGetNodes.hpp>
+#include <signaldata/DihContinueB.hpp>
+#include <signaldata/DihSwitchReplica.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/EmptyLcp.hpp>
+#include <signaldata/EndTo.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/GCPSave.hpp>
+#include <signaldata/HotSpareRep.hpp>
+#include <signaldata/MasterGCP.hpp>
+#include <signaldata/MasterLCP.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/StartFragReq.hpp>
+#include <signaldata/StartInfo.hpp>
+#include <signaldata/StartMe.hpp>
+#include <signaldata/StartPerm.hpp>
+#include <signaldata/StartRec.hpp>
+#include <signaldata/StartTo.hpp>
+#include <signaldata/StopPerm.hpp>
+#include <signaldata/StopMe.hpp>
+#include <signaldata/TestOrd.hpp>
+#include <signaldata/UpdateTo.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/DihStartTab.hpp>
+#include <signaldata/LCP.hpp>
+#include <signaldata/SystemError.hpp>
+
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateFragmentation.hpp>
+#include <signaldata/LqhFrag.hpp>
+#include <signaldata/FsOpenReq.hpp>
+#include <DebuggerNames.hpp>
+
+#define SYSFILE ((Sysfile *)&sysfileData[0])
+
+#define RETURN_IF_NODE_NOT_ALIVE(node) \
+ if (!checkNodeAlive((node))) { \
+ jam(); \
+ return; \
+ } \
+
+#define RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverIndex, regTOPtr) \
+ regTOPtr.i = takeOverIndex; \
+ ptrCheckGuard(regTOPtr, MAX_NDB_NODES, takeOverRecord); \
+ if (checkToInterrupted(regTOPtr)) { \
+ jam(); \
+ return; \
+ } \
+
+#define receiveLoopMacro(sigName, receiveNodeId)\
+{ \
+ c_##sigName##_Counter.clearWaitingFor(receiveNodeId); \
+ if(c_##sigName##_Counter.done() == false){ \
+ jam(); \
+ return; \
+ } \
+}
+
+#define sendLoopMacro(sigName, signalRoutine) \
+{ \
+ c_##sigName##_Counter.clearWaitingFor(); \
+ NodeRecordPtr specNodePtr; \
+ specNodePtr.i = cfirstAliveNode; \
+ do { \
+ jam(); \
+ ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord); \
+ c_##sigName##_Counter.setWaitingFor(specNodePtr.i); \
+ signalRoutine(signal, specNodePtr.i); \
+ specNodePtr.i = specNodePtr.p->nextNode; \
+ } while (specNodePtr.i != RNIL); \
+}
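+
+/* A sketch of how the two loop macros above pair up, using COPY_GCIREQ as
+ * the example (all names are the ones already used in this block).
+ * sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ) expands essentially to:
+ *
+ *   c_COPY_GCIREQ_Counter.clearWaitingFor();
+ *   NodeRecordPtr specNodePtr;
+ *   specNodePtr.i = cfirstAliveNode;
+ *   do {
+ *     jam();
+ *     ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord);
+ *     c_COPY_GCIREQ_Counter.setWaitingFor(specNodePtr.i);
+ *     sendCOPY_GCIREQ(signal, specNodePtr.i);
+ *     specNodePtr.i = specNodePtr.p->nextNode;
+ *   } while (specNodePtr.i != RNIL);
+ *
+ * Each reply handler then calls receiveLoopMacro(COPY_GCIREQ, nodeId), which
+ * clears the replying node from the counter and returns early until every
+ * alive node has answered.
+ */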
+
+static
+Uint32
+prevLcpNo(Uint32 lcpNo){
+ if(lcpNo == 0)
+ return MAX_LCP_STORED - 1;
+ return lcpNo - 1;
+}
+
+static
+Uint32
+nextLcpNo(Uint32 lcpNo){
+ lcpNo++;
+ if(lcpNo == MAX_LCP_STORED)
+ return 0;
+ return lcpNo;
+}
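+
+/* prevLcpNo() and nextLcpNo() above step through the stored LCP numbers as a
+ * ring; they are just the modulo-free forms of
+ *   prevLcpNo(n) == (n + MAX_LCP_STORED - 1) % MAX_LCP_STORED
+ *   nextLcpNo(n) == (n + 1) % MAX_LCP_STORED
+ */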
+
+#define gth(x, y) ndbrequire(((int)x)>((int)y))
+
+void Dbdih::nullRoutine(Signal* signal, Uint32 nodeId)
+{
+}//Dbdih::nullRoutine()
+
+void Dbdih::sendCOPY_GCIREQ(Signal* signal, Uint32 nodeId)
+{
+ ndbrequire(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE);
+
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ const Uint32 wordPerSignal = CopyGCIReq::DATA_SIZE;
+ const Uint32 noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
+ wordPerSignal);
+
+ CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
+ copyGCI->anyData = nodeId;
+ copyGCI->copyReason = c_copyGCIMaster.m_copyReason;
+ copyGCI->startWord = 0;
+
+ for(Uint32 i = 0; i < noOfSignals; i++) {
+ jam();
+ { // Do copy
+ const int startWord = copyGCI->startWord;
+ for(Uint32 j = 0; j < wordPerSignal; j++) {
+ copyGCI->data[j] = sysfileData[j+startWord];
+ }//for
+ }
+ sendSignal(ref, GSN_COPY_GCIREQ, signal, 25, JBB);
+ copyGCI->startWord += wordPerSignal;
+ }//for
+}//Dbdih::sendCOPY_GCIREQ()
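+
+/* The sysfile is larger than one signal, so sendCOPY_GCIREQ() above splits it
+ * into ceil(Sysfile::SYSFILE_SIZE32 / CopyGCIReq::DATA_SIZE) signals, each
+ * carrying DATA_SIZE consecutive words starting at startWord. The fixed send
+ * length of 25 words presumably covers the three header fields (anyData,
+ * copyReason, startWord) plus the DATA_SIZE data words.
+ */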
+
+
+void Dbdih::sendDIH_SWITCH_REPLICA_REQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_DIH_SWITCH_REPLICA_REQ, signal,
+ DihSwitchReplicaReq::SignalLength, JBB);
+}//Dbdih::sendDIH_SWITCH_REPLICA_REQ()
+
+void Dbdih::sendEMPTY_LCP_REQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ sendSignal(ref, GSN_EMPTY_LCP_REQ, signal, EmptyLcpReq::SignalLength, JBB);
+}//Dbdih::sendEMPTY_LCP_REQ()
+
+void Dbdih::sendEND_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_END_TOREQ, signal, EndToReq::SignalLength, JBB);
+}//Dbdih::sendEND_TOREQ()
+
+void Dbdih::sendGCP_COMMIT(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(ref, GSN_GCP_COMMIT, signal, 2, JBA);
+}//Dbdih::sendGCP_COMMIT()
+
+void Dbdih::sendGCP_PREPARE(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(ref, GSN_GCP_PREPARE, signal, 2, JBA);
+}//Dbdih::sendGCP_PREPARE()
+
+void Dbdih::sendGCP_SAVEREQ(Signal* signal, Uint32 nodeId)
+{
+ GCPSaveReq * const saveReq = (GCPSaveReq*)&signal->theData[0];
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ saveReq->dihBlockRef = reference();
+ saveReq->dihPtr = nodeId;
+ saveReq->gci = coldgcp;
+ sendSignal(ref, GSN_GCP_SAVEREQ, signal, GCPSaveReq::SignalLength, JBB);
+}//Dbdih::sendGCP_SAVEREQ()
+
+void Dbdih::sendINCL_NODEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference nodeDihRef = calcDihBlockRef(nodeId);
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartMaster.startNode;
+ signal->theData[2] = c_nodeStartMaster.failNr;
+ signal->theData[3] = 0;
+ signal->theData[4] = currentgcp;
+ sendSignal(nodeDihRef, GSN_INCL_NODEREQ, signal, 5, JBB);
+}//Dbdih::sendINCL_NODEREQ()
+
+void Dbdih::sendMASTER_GCPREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_MASTER_GCPREQ, signal, MasterGCPReq::SignalLength, JBB);
+}//Dbdih::sendMASTER_GCPREQ()
+
+void Dbdih::sendMASTER_LCPREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_MASTER_LCPREQ, signal, MasterLCPReq::SignalLength, JBB);
+}//Dbdih::sendMASTER_LCPREQ()
+
+void Dbdih::sendSTART_INFOREQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_INFOREQ, signal, StartInfoReq::SignalLength, JBB);
+}//Dbdih::sendSTART_INFOREQ()
+
+void Dbdih::sendSTART_RECREQ(Signal* signal, Uint32 nodeId)
+{
+ StartRecReq * const req = (StartRecReq*)&signal->theData[0];
+ BlockReference ref = calcLqhBlockRef(nodeId);
+ req->receivingNodeId = nodeId;
+ req->senderRef = reference();
+ req->keepGci = SYSFILE->keepGCI;
+ req->lastCompletedGci = SYSFILE->lastCompletedGCI[nodeId];
+ req->newestGci = SYSFILE->newestRestorableGCI;
+ sendSignal(ref, GSN_START_RECREQ, signal, StartRecReq::SignalLength, JBB);
+
+ signal->theData[0] = NDB_LE_StartREDOLog;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = SYSFILE->keepGCI;
+ signal->theData[3] = SYSFILE->lastCompletedGCI[nodeId];
+ signal->theData[4] = SYSFILE->newestRestorableGCI;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 5, JBB);
+}//Dbdih::sendSTART_RECREQ()
+
+void Dbdih::sendSTART_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_TOREQ, signal, StartToReq::SignalLength, JBB);
+}//Dbdih::sendSTART_TOREQ()
+
+void Dbdih::sendSTOP_ME_REQ(Signal* signal, Uint32 nodeId)
+{
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_STOP_ME_REQ, signal, StopMeReq::SignalLength, JBB);
+ }//if
+}//Dbdih::sendSTOP_ME_REQ()
+
+void Dbdih::sendTC_CLOPSIZEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcTcBlockRef(nodeId);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = reference();
+ sendSignal(ref, GSN_TC_CLOPSIZEREQ, signal, 2, JBB);
+}//Dbdih::sendTC_CLOPSIZEREQ()
+
+void Dbdih::sendTCGETOPSIZEREQ(Signal* signal, Uint32 nodeId)
+{
+ BlockReference ref = calcTcBlockRef(nodeId);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = reference();
+ sendSignal(ref, GSN_TCGETOPSIZEREQ, signal, 2, JBB);
+}//Dbdih::sendTCGETOPSIZEREQ()
+
+void Dbdih::sendUPDATE_TOREQ(Signal* signal, Uint32 nodeId)
+{
+ const BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_UPDATE_TOREQ, signal, UpdateToReq::SignalLength, JBB);
+}//Dbdih::sendUPDATE_TOREQ()
+
+void Dbdih::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ switch ((DihContinueB::Type)signal->theData[0]) {
+ case DihContinueB::ZPACK_TABLE_INTO_PAGES:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ packTableIntoPagesLab(signal, tableId);
+ return;
+ break;
+ }
+ case DihContinueB::ZPACK_FRAG_INTO_PAGES:
+ {
+ RWFragment wf;
+ jam();
+ wf.rwfTabPtr.i = signal->theData[1];
+ ptrCheckGuard(wf.rwfTabPtr, ctabFileSize, tabRecord);
+ wf.fragId = signal->theData[2];
+ wf.pageIndex = signal->theData[3];
+ wf.wordIndex = signal->theData[4];
+ packFragIntoPagesLab(signal, &wf);
+ return;
+ break;
+ }
+ case DihContinueB::ZREAD_PAGES_INTO_TABLE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ readPagesIntoTableLab(signal, tableId);
+ return;
+ break;
+ }
+ case DihContinueB::ZREAD_PAGES_INTO_FRAG:
+ {
+ RWFragment rf;
+ jam();
+ rf.rwfTabPtr.i = signal->theData[1];
+ ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
+ rf.fragId = signal->theData[2];
+ rf.pageIndex = signal->theData[3];
+ rf.wordIndex = signal->theData[4];
+ readPagesIntoFragLab(signal, &rf);
+ return;
+ break;
+ }
+ case DihContinueB::ZCOPY_TABLE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ copyTableLab(signal, tableId);
+ return;
+ }
+ case DihContinueB::ZCOPY_TABLE_NODE:
+ {
+ NodeRecordPtr nodePtr;
+ CopyTableNode ctn;
+ jam();
+ ctn.ctnTabPtr.i = signal->theData[1];
+ ptrCheckGuard(ctn.ctnTabPtr, ctabFileSize, tabRecord);
+ nodePtr.i = signal->theData[2];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ ctn.pageIndex = signal->theData[3];
+ ctn.wordIndex = signal->theData[4];
+ ctn.noOfWords = signal->theData[5];
+ copyTableNode(signal, &ctn, nodePtr);
+ return;
+ }
+ case DihContinueB::ZSTART_FRAGMENT:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ startFragment(signal, tableId, fragId);
+ return;
+ }
+ case DihContinueB::ZCOMPLETE_RESTART:
+ jam();
+ completeRestartLab(signal);
+ return;
+ case DihContinueB::ZREAD_TABLE_FROM_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ readTableFromPagesLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZSR_PHASE2_READ_TABLE:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ srPhase2ReadTableLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZCHECK_TC_COUNTER:
+ jam();
+#ifndef NO_LCP
+ checkTcCounterLab(signal);
+#endif
+ return;
+ case DihContinueB::ZCALCULATE_KEEP_GCI:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ Uint32 fragId = signal->theData[2];
+ calculateKeepGciLab(signal, tableId, fragId);
+ return;
+ }
+ case DihContinueB::ZSTORE_NEW_LCP_ID:
+ jam();
+ storeNewLcpIdLab(signal);
+ return;
+ case DihContinueB::ZTABLE_UPDATE:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tableUpdateLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZCHECK_LCP_COMPLETED:
+ {
+ jam();
+ checkLcpCompletedLab(signal);
+ return;
+ }
+ case DihContinueB::ZINIT_LCP:
+ {
+ jam();
+ Uint32 senderRef = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ initLcpLab(signal, senderRef, tableId);
+ return;
+ }
+ case DihContinueB::ZADD_TABLE_MASTER_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_MASTER;
+ tableUpdateLab(signal, tabPtr);
+ return;
+ break;
+ }
+ case DihContinueB::ZDIH_ADD_TABLE_MASTER:
+ {
+ jam();
+ addTable_closeConf(signal, signal->theData[1]);
+ return;
+ }
+ case DihContinueB::ZADD_TABLE_SLAVE_PAGES:
+ {
+ TabRecordPtr tabPtr;
+ jam();
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_SLAVE;
+ tableUpdateLab(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::ZDIH_ADD_TABLE_SLAVE:
+ {
+ ndbrequire(false);
+ return;
+ }
+ case DihContinueB::ZSTART_GCP:
+ jam();
+#ifndef NO_GCP
+ startGcpLab(signal, signal->theData[1]);
+#endif
+ return;
+ break;
+ case DihContinueB::ZCOPY_GCI:{
+ jam();
+ CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)signal->theData[1];
+ ndbrequire(c_copyGCIMaster.m_copyReason == reason);
+ sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
+ return;
+ }
+ break;
+ case DihContinueB::ZEMPTY_VERIFY_QUEUE:
+ jam();
+ emptyverificbuffer(signal, true);
+ return;
+ break;
+ case DihContinueB::ZCHECK_GCP_STOP:
+ jam();
+#ifndef NO_GCP
+ checkGcpStopLab(signal);
+#endif
+ return;
+ break;
+ case DihContinueB::ZREMOVE_NODE_FROM_TABLE:
+ {
+ jam();
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ removeNodeFromTables(signal, nodeId, tableId);
+ return;
+ }
+ case DihContinueB::ZCOPY_NODE:
+ {
+ jam();
+ Uint32 tableId = signal->theData[1];
+ copyNodeLab(signal, tableId);
+ return;
+ }
+ case DihContinueB::ZSTART_TAKE_OVER:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 startNode = signal->theData[2];
+ Uint32 toNode = signal->theData[3];
+ startTakeOver(signal, takeOverPtrI, startNode, toNode);
+ return;
+ break;
+ }
+ case DihContinueB::ZCHECK_START_TAKE_OVER:
+ jam();
+ checkStartTakeOver(signal);
+ break;
+ case DihContinueB::ZTO_START_COPY_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ startNextCopyFragment(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZINVALIDATE_NODE_LCP:
+ {
+ jam();
+ const Uint32 nodeId = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+ invalidateNodeLCP(signal, nodeId, tableId);
+ return;
+ }
+ case DihContinueB::ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal,
+ signal->theData[1],
+ signal->theData[2],
+ signal->theData[3]);
+ return;
+ break;
+ case DihContinueB::ZSTART_PERMREQ_AGAIN:
+ jam();
+ nodeRestartPh2Lab(signal);
+ return;
+ break;
+ case DihContinueB::SwitchReplica:
+ {
+ jam();
+ const Uint32 nodeId = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+ const Uint32 fragNo = signal->theData[3];
+ switchReplica(signal, nodeId, tableId, fragNo);
+ return;
+ }
+ case DihContinueB::ZSEND_START_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ sendStartTo(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_ADD_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ toCopyFragLab(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_UPDATE_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 updateState = signal->theData[4];
+ sendUpdateTo(signal, takeOverPtrI, updateState);
+ return;
+ }
+ case DihContinueB::ZSEND_END_TO:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ sendEndTo(signal, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::ZSEND_CREATE_FRAG:
+ {
+ jam();
+ Uint32 takeOverPtrI = signal->theData[1];
+ Uint32 storedType = signal->theData[2];
+ Uint32 startGci = signal->theData[3];
+ sendCreateFragReq(signal, startGci, storedType, takeOverPtrI);
+ return;
+ }
+ case DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE:{
+ jam();
+ TabRecordPtr tabPtr;
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ waitDropTabWritingToFile(signal, tabPtr);
+ return;
+ }
+ case DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:{
+ jam();
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ checkWaitDropTabFailedLqh(signal, nodeId, tableId);
+ return;
+ }
+ }//switch
+
+ ndbrequire(false);
+ return;
+}//Dbdih::execCONTINUEB()
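+
+/* CONTINUEB is the block's internal continuation signal: theData[0] selects
+ * the DihContinueB job and the remaining words are that job's arguments, so
+ * long-running work such as packing tables into pages or copying tables can
+ * be split into short, real-time friendly slices.
+ */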
+
+void Dbdih::execCOPY_GCIREQ(Signal* signal)
+{
+ CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0];
+ jamEntry();
+ CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)copyGCI->copyReason;
+ const Uint32 tstart = copyGCI->startWord;
+
+ ndbrequire(cmasterdihref == signal->senderBlockRef());
+ ndbrequire(c_copyGCISlave.m_copyReason == CopyGCIReq::IDLE);
+ ndbrequire(c_copyGCISlave.m_expectedNextWord == tstart);
+ ndbrequire(reason != CopyGCIReq::IDLE);
+
+ arrGuard(tstart + CopyGCIReq::DATA_SIZE, sizeof(sysfileData)/4);
+ for(Uint32 i = 0; i<CopyGCIReq::DATA_SIZE; i++)
+ cdata[tstart+i] = copyGCI->data[i];
+
+ if ((tstart + CopyGCIReq::DATA_SIZE) >= Sysfile::SYSFILE_SIZE32) {
+ jam();
+ c_copyGCISlave.m_expectedNextWord = 0;
+ } else {
+ jam();
+ c_copyGCISlave.m_expectedNextWord += CopyGCIReq::DATA_SIZE;
+ return;
+ }//if
+
+ memcpy(sysfileData, cdata, sizeof(sysfileData));
+
+ c_copyGCISlave.m_copyReason = reason;
+ c_copyGCISlave.m_senderRef = signal->senderBlockRef();
+ c_copyGCISlave.m_senderData = copyGCI->anyData;
+
+ CRASH_INSERTION2(7020, reason==CopyGCIReq::LOCAL_CHECKPOINT);
+ CRASH_INSERTION2(7008, reason==CopyGCIReq::GLOBAL_CHECKPOINT);
+
+ /* -------------------------------------------------------------------------*/
+ /* WE RECORD THE CURRENT MASTER AS THE REQUESTER OF THE COPY GCI. IF THE */
+ /* CURRENT MASTER IS REPLACED, WE DO NOT WANT THE NEW MASTER TO RECEIVE A */
+ /* CONFIRM FOR SOMETHING IT HAS NOT SENT. THE TAKE OVER MUST BE CAREFUL. */
+ /* -------------------------------------------------------------------------*/
+ bool ok = false;
+ switch(reason){
+ case CopyGCIReq::IDLE:
+ ok = true;
+ jam();
+ ndbrequire(false);
+ break;
+ case CopyGCIReq::LOCAL_CHECKPOINT: {
+ ok = true;
+ jam();
+ c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
+ c_lcpState.m_masterLcpDihRef = cmasterdihref;
+ setNodeInfo(signal);
+ break;
+ }
+ case CopyGCIReq::RESTART: {
+ ok = true;
+ jam();
+ coldgcp = SYSFILE->newestRestorableGCI;
+ crestartGci = SYSFILE->newestRestorableGCI;
+ Sysfile::setRestartOngoing(SYSFILE->systemRestartBits);
+ currentgcp = coldgcp + 1;
+ cnewgcp = coldgcp + 1;
+ setNodeInfo(signal);
+ if ((Sysfile::getLCPOngoing(SYSFILE->systemRestartBits))) {
+ jam();
+ /* -------------------------------------------------------------------- */
+ // IF THERE WAS A LOCAL CHECKPOINT ONGOING AT THE CRASH MOMENT WE WILL
+ // INVALIDATE THAT LOCAL CHECKPOINT.
+ /* -------------------------------------------------------------------- */
+ invalidateLcpInfoAfterSr();
+ }//if
+ break;
+ }
+ case CopyGCIReq::GLOBAL_CHECKPOINT: {
+ ok = true;
+ jam();
+ cgcpParticipantState = GCP_PARTICIPANT_COPY_GCI_RECEIVED;
+ setNodeInfo(signal);
+ break;
+ }//if
+ case CopyGCIReq::INITIAL_START_COMPLETED:
+ ok = true;
+ jam();
+ break;
+ }
+ ndbrequire(ok);
+
+ /* ----------------------------------------------------------------------- */
+ /* WE START BY TRYING TO OPEN THE FIRST RESTORABLE GCI FILE. */
+ /* ----------------------------------------------------------------------- */
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ if (filePtr.p->fileStatus == FileRecord::OPEN) {
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ return;
+ }//if
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
+ return;
+}//Dbdih::execCOPY_GCIREQ()
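+
+/* execCOPY_GCIREQ() is the receiving half of sendCOPY_GCIREQ(): each signal's
+ * DATA_SIZE words are buffered in cdata at the expected offset, and only when
+ * the last chunk arrives (startWord + DATA_SIZE >= SYSFILE_SIZE32) is the
+ * whole buffer copied into sysfileData and the copy reason acted upon.
+ */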
+
+void Dbdih::execDICTSTARTCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = refToNode(signal->getSendersBlockRef());
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ nodeDictStartConfLab(signal);
+ } else {
+ jam();
+ dictStartConfLab(signal);
+ }//if
+}//Dbdih::execDICTSTARTCONF()
+
+void Dbdih::execFSCLOSECONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CLOSING_GCP:
+ jam();
+ closingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_GCP_CRASH:
+ jam();
+ closingGcpCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_CRASH:
+ jam();
+ closingTableCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_SR:
+ jam();
+ closingTableSrLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CLOSE:
+ jam();
+ tableCloseLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CLOSE_DELETE:
+ jam();
+ tableDeleteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSCLOSECONF()
+
+void Dbdih::execFSCLOSEREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CLOSING_GCP:
+ ndbrequire(false);
+ break;
+ case FileRecord::CLOSING_GCP_CRASH:
+ jam();
+ closingGcpCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_CRASH:
+ jam();
+ closingTableCrashLab(signal, filePtr);
+ break;
+ case FileRecord::CLOSING_TABLE_SR:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_CLOSE:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_CLOSE_DELETE:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSCLOSEREF()
+
+void Dbdih::execFSOPENCONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->fileRef = signal->theData[1];
+ filePtr.p->fileStatus = FileRecord::OPEN;
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CREATING_GCP:
+ jam();
+ creatingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_COPY_GCI:
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ break;
+ case FileRecord::CREATING_COPY_GCI:
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_GCP:
+ jam();
+ openingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_TABLE:
+ jam();
+ openingTableLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CREATE:
+ jam();
+ tableCreateLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_OPEN_FOR_DELETE:
+ jam();
+ tableOpenLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSOPENCONF()
+
+void Dbdih::execFSOPENREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::CREATING_GCP:
+ /* --------------------------------------------------------------------- */
+ /* WE DID NOT MANAGE TO CREATE A GLOBAL CHECKPOINT FILE. SERIOUS ERROR */
+ /* WHICH CAUSES A SYSTEM RESTART. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::OPENING_COPY_GCI:
+ jam();
+ openingCopyGciErrorLab(signal, filePtr);
+ break;
+ case FileRecord::CREATING_COPY_GCI:
+ ndbrequire(false);
+ break;
+ case FileRecord::OPENING_GCP:
+ jam();
+ openingGcpErrorLab(signal, filePtr);
+ break;
+ case FileRecord::OPENING_TABLE:
+ jam();
+ openingTableErrorLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_CREATE:
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_OPEN_FOR_DELETE:
+ jam();
+ tableDeleteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSOPENREF()
+
+void Dbdih::execFSREADCONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::READING_GCP:
+ jam();
+ readingGcpLab(signal, filePtr);
+ break;
+ case FileRecord::READING_TABLE:
+ jam();
+ readingTableLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSREADCONF()
+
+void Dbdih::execFSREADREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::READING_GCP:
+ jam();
+ readingGcpErrorLab(signal, filePtr);
+ break;
+ case FileRecord::READING_TABLE:
+ jam();
+ readingTableErrorLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSREADREF()
+
+void Dbdih::execFSWRITECONF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::WRITING_COPY_GCI:
+ jam();
+ writingCopyGciLab(signal, filePtr);
+ break;
+ case FileRecord::WRITE_INIT_GCP:
+ jam();
+ writeInitGcpLab(signal, filePtr);
+ break;
+ case FileRecord::TABLE_WRITE:
+ jam();
+ tableWriteLab(signal, filePtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSWRITECONF()
+
+void Dbdih::execFSWRITEREF(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ jamEntry();
+ filePtr.i = signal->theData[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ FileRecord::ReqStatus status = filePtr.p->reqStatus;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ switch (status) {
+ case FileRecord::WRITING_COPY_GCI:
+ /* --------------------------------------------------------------------- */
+ /* EVEN CREATING THE FILE DID NOT WORK. WE WILL THEN CRASH. */
+ /* ERROR IN WRITING FILE. WE WILL NOT CONTINUE FROM HERE. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::WRITE_INIT_GCP:
+ /* --------------------------------------------------------------------- */
+ /* AN ERROR OCCURRED IN WRITING A GCI FILE WHICH IS A SERIOUS ERROR */
+ /* THAT CAUSES A SYSTEM RESTART. */
+ /* --------------------------------------------------------------------- */
+ ndbrequire(false);
+ break;
+ case FileRecord::TABLE_WRITE:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execFSWRITEREF()
+
+void Dbdih::execGETGCIREQ(Signal* signal)
+{
+
+ jamEntry();
+ Uint32 userPtr = signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+
+ signal->theData[0] = userPtr;
+ signal->theData[1] = SYSFILE->newestRestorableGCI;
+ sendSignal(userRef, GSN_GETGCICONF, signal, 2, JBB);
+}//Dbdih::execGETGCIREQ()
+
+void Dbdih::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT,
+ &capiConnectFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_CONNECT,&cconnectFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT,
+ &cfragstoreFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_REPLICAS,
+ &creplicaFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_TABLE, &ctabFileSize));
+ cfileFileSize = (2 * ctabFileSize) + 2;
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+ return;
+}//Dbdih::execREAD_CONFIG_REQ()
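+
+/* The file record pool is sized from the table count: cfileFileSize =
+ * 2 * ctabFileSize + 2, presumably two table description files per table plus
+ * the two restart info (GCI) files accessed via crestartInfoFile[] elsewhere
+ * in this block.
+ */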
+
+void Dbdih::execSTART_COPYREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbdih::execSTART_COPYREF()
+
+void Dbdih::execSTART_FRAGCONF(Signal* signal)
+{
+ (void)signal; // Don't want compiler warning
+ /* ********************************************************************* */
+ /* If anyone wants to add functionality in this method, be aware that */
+ /* for temporary tables no START_FRAGREQ is sent and therefore no */
+ /* START_FRAGCONF signal will be received for those tables!! */
+ /* ********************************************************************* */
+ jamEntry();
+ return;
+}//Dbdih::execSTART_FRAGCONF()
+
+void Dbdih::execSTART_MEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dbdih::execSTART_MEREF()
+
+void Dbdih::execTAB_COMMITREQ(Signal* signal)
+{
+ TabRecordPtr tabPtr;
+ jamEntry();
+ Uint32 tdictPtr = signal->theData[0];
+ BlockReference tdictBlockref = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_CREATING);
+ tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
+ signal->theData[0] = tdictPtr;
+ signal->theData[1] = cownNodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(tdictBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
+ return;
+}//Dbdih::execTAB_COMMITREQ()
+
+/*
+ 3.2 S T A N D A R D S U B P R O G R A M S I N P L E X
+ *************************************************************
+ */
+/*
+ 3.2.1 S T A R T / R E S T A R T
+ **********************************
+ */
+/*****************************************************************************/
+/* ********** START / RESTART MODULE *************/
+/*****************************************************************************/
+/*
+ 3.2.1.1 LOADING O W N B L O C K R E F E R E N C E (ABSOLUTE PHASE 1)
+ *****************************************************************************
+ */
+void Dbdih::execDIH_RESTARTREQ(Signal* signal)
+{
+ jamEntry();
+ cntrlblockref = signal->theData[0];
+ if(theConfiguration.getInitialStart()){
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ } else {
+ readGciFileLab(signal);
+ }
+ return;
+}//Dbdih::execDIH_RESTARTREQ()
+
+void Dbdih::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ signal->theData[0] = 0;
+ signal->theData[1] = 0;
+ signal->theData[2] = 0;
+ signal->theData[3] = 1; // Next start phase
+ signal->theData[4] = 255; // Next start phase
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Dbdih::execSTTOR()
+
+void Dbdih::initialStartCompletedLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------*/
+ /* NOW THAT (RE)START IS COMPLETED WE CAN START THE LCP.*/
+ /*-------------------------------------------------------------------------*/
+ return;
+}//Dbdih::initialStartCompletedLab()
+
+/*
+ * ***************************************************************************
+ * S E N D I N G R E P L Y T O S T A R T / R E S T A R T R E Q U E S T S
+ * ****************************************************************************
+ */
+void Dbdih::ndbsttorry10Lab(Signal* signal, Uint32 _line)
+{
+ /*-------------------------------------------------------------------------*/
+ // AN NDB START PHASE HAS BEEN COMPLETED. WHEN START PHASE 6 IS COMPLETED WE
+ // RECORD THAT THE SYSTEM IS RUNNING.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dbdih::ndbsttorry10Lab()
+
+/*
+****************************************
+I N T E R N A L P H A S E S
+****************************************
+*/
+/*---------------------------------------------------------------------------*/
+/*NDB_STTOR START SIGNAL AT START/RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ BlockReference cntrRef = signal->theData[0]; /* SENDERS BLOCK REFERENCE */
+ Uint32 ownNodeId = signal->theData[1]; /* OWN PROCESSOR ID*/
+ Uint32 phase = signal->theData[2]; /* INTERNAL START PHASE*/
+ Uint32 typestart = signal->theData[3];
+
+ cstarttype = typestart;
+ cstartPhase = phase;
+
+ switch (phase){
+ case ZNDB_SPH1:
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* Set the delay between local checkpoints in ndb startphase 1. */
+ /*----------------------------------------------------------------------*/
+ cownNodeId = ownNodeId;
+ /*-----------------------------------------------------------------------*/
+ // Compute all static block references in this node as part of
+ // ndb start phase 1.
+ /*-----------------------------------------------------------------------*/
+ cntrlblockref = cntrRef;
+ clocaltcblockref = calcTcBlockRef(ownNodeId);
+ clocallqhblockref = calcLqhBlockRef(ownNodeId);
+ cdictblockref = calcDictBlockRef(ownNodeId);
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+
+ case ZNDB_SPH2:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // Set the number of replicas, maximum is 4 replicas.
+ // Read the ndb nodes from the configuration.
+ /*-----------------------------------------------------------------------*/
+
+ /*-----------------------------------------------------------------------*/
+ // For node restarts we will also add a request for permission
+ // to continue the system restart.
+ // The permission is given by the master node in the alive set.
+ /*-----------------------------------------------------------------------*/
+ createMutexes(signal, 0);
+ break;
+
+ case ZNDB_SPH3:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // Non-master nodes performing an initial start will execute
+ // the start request here since the
+ // initial start does not need to synchronise as much with the master.
+ // In the master nodes the start
+ // request will be sent directly to dih (in ndb_startreq) when all
+ // nodes have completed phase 3 of the start.
+ /*-----------------------------------------------------------------------*/
+ cmasterState = MASTER_IDLE;
+ if(cstarttype == NodeState::ST_INITIAL_START ||
+ cstarttype == NodeState::ST_SYSTEM_RESTART){
+ jam();
+ cmasterState = isMaster() ? MASTER_ACTIVE : MASTER_IDLE;
+ }
+ if (!isMaster() && cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ ndbStartReqLab(signal, cntrRef);
+ return;
+ }//if
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+
+ case ZNDB_SPH4:
+ jam();
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ cmasterTakeOverNode = ZNIL;
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ if (isMaster()) {
+ jam();
+ systemRestartTakeOverLab(signal);
+ if (anyActiveTakeOver() && false) {
+ jam();
+ ndbout_c("1 - anyActiveTakeOver == true");
+ return;
+ }
+ }
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ case NodeState::ST_NODE_RESTART:
+ jam();
+ /***********************************************************************
+ * When starting nodes while system is operational we must be controlled
+ * by the master since only one node restart is allowed at a time.
+ * When this signal is confirmed the master has also copied the
+ * dictionary and the distribution information.
+ */
+ StartMeReq * req = (StartMeReq*)&signal->theData[0];
+ req->startingRef = reference();
+ req->startingVersion = 0; // Obsolete
+ sendSignal(cmasterdihref, GSN_START_MEREQ, signal,
+ StartMeReq::SignalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+ break;
+ case ZNDB_SPH5:
+ jam();
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ /*---------------------------------------------------------------------*/
+ // WE EXECUTE A LOCAL CHECKPOINT AS A PART OF A SYSTEM RESTART.
+ // THE IDEA IS THAT WE NEED TO
+ // ENSURE THAT WE CAN RECOVER FROM PROBLEMS CAUSED BY MANY NODE
+ // CRASHES THAT CAUSES THE LOG
+ // TO GROW AND THE NUMBER OF LOG ROUNDS TO EXECUTE TO GROW.
+ // THIS CAN OTHERWISE GET US INTO
+ // A SITUATION WHICH IS UNREPAIRABLE. THUS WE EXECUTE A CHECKPOINT
+ // BEFORE ALLOWING ANY TRANSACTIONS TO START.
+ /*---------------------------------------------------------------------*/
+ if (!isMaster()) {
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+
+ c_lcpState.immediateLcpStart = true;
+ cwaitLcpSr = true;
+ checkLcpStart(signal, __LINE__);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = reference();
+ sendSignal(cmasterdihref, GSN_START_COPYREQ, signal, 2, JBB);
+ return;
+ }
+ ndbrequire(false);
+ case ZNDB_SPH6:
+ jam();
+ switch(typestart){
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ if(isMaster()){
+ jam();
+ startGcp(signal);
+ }
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }
+ ndbrequire(false);
+ break;
+ default:
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ break;
+ }//switch
+}//Dbdih::execNDB_STTOR()
+
+void
+Dbdih::createMutexes(Signal * signal, Uint32 count){
+ Callback c = { safe_cast(&Dbdih::createMutex_done), count };
+
+ switch(count){
+ case 0:{
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.create(c);
+ return;
+ }
+ case 1:{
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.create(c);
+ return;
+ }
+ }
+
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_READ_NODESREQ, signal, 1, JBB);
+}
+
+void
+Dbdih::createMutex_done(Signal* signal, Uint32 senderData, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ switch(senderData){
+ case 0:{
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.release();
+ }
+ case 1:{
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.release();
+ }
+ }
+
+ createMutexes(signal, senderData + 1);
+}
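+
+/* createMutexes() and createMutex_done() above form a small chain: each
+ * successfully created mutex triggers the callback, which releases the
+ * signal-local handle(s) and calls createMutexes() again with count + 1,
+ * until both mutexes exist and READ_NODESREQ is finally sent to the
+ * controlling block (cntrlblockref).
+ */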
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* WE HAVE BEEN REQUESTED BY NDBCNTR TO PERFORM A RESTART OF THE */
+/* DATABASE TABLES. */
+/* THIS SIGNAL IS SENT AFTER COMPLETING PHASE 3 IN ALL BLOCKS IN A */
+/* SYSTEM RESTART. WE WILL ALSO JUMP TO THIS LABEL FROM PHASE 3 IN AN */
+/* INITIAL START. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::execNDB_STARTREQ(Signal* signal)
+{
+ jamEntry();
+ BlockReference ref = signal->theData[0];
+ cstarttype = signal->theData[1];
+ ndbStartReqLab(signal, ref);
+}//Dbdih::execNDB_STARTREQ()
+
+void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref)
+{
+ cndbStartReqBlockref = ref;
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ initRestartInfo();
+ initGciFilesLab(signal);
+ return;
+ }
+
+ ndbrequire(isMaster());
+ copyGciLab(signal, CopyGCIReq::RESTART); // We have already read the file!
+}//Dbdih::ndbStartReqLab()
+
+void Dbdih::execREAD_NODESCONF(Signal* signal)
+{
+ unsigned i;
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ jamEntry();
+ Uint32 nodeArray[MAX_NDB_NODES];
+
+ csystemnodes = readNodes->noOfNodes;
+ cmasterNodeId = readNodes->masterNodeId;
+ int index = 0;
+ NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes);
+ for (i = 1; i < MAX_NDB_NODES; i++){
+ jam();
+ if(tmp.get(i)){
+ jam();
+ nodeArray[index] = i;
+ if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){
+ jam();
+ con_lineNodes++;
+ }//if
+ index++;
+ }//if
+ }//for
+
+ if(cstarttype == NodeState::ST_SYSTEM_RESTART ||
+ cstarttype == NodeState::ST_NODE_RESTART){
+
+ for(i = 1; i<MAX_NDB_NODES; i++){
+ const Uint32 stat = Sysfile::getNodeStatus(i, SYSFILE->nodeStatus);
+ if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){
+ jam();
+ continue;
+ }
+
+ if(tmp.get(i) && stat != Sysfile::NS_NotDefined){
+ jam();
+ continue;
+ }
+ char buf[255];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of storage nodes (node %d)", i);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
+
+ ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES);
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ ndbrequire(cnoReplicas <= csystemnodes);
+ calculateHotSpare();
+ ndbrequire(cnoReplicas <= (csystemnodes - cnoHotSpare));
+ }//if
+
+ cmasterdihref = calcDihBlockRef(cmasterNodeId);
+ /*-------------------------------------------------------------------------*/
+ /* MAKE THE PRN-RECORD LIST, WHICH IS ONE OF THE NODE LISTS IN THIS BLOCK. */
+ /*-------------------------------------------------------------------------*/
+ makePrnList(readNodes, nodeArray);
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ /**----------------------------------------------------------------------
+ * WHEN WE INITIALLY START A DATABASE WE WILL CREATE NODE GROUPS.
+ * ALL NODES ARE PUT INTO NODE GROUPS ALTHOUGH HOT SPARE NODES ARE PUT
+ * INTO A SPECIAL NODE GROUP. IN EACH NODE GROUP WE HAVE THE SAME AMOUNT
+ * OF NODES AS THERE ARE NUMBER OF REPLICAS.
+ * ONE POSSIBLE USAGE OF NODE GROUPS IS TO MAKE A NODE GROUP A COMPLETE
+ * FRAGMENT OF THE DATABASE. THIS MEANS THAT ALL REPLICAS WILL BE STORED
+ * IN THE NODE GROUP.
+ *-----------------------------------------------------------------------*/
+ makeNodeGroups(nodeArray);
+ }//if
+ ndbrequire(checkNodeAlive(cmasterNodeId));
+ if (cstarttype == NodeState::ST_INITIAL_START) {
+ jam();
+ /**-----------------------------------------------------------------------
+ * INITIALISE THE SECOND NODE-LIST AND SET NODE BITS AND SOME NODE STATUS.
+ * CLOSELY CONNECTED WITH MAKE_NODE_GROUPS. CHANGING ONE WILL AFFECT THE
+ * OTHER AS WELL.
+ *-----------------------------------------------------------------------*/
+ setInitialActiveStatus();
+ } else if (cstarttype == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ /*empty*/;
+ } else if ((cstarttype == NodeState::ST_NODE_RESTART) ||
+ (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ nodeRestartPh2Lab(signal);
+ return;
+ } else {
+ ndbrequire(false);
+ }//if
+ /**------------------------------------------------------------------------
+ * ESTABLISH CONNECTIONS WITH THE OTHER DIH BLOCKS AND INITIALISE THIS
+ * NODE-LIST THAT HANDLES CONNECTION WITH OTHER DIH BLOCKS.
+ *-------------------------------------------------------------------------*/
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execREAD_NODESCONF()
+
+/*---------------------------------------------------------------------------*/
+/* START NODE LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::nodeRestartPh2Lab(Signal* signal)
+{
+ /*------------------------------------------------------------------------*/
+ // REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY
+ // RUNNING SYSTEM.
+ /*------------------------------------------------------------------------*/
+ StartPermReq * const req = (StartPermReq *)&signal->theData[0];
+
+ req->blockRef = reference();
+ req->nodeId = cownNodeId;
+ req->startType = cstarttype;
+ sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB);
+}//Dbdih::nodeRestartPh2Lab()
+
+void Dbdih::execSTART_PERMCONF(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7121);
+ Uint32 nodeId = signal->theData[0];
+ cfailurenr = signal->theData[1];
+ ndbrequire(nodeId == cownNodeId);
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execSTART_PERMCONF()
+
+void Dbdih::execSTART_PERMREF(Signal* signal)
+{
+ jamEntry();
+ Uint32 errorCode = signal->theData[1];
+ if (errorCode == ZNODE_ALREADY_STARTING_ERROR) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // The master was busy adding another node. We will wait for a second and
+ // try again.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = DihContinueB::ZSTART_PERMREQ_AGAIN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 3000, 1);
+ return;
+ }//if
+ /*------------------------------------------------------------------------*/
+ // Some node process in another node involving our node was still active. We
+ // will recover from this by crashing here.
+ // This is a controlled restart using the
+ // already existing features of node crashes. It is not a bug getting here.
+ /*-------------------------------------------------------------------------*/
+ ndbrequire(false);
+ return;
+}//Dbdih::execSTART_PERMREF()
+
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS RECEIVED IN THE STARTING NODE WHEN THE START_MEREQ */
+/* HAS BEEN EXECUTED IN THE MASTER NODE. */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_MECONF(Signal* signal)
+{
+ jamEntry();
+ StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
+ Uint32 nodeId = startMe->startingNodeId;
+ const Uint32 startWord = startMe->startWord;
+ Uint32 i;
+
+ CRASH_INSERTION(7130);
+ ndbrequire(nodeId == cownNodeId);
+ arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4);
+ for(i = 0; i < StartMeConf::DATA_SIZE; i++)
+ cdata[startWord+i] = startMe->data[i];
+
+ if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){
+ jam();
+ /**
+ * We are still waiting for data
+ */
+ return;
+ }
+ jam();
+
+ /**
+ * Copy into sysfile
+ *
+ * But don't copy the lastCompletedGCI values
+ */
+ Uint32 tempGCP[MAX_NDB_NODES];
+ for(i = 0; i < MAX_NDB_NODES; i++)
+ tempGCP[i] = SYSFILE->lastCompletedGCI[i];
+
+ for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++)
+ sysfileData[i] = cdata[i];
+ for(i = 0; i < MAX_NDB_NODES; i++)
+ SYSFILE->lastCompletedGCI[i] = tempGCP[i];
+
+ setNodeActiveStatus();
+ setNodeGroups();
+ ndbsttorry10Lab(signal, __LINE__);
+}//Dbdih::execSTART_MECONF()
+
+void Dbdih::execSTART_COPYCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ ndbrequire(nodeId == cownNodeId);
+ CRASH_INSERTION(7132);
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+}//Dbdih::execSTART_COPYCONF()
+
+/*---------------------------------------------------------------------------*/
+/* MASTER LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART PERMISSION REQUEST */
+/*---------------------------------------------------------------------------*/
+// A REQUEST FROM A STARTING NODE TO PERFORM A NODE RESTART. IF NO OTHER NODE
+// IS ACTIVE IN PERFORMING A NODE RESTART AND THERE ARE NO ACTIVE PROCESSES IN
+// THIS NODE INVOLVING THE STARTING NODE THIS REQUEST WILL BE GRANTED.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_PERMREQ(Signal* signal)
+{
+ StartPermReq * const req = (StartPermReq*)&signal->theData[0];
+ jamEntry();
+ const BlockReference retRef = req->blockRef;
+ const Uint32 nodeId = req->nodeId;
+ const Uint32 typeStart = req->startType;
+
+ CRASH_INSERTION(7122);
+ ndbrequire(isMaster());
+ ndbrequire(refToNode(retRef) == nodeId);
+ if ((c_nodeStartMaster.activeState) ||
+ (c_nodeStartMaster.wait != ZFALSE)) {
+ jam();
+ signal->theData[0] = nodeId;
+ signal->theData[1] = ZNODE_ALREADY_STARTING_ERROR;
+ sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB);
+ return;
+ }//if
+ if (getNodeStatus(nodeId) != NodeRecord::DEAD){
+ ndbout << "nodeStatus in START_PERMREQ = "
+ << (Uint32) getNodeStatus(nodeId) << endl;
+ ndbrequire(false);
+ }//if
+
+ /*----------------------------------------------------------------------
+ * WE START THE INCLUSION PROCEDURE
+ * ---------------------------------------------------------------------*/
+ c_nodeStartMaster.failNr = cfailurenr;
+ c_nodeStartMaster.wait = ZFALSE;
+ c_nodeStartMaster.startInfoErrorCode = 0;
+ c_nodeStartMaster.startNode = nodeId;
+ c_nodeStartMaster.activeState = true;
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_INFOREQ;
+
+ setNodeStatus(nodeId, NodeRecord::STARTING);
+ /**
+ * But if it's a NodeState::ST_INITIAL_NODE_RESTART
+ *
+ * We first have to clear LCP's
+ * For normal node restart we simply ensure that all nodes
+ * are informed of the node restart
+ */
+ StartInfoReq *const r =(StartInfoReq*)&signal->theData[0];
+ r->startingNodeId = nodeId;
+ r->typeStart = typeStart;
+ r->systemFailureNo = cfailurenr;
+ sendLoopMacro(START_INFOREQ, sendSTART_INFOREQ);
+}//Dbdih::execSTART_PERMREQ()
+
+void Dbdih::execSTART_INFOREF(Signal* signal)
+{
+ StartInfoRef * ref = (StartInfoRef*)&signal->theData[0];
+ if (getNodeStatus(ref->startingNodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(c_nodeStartMaster.startNode == ref->startingNodeId);
+ c_nodeStartMaster.startInfoErrorCode = ref->errorCode;
+ startInfoReply(signal, ref->sendingNodeId);
+}//Dbdih::execSTART_INFOREF()
+
+void Dbdih::execSTART_INFOCONF(Signal* signal)
+{
+ jamEntry();
+ StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
+ if (getNodeStatus(conf->startingNodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(c_nodeStartMaster.startNode == conf->startingNodeId);
+ startInfoReply(signal, conf->sendingNodeId);
+}//Dbdih::execSTART_INFOCONF()
+
+void Dbdih::startInfoReply(Signal* signal, Uint32 nodeId)
+{
+ receiveLoopMacro(START_INFOREQ, nodeId);
+ /**
+ * We're finished with the START_INFOREQ's
+ */
+ if (c_nodeStartMaster.startInfoErrorCode == 0) {
+ jam();
+ /**
+ * Everything has been a success so far
+ */
+ StartPermConf * conf = (StartPermConf*)&signal->theData[0];
+ conf->startingNodeId = c_nodeStartMaster.startNode;
+ conf->systemFailureNo = cfailurenr;
+ sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
+ GSN_START_PERMCONF, signal, StartPermConf::SignalLength, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_PERMCONF;
+ } else {
+ jam();
+ StartPermRef * ref = (StartPermRef*)&signal->theData[0];
+ ref->startingNodeId = c_nodeStartMaster.startNode;
+ ref->errorCode = c_nodeStartMaster.startInfoErrorCode;
+ sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode),
+ GSN_START_PERMREF, signal, StartPermRef::SignalLength, JBB);
+ nodeResetStart();
+ }//if
+}//Dbdih::startInfoReply()
+
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART CONTINUE REQUEST */
+/*---------------------------------------------------------------------------*/
+// THIS SIGNAL AND THE CODE BELOW ARE EXECUTED BY THE MASTER WHEN IT HAS BEEN
+// REQUESTED TO START UP A NEW NODE. The master instructs the starting node
+// how to set up its log for continued execution.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_MEREQ(Signal* signal)
+{
+ StartMeReq * req = (StartMeReq*)&signal->theData[0];
+ jamEntry();
+ const BlockReference Tblockref = req->startingRef;
+ const Uint32 Tnodeid = refToNode(Tblockref);
+
+ ndbrequire(isMaster());
+ ndbrequire(c_nodeStartMaster.startNode == Tnodeid);
+ ndbrequire(getNodeStatus(Tnodeid) == NodeRecord::STARTING);
+
+ sendSTART_RECREQ(signal, Tnodeid);
+}//Dbdih::execSTART_MEREQ()
+
+void Dbdih::nodeRestartStartRecConfLab(Signal* signal)
+{
+ c_nodeStartMaster.blockLcp = true;
+ if ((c_lcpState.lcpStatus != LCP_STATUS_IDLE) &&
+ (c_lcpState.lcpStatus != LCP_TCGET)) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // WE WILL NOT ALLOW A NODE RESTART TO COME IN WHEN A LOCAL CHECKPOINT IS
+ // ONGOING. IT WOULD COMPLICATE THE LCP PROTOCOL TOO MUCH. WE WILL ADD THIS
+ // LATER.
+ /*-----------------------------------------------------------------------*/
+ return;
+ }//if
+ lcpBlockedLab(signal);
+}//Dbdih::nodeRestartStartRecConfLab()
+
+void Dbdih::lcpBlockedLab(Signal* signal)
+{
+ ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)==NodeRecord::STARTING);
+ /*------------------------------------------------------------------------*/
+ // NOW WE HAVE COPIED ALL INFORMATION IN DICT WE ARE NOW READY TO COPY ALL
+ // INFORMATION IN DIH TO THE NEW NODE.
+ /*------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = 10;
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_COPY_TABREQ;
+}//Dbdih::lcpBlockedLab()
+
+void Dbdih::nodeDictStartConfLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------*/
+ // NOW WE HAVE COPIED BOTH DIH AND DICT INFORMATION. WE ARE NOW READY TO
+ // INTEGRATE THE NODE INTO THE LCP AND GCP PROTOCOLS AND TO ALLOW UPDATES OF
+ // THE DICTIONARY AGAIN.
+ /*-------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = ZFALSE;
+ c_nodeStartMaster.blockGcp = true;
+ if (cgcpStatus != GCP_READY) {
+ /*-----------------------------------------------------------------------*/
+ // The global checkpoint is executing. Wait until it is completed before we
+ // continue processing the node recovery.
+ /*-----------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ gcpBlockedLab(signal);
+
+ /*-----------------------------------------------------------------*/
+ // Report that node restart has completed copy of dictionary.
+ /*-----------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyDict;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+}//Dbdih::nodeDictStartConfLab()
+
+void Dbdih::dihCopyCompletedLab(Signal* signal)
+{
+ BlockReference ref = calcDictBlockRef(c_nodeStartMaster.startNode);
+ DictStartReq * req = (DictStartReq*)&signal->theData[0];
+ req->restartGci = cnewgcp;
+ req->senderRef = reference();
+ sendSignal(ref, GSN_DICTSTARTREQ,
+ signal, DictStartReq::SignalLength, JBB);
+ c_nodeStartMaster.m_outstandingGsn = GSN_DICTSTARTREQ;
+ c_nodeStartMaster.wait = 0;
+}//Dbdih::dihCopyCompletedLab()
+
+void Dbdih::gcpBlockedLab(Signal* signal)
+{
+ /*-----------------------------------------------------------------*/
+ // Report that node restart has completed copy of distribution info.
+ /*-----------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyDistr;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ /**
+ * The node DIH will be part of LCP
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = c_nodeStartMaster.startNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->m_inclDihLcp = true;
+
+ /*-------------------------------------------------------------------------*/
+ // NOW IT IS TIME TO INFORM ALL OTHER NODES IN THE CLUSTER OF THE STARTED
+ // NODE SUCH THAT THEY ALSO INCLUDE THE NODE IN THE NODE LISTS AND SO FORTH.
+ /*------------------------------------------------------------------------*/
+ sendLoopMacro(INCL_NODEREQ, sendINCL_NODEREQ);
+ /*-------------------------------------------------------------------------*/
+ // We also need to send to the starting node to ensure it is aware of the
+ // global checkpoint id and the correct state. We do not wait for any reply
+ // since the starting node will not send any.
+ /*-------------------------------------------------------------------------*/
+ sendINCL_NODEREQ(signal, c_nodeStartMaster.startNode);
+}//Dbdih::gcpBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+// THIS SIGNAL IS EXECUTED IN BOTH SLAVES AND IN THE MASTER
+/*---------------------------------------------------------------------------*/
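+// The local inclusion is a chain: LQH -> TC -> DICT -> BACKUP, with SUMA and
+// GREP informed without waiting for a reply. Once the local BACKUP block has
+// answered, INCL_NODECONF is sent to the master DIH, which collects one such
+// reply per node via receiveLoopMacro before continuing with UNBLO_DICTCONF.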
+void Dbdih::execINCL_NODECONF(Signal* signal)
+{
+ Uint32 TsendNodeId;
+ Uint32 TstartNode_or_blockref;
+
+ jamEntry();
+ TstartNode_or_blockref = signal->theData[0];
+ TsendNodeId = signal->theData[1];
+
+ if (TstartNode_or_blockref == clocallqhblockref) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL LQH BLOCK.
+ // WE WILL NOW SEND INCLUDE TO THE TC BLOCK.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(clocaltcblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == clocaltcblockref) {
+ jam();
+ /*----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL TC BLOCK.
+ // WE WILL NOW SEND INCLUDE TO THE DICT BLOCK.
+ /*----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(cdictblockref, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == cdictblockref) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // THIS SIGNAL CAME FROM THE LOCAL DICT BLOCK. WE WILL NOW SEND INCLUDE TO
+ // THE BACKUP BLOCK.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = c_nodeStartSlave.nodeId;
+ sendSignal(BACKUP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+
+ // Suma will not send response to this for now, later...
+ sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+ // Grep will not send response to this for now, later...
+ sendSignal(GREP_REF, GSN_INCL_NODEREQ, signal, 2, JBB);
+ return;
+ }//if
+ if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){
+ jam();
+ signal->theData[0] = c_nodeStartSlave.nodeId;
+ signal->theData[1] = cownNodeId;
+ sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB);
+ c_nodeStartSlave.nodeId = 0;
+ return;
+ }
+
+ ndbrequire(cmasterdihref == reference());
+ receiveLoopMacro(INCL_NODEREQ, TsendNodeId);
+
+ CRASH_INSERTION(7128);
+ /*-------------------------------------------------------------------------*/
+ // Now that we have included the starting node in the node lists in the
+ // various blocks we are ready to start the global checkpoint protocol
+ /*------------------------------------------------------------------------*/
+ c_nodeStartMaster.wait = 11;
+ c_nodeStartMaster.blockGcp = false;
+
+ signal->theData[0] = reference();
+ sendSignal(reference(), GSN_UNBLO_DICTCONF, signal, 1, JBB);
+}//Dbdih::execINCL_NODECONF()
+
+void Dbdih::execUNBLO_DICTCONF(Signal* signal)
+{
+ jamEntry();
+ c_nodeStartMaster.wait = ZFALSE;
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ return;
+ }//if
+
+ CRASH_INSERTION(7129);
+ /**-----------------------------------------------------------------------
+ * WE HAVE NOW PREPARED IT FOR INCLUSION IN THE LCP PROTOCOL.
+ * WE CAN NOW START THE LCP PROTOCOL AGAIN.
+ * WE HAVE ALSO MADE THIS FOR THE GCP PROTOCOL.
+ * WE ARE READY TO START THE PROTOCOLS AND RESPOND TO THE START REQUEST
+ * FROM THE STARTING NODE.
+ *------------------------------------------------------------------------*/
+
+ StartMeConf * const startMe = (StartMeConf *)&signal->theData[0];
+
+ const Uint32 wordPerSignal = StartMeConf::DATA_SIZE;
+ const int noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) /
+ wordPerSignal);
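+ // The sysfile is sent in chunks of StartMeConf::DATA_SIZE words; the
+ // division is rounded up so the whole Sysfile::SYSFILE_SIZE32 words are
+ // covered even when SYSFILE_SIZE32 is not a multiple of DATA_SIZE.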
+
+ startMe->startingNodeId = c_nodeStartMaster.startNode;
+ startMe->startWord = 0;
+
+ const Uint32 ref = calcDihBlockRef(c_nodeStartMaster.startNode);
+ for(int i = 0; i < noOfSignals; i++){
+ jam();
+ { // Do copy
+ const int startWord = startMe->startWord;
+ for(Uint32 j = 0; j < wordPerSignal; j++){
+ startMe->data[j] = sysfileData[j+startWord];
+ }
+ }
+ sendSignal(ref, GSN_START_MECONF, signal, StartMeConf::SignalLength, JBB);
+ startMe->startWord += wordPerSignal;
+ }//for
+ c_nodeStartMaster.m_outstandingGsn = GSN_START_MECONF;
+}//Dbdih::execUNBLO_DICTCONF()
+
+/*---------------------------------------------------------------------------*/
+/* NODE RESTART COPY REQUEST */
+/*---------------------------------------------------------------------------*/
+// A NODE RESTART HAS REACHED ITS FINAL PHASE WHEN THE DATA IS TO BE COPIED
+// TO THE NODE. START_COPYREQ IS EXECUTED BY THE MASTER NODE.
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_COPYREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 startNodeId = signal->theData[0];
+ //BlockReference startingRef = signal->theData[1];
+ ndbrequire(c_nodeStartMaster.startNode == startNodeId);
+ /*-------------------------------------------------------------------------*/
+ // REPORT Copy process of node restart is now about to start up.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_NR_CopyFragsStarted;
+ signal->theData[1] = startNodeId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7131);
+ nodeRestartTakeOver(signal, startNodeId);
+ // BlockReference ref = calcQmgrBlockRef(startNodeId);
+ // signal->theData[0] = cownNodeId;
+ // Remove comments as soon as I open up the Qmgr block
+ // TODO_RONM
+ // sendSignal(ref, GSN_ALLOW_NODE_CRASHORD, signal, 1, JBB);
+}//Dbdih::execSTART_COPYREQ()
+
+/*---------------------------------------------------------------------------*/
+/* SLAVE LOGIC FOR NODE RESTART */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execSTART_INFOREQ(Signal* signal)
+{
+ jamEntry();
+ StartInfoReq *const req =(StartInfoReq*)&signal->theData[0];
+ Uint32 startNode = req->startingNodeId;
+ if (cfailurenr != req->systemFailureNo) {
+ jam();
+ //---------------------------------------------------------------
+ // A failure occurred since the master sent this request. We will ignore
+ // this request since the starting node is already dead.
+ //---------------------------------------------------------------
+ return;
+ }//if
+ CRASH_INSERTION(7123);
+ if (isMaster()) {
+ jam();
+ ndbrequire(getNodeStatus(startNode) == NodeRecord::STARTING);
+ } else {
+ jam();
+ ndbrequire(getNodeStatus(startNode) == NodeRecord::DEAD);
+ }//if
+ if ((!getAllowNodeStart(startNode)) ||
+ (c_nodeStartSlave.nodeId != 0) ||
+ (ERROR_INSERTED(7124))) {
+ jam();
+ StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0];
+ ref->startingNodeId = startNode;
+ ref->sendingNodeId = cownNodeId;
+ ref->errorCode = ZNODE_START_DISALLOWED_ERROR;
+ sendSignal(cmasterdihref, GSN_START_INFOREF, signal,
+ StartInfoRef::SignalLength, JBB);
+ return;
+ }//if
+ setNodeStatus(startNode, NodeRecord::STARTING);
+ if (req->typeStart == NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ setAllowNodeStart(startNode, false);
+ invalidateNodeLCP(signal, startNode, 0);
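+ // For an initial node restart no CONF is sent here; presumably the
+ // START_INFOCONF is sent once invalidateNodeLCP() has cleared the LCP
+ // information for the starting node.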
+ } else {
+ jam();
+ StartInfoConf * c = (StartInfoConf*)&signal->theData[0];
+ c->sendingNodeId = cownNodeId;
+ c->startingNodeId = startNode;
+ sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
+ StartInfoConf::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::execSTART_INFOREQ()
+
+void Dbdih::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 retRef = signal->theData[0];
+ Uint32 nodeId = signal->theData[1];
+ Uint32 tnodeStartFailNr = signal->theData[2];
+ currentgcp = signal->theData[4];
+ CRASH_INSERTION(7127);
+ cnewgcp = currentgcp;
+ coldgcp = currentgcp - 1;
+ if (!isMaster()) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We don't want to change the state of the master since he can be in the
+ // state LCP_TCGET at this time.
+ /*-----------------------------------------------------------------------*/
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ }//if
+
+ /*-------------------------------------------------------------------------*/
+ // When a node is restarted we must ensure that an LCP is run
+ // as soon as possible and then reset the delay according to the original
+ // configuration.
+ // Without an initial local checkpoint the new node will not be available.
+ /*-------------------------------------------------------------------------*/
+ if (getOwnNodeId() == nodeId) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We are the starting node. We came here only to set the global checkpoint
+ // id's and the lcp status.
+ /*-----------------------------------------------------------------------*/
+ CRASH_INSERTION(7171);
+ return;
+ }//if
+ if (getNodeStatus(nodeId) != NodeRecord::STARTING) {
+ jam();
+ return;
+ }//if
+ ndbrequire(cfailurenr == tnodeStartFailNr);
+ ndbrequire (c_nodeStartSlave.nodeId == 0);
+ c_nodeStartSlave.nodeId = nodeId;
+
+ ndbrequire (retRef == cmasterdihref);
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
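+ /*-------------------------------------------------------------------------*/
+ // RE-INITIALISE THE NODE RECORD IN PLACE. ONLY THE NODE GROUP AND THE SAVED
+ // ACTIVE STATUS SURVIVE; THE NODE IS THEN MOVED FROM THE DEAD LIST TO THE
+ // ALIVE LIST.
+ /*-------------------------------------------------------------------------*/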
+ Sysfile::ActiveStatus TsaveState = nodePtr.p->activeStatus;
+ Uint32 TnodeGroup = nodePtr.p->nodeGroup;
+
+ new (nodePtr.p) NodeRecord();
+ nodePtr.p->nodeGroup = TnodeGroup;
+ nodePtr.p->activeStatus = TsaveState;
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ nodePtr.p->useInTransactions = true;
+ nodePtr.p->m_inclDihLcp = true;
+
+ removeDeadNode(nodePtr);
+ insertAlive(nodePtr);
+ con_lineNodes++;
+
+ /*-------------------------------------------------------------------------*/
+ // WE WILL ALSO SEND THE INCLUDE NODE REQUEST TO THE LOCAL LQH BLOCK.
+ /*-------------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = nodeId;
+ signal->theData[2] = currentgcp;
+ sendSignal(clocallqhblockref, GSN_INCL_NODEREQ, signal, 3, JBB);
+}//Dbdih::execINCL_NODEREQ()
+
+/* ------------------------------------------------------------------------- */
+// execINCL_NODECONF() is found in the master logic part since it is used by
+// both the master and the slaves.
+/* ------------------------------------------------------------------------- */
+
+/*****************************************************************************/
+/*********** TAKE OVER DECISION MODULE *************/
+/*****************************************************************************/
+// This module contains the subroutines that take the decision whether to take
+// over a node now or not.
+/* ------------------------------------------------------------------------- */
+/* MASTER LOGIC FOR SYSTEM RESTART */
+/* ------------------------------------------------------------------------- */
+// WE ONLY COME HERE IF WE ARE THE MASTER AND WE ARE PERFORMING A SYSTEM
+// RESTART. WE ALSO COME HERE DURING THIS SYSTEM RESTART ONE TIME PER NODE
+// THAT NEEDS TAKE OVER.
+/*---------------------------------------------------------------------------*/
+// WE CHECK IF ANY NODE NEEDS TO BE TAKEN OVER AND THE TAKE OVER HAS NOT YET
+// BEEN STARTED OR COMPLETED.
+/*---------------------------------------------------------------------------*/
+void
+Dbdih::systemRestartTakeOverLab(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE HAVE NOT REACHED A STATE YET WHERE THIS NODE NEEDS TO BE TAKEN OVER
+ /*---------------------------------------------------------------------*/
+ case Sysfile::NS_ActiveMissed_2:
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ /*---------------------------------------------------------------------*/
+ // THIS NODE IS IN TROUBLE.
+ // WE MUST SUCCEED WITH A LOCAL CHECKPOINT WITH THIS NODE TO REMOVE THE
+ // DANGER. IF THE NODE IS NOT ALIVE THEN THIS WILL NOT BE
+ // POSSIBLE AND WE CAN START THE TAKE OVER IMMEDIATELY IF WE HAVE ANY
+ // NODES THAT CAN PERFORM A TAKE OVER.
+ /*---------------------------------------------------------------------*/
+ if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
+ jam();
+ Uint32 ThotSpareNode = findHotSpare();
+ if (ThotSpareNode != RNIL) {
+ jam();
+ startTakeOver(signal, RNIL, ThotSpareNode, nodePtr.i);
+ }//if
+ } else if(nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver){
+ jam();
+ /*-------------------------------------------------------------------*/
+ // NOT ACTIVE NODES THAT HAVE NOT YET BEEN TAKEN OVER NEED TO BE TAKEN OVER
+ // IMMEDIATELY. IF WE ARE ALIVE WE TAKE OVER OUR OWN NODE.
+ /*-------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, nodePtr.i, nodePtr.i);
+ }//if
+ break;
+ case Sysfile::NS_TakeOver:
+ /**-------------------------------------------------------------------
+ * WE MUST HAVE FAILED IN THE MIDDLE OF THE TAKE OVER PROCESS.
+ * WE WILL CONCLUDE THE TAKE OVER PROCESS NOW.
+ *-------------------------------------------------------------------*/
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ Uint32 takeOverNode = Sysfile::getTakeOverNode(nodePtr.i,
+ SYSFILE->takeOver);
+ if(takeOverNode == 0){
+ jam();
+ warningEvent("Bug in take-over code restarting");
+ takeOverNode = nodePtr.i;
+ }
+ startTakeOver(signal, RNIL, nodePtr.i, takeOverNode);
+ } else {
+ jam();
+ /**-------------------------------------------------------------------
+ * We are not currently taking over, change our active status.
+ *-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ setNodeRestartInfoBits();
+ }//if
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE NEED NOT TAKE OVER NODES THAT ARE HOT SPARE.
+ /*---------------------------------------------------------------------*/
+ case Sysfile::NS_NotDefined:
+ jam();
+ break;
+ /*---------------------------------------------------------------------*/
+ // WE NEED NOT TAKE OVER NODES THAT DO NOT EVEN EXIST IN THE CLUSTER.
+ /*---------------------------------------------------------------------*/
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//for
+ /*-------------------------------------------------------------------------*/
+ /* NO TAKE OVER HAS BEEN INITIATED. */
+ /*-------------------------------------------------------------------------*/
+}//Dbdih::systemRestartTakeOverLab()
+
+/*---------------------------------------------------------------------------*/
+// This subroutine is called as part of node restart in the master node.
+/*---------------------------------------------------------------------------*/
+void Dbdih::nodeRestartTakeOver(Signal* signal, Uint32 startNodeId)
+{
+ switch (getNodeActiveStatus(startNodeId)) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // AN ACTIVE NODE HAS BEEN STARTED. THE ACTIVE NODE MUST THEN GET ALL DATA
+ // IT HAD BEFORE ITS CRASH. WE START THE TAKE OVER IMMEDIATELY.
+ // SINCE WE ARE AN ACTIVE NODE WE WILL TAKE OVER OUR OWN NODE THAT
+ // PREVIOUSLY CRASHED.
+ /*-----------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, startNodeId, startNodeId);
+ break;
+ case Sysfile::NS_HotSpare:{
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // WHEN STARTING UP A HOT SPARE WE WILL CHECK IF ANY NODE NEEDS TO BE TAKEN
+ // OVER. IF SO, WE WILL START THE TAKE OVER.
+ /*-----------------------------------------------------------------------*/
+ bool takeOverStarted = false;
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
+ jam();
+ takeOverStarted = true;
+ startTakeOver(signal, RNIL, startNodeId, nodePtr.i);
+ }//if
+ }//for
+ if (!takeOverStarted) {
+ jam();
+ /*-------------------------------------------------------------------*/
+ // NO TAKE OVER WAS NEEDED AT THE MOMENT. WE START UP AND WAIT UNTIL A
+ // TAKE OVER IS NEEDED.
+ /*-------------------------------------------------------------------*/
+ BlockReference ref = calcDihBlockRef(startNodeId);
+ signal->theData[0] = startNodeId;
+ sendSignal(ref, GSN_START_COPYCONF, signal, 1, JBB);
+ }//if
+ break;
+ }
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // ALL DATA IN THE NODE IS LOST BUT WE HAVE NOT TAKEN OVER YET. WE WILL
+ // TAKE OVER OUR OWN NODE
+ /*-----------------------------------------------------------------------*/
+ startTakeOver(signal, RNIL, startNodeId, startNodeId);
+ break;
+ case Sysfile::NS_TakeOver:{
+ jam();
+ /*--------------------------------------------------------------------
+ * We were in the process of taking over but it was not completed.
+ * We will complete it now instead.
+ *--------------------------------------------------------------------*/
+ Uint32 takeOverNode = Sysfile::getTakeOverNode(startNodeId,
+ SYSFILE->takeOver);
+ startTakeOver(signal, RNIL, startNodeId, takeOverNode);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ nodeResetStart();
+}//Dbdih::nodeRestartTakeOver()
+
+/*************************************************************************/
+// This routine is called when starting a local checkpoint.
+/*************************************************************************/
+void Dbdih::checkStartTakeOver(Signal* signal)
+{
+ NodeRecordPtr csoNodeptr;
+ Uint32 tcsoHotSpareNode;
+ Uint32 tcsoTakeOverNode;
+ if (isMaster()) {
+ /*-----------------------------------------------------------------*/
+ /* WE WILL ONLY START TAKE OVER IF WE ARE MASTER. */
+ /*-----------------------------------------------------------------*/
+ /* WE WILL ONLY START THE TAKE OVER IF THERE IS A NEED FOR */
+ /* A TAKE OVER. */
+ /*-----------------------------------------------------------------*/
+ /* WE CAN ONLY PERFORM THE TAKE OVER IF WE HAVE A HOT SPARE */
+ /* AVAILABLE. */
+ /*-----------------------------------------------------------------*/
+ tcsoTakeOverNode = 0;
+ tcsoHotSpareNode = 0;
+ for (csoNodeptr.i = 1; csoNodeptr.i < MAX_NDB_NODES; csoNodeptr.i++) {
+ ptrAss(csoNodeptr, nodeRecord);
+ if (csoNodeptr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) {
+ jam();
+ tcsoTakeOverNode = csoNodeptr.i;
+ } else {
+ jam();
+ if (csoNodeptr.p->activeStatus == Sysfile::NS_HotSpare) {
+ jam();
+ tcsoHotSpareNode = csoNodeptr.i;
+ }//if
+ }//if
+ }//for
+ if ((tcsoTakeOverNode != 0) &&
+ (tcsoHotSpareNode != 0)) {
+ jam();
+ startTakeOver(signal, RNIL, tcsoHotSpareNode, tcsoTakeOverNode);
+ }//if
+ }//if
+}//Dbdih::checkStartTakeOver()
+
+/*****************************************************************************/
+/*********** NODE ADDING MODULE *************/
+/*********** CODE TO HANDLE TAKE OVER *************/
+/*****************************************************************************/
+// A take over can be initiated by a number of things:
+// 1) A node restart, usually the node takes over itself but can also take
+// over somebody else if its own data was already taken over
+// 2) At system restart it is necessary to use the take over code to recover
+// nodes which had too old checkpoints to be restorable by the usual
+// restoration from disk.
+// 3) When a node has missed too many local checkpoints and is decided by the
+// master to be taken over by a hot spare node that sits around waiting
+// for this to happen.
+//
+// To support multiple node failures efficiently the code is written such that
+// only one take over at a time can handle state transitions, but during a copy
+// fragment other take overs can perform state transitions.
+/*****************************************************************************/
+void Dbdih::startTakeOver(Signal* signal,
+ Uint32 takeOverPtrI,
+ Uint32 startNode,
+ Uint32 nodeTakenOver)
+{
+ NodeRecordPtr toNodePtr;
+ NodeGroupRecordPtr NGPtr;
+ toNodePtr.i = nodeTakenOver;
+ ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
+ NGPtr.i = toNodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ TakeOverRecordPtr takeOverPtr;
+ if (takeOverPtrI == RNIL) {
+ jam();
+ setAllowNodeStart(startNode, false);
+ seizeTakeOver(takeOverPtr);
+ if (startNode == c_nodeStartMaster.startNode) {
+ jam();
+ takeOverPtr.p->toNodeRestart = true;
+ }//if
+ takeOverPtr.p->toStartingNode = startNode;
+ takeOverPtr.p->toFailedNode = nodeTakenOver;
+ } else {
+ jam();
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ ndbrequire(takeOverPtr.p->toStartingNode == startNode);
+ ndbrequire(takeOverPtr.p->toFailedNode == nodeTakenOver);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_WAIT_START_TAKE_OVER);
+ }//if
+ if ((NGPtr.p->activeTakeOver) || (ERROR_INSERTED(7157))) {
+ jam();
+ /**------------------------------------------------------------------------
+ * A take over is already active in this node group. We only allow one
+ * take over per node group. Otherwise we will overload the node group and
+ * also we would require many more checks when starting up copying of
+ * fragments. The parallelism for take over is mainly to ensure that we
+ * can handle take over efficiently in large systems with 4 nodes and above.
+ * A typical case is an 8 node system executing on two 8-cpu boxes.
+ * A box crash in one of the boxes will mean that 4 nodes crash.
+ * We want to be able to restart those four nodes to some
+ * extent in parallel.
+ *
+ * We will wait for a few seconds and then try again.
+ */
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START_TAKE_OVER;
+ signal->theData[0] = DihContinueB::ZSTART_TAKE_OVER;
+ signal->theData[1] = takeOverPtr.i;
+ signal->theData[2] = startNode;
+ signal->theData[3] = nodeTakenOver;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 5000, 4);
+ return;
+ }//if
+ NGPtr.p->activeTakeOver = true;
+ if (startNode == nodeTakenOver) {
+ jam();
+ switch (getNodeActiveStatus(nodeTakenOver)) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ case Sysfile::NS_TakeOver:
+ jam();
+ setNodeActiveStatus(nodeTakenOver, Sysfile::NS_TakeOver);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ } else {
+ jam();
+ setNodeActiveStatus(nodeTakenOver, Sysfile::NS_HotSpare);
+ setNodeActiveStatus(startNode, Sysfile::NS_TakeOver);
+ changeNodeGroups(startNode, nodeTakenOver);
+ }//if
+ setNodeRestartInfoBits();
+ /* ---------------------------------------------------------------------- */
+ /* WE SET THE RESTART INFORMATION TO INDICATE THAT WE ARE ABOUT TO TAKE */
+ /* OVER THE FAILED NODE. WE SET THIS INFORMATION AND WAIT UNTIL THE */
+ /* GLOBAL CHECKPOINT HAS WRITTEN THE RESTART INFORMATION. */
+ /* ---------------------------------------------------------------------- */
+ Sysfile::setTakeOverNode(takeOverPtr.p->toFailedNode, SYSFILE->takeOver,
+ startNode);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY;
+
+ cstartGcpNow = true;
+}//Dbdih::startTakeOver()
+
+void Dbdih::changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver)
+{
+ NodeRecordPtr startNodePtr;
+ NodeRecordPtr toNodePtr;
+ startNodePtr.i = startNode;
+ ptrCheckGuard(startNodePtr, MAX_NDB_NODES, nodeRecord);
+ toNodePtr.i = nodeTakenOver;
+ ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord);
+ ndbrequire(startNodePtr.p->nodeGroup == ZNIL);
+ NodeGroupRecordPtr NGPtr;
+
+ NGPtr.i = toNodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ bool nodeFound = false;
+ for (Uint32 i = 0; i < NGPtr.p->nodeCount; i++) {
+ jam();
+ if (NGPtr.p->nodesInGroup[i] == nodeTakenOver) {
+ jam();
+ NGPtr.p->nodesInGroup[i] = startNode;
+ nodeFound = true;
+ }//if
+ }//for
+ ndbrequire(nodeFound);
+ Sysfile::setNodeGroup(startNodePtr.i, SYSFILE->nodeGroups, toNodePtr.p->nodeGroup);
+ startNodePtr.p->nodeGroup = toNodePtr.p->nodeGroup;
+ Sysfile::setNodeGroup(toNodePtr.i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
+ toNodePtr.p->nodeGroup = ZNIL;
+}//Dbdih::changeNodeGroups()
+
+void Dbdih::checkToCopy()
+{
+ TakeOverRecordPtr takeOverPtr;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ /*----------------------------------------------------------------------*/
+ // TAKE OVER HANDLING WRITES RESTART INFORMATION THROUGH THE GLOBAL
+ // CHECKPOINT PROTOCOL. WE CHECK HERE BEFORE STARTING A WRITE OF THE
+ // RESTART INFORMATION.
+ /*-----------------------------------------------------------------------*/
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY_ONGOING;
+ } else if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY_ONGOING;
+ }//if
+ }//for
+}//Dbdih::checkToCopy()
+
+void Dbdih::checkToCopyCompleted(Signal* signal)
+{
+ /* ------------------------------------------------------------------------*/
+ /* WE CHECK HERE IF THE WRITING OF TAKE OVER INFORMATION ALSO HAS BEEN */
+ /* COMPLETED. */
+ /* ------------------------------------------------------------------------*/
+ TakeOverRecordPtr toPtr;
+ for (toPtr.i = 0; toPtr.i < MAX_NDB_NODES; toPtr.i++) {
+ ptrAss(toPtr, takeOverRecord);
+ if (toPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY_ONGOING){
+ jam();
+ sendStartTo(signal, toPtr.i);
+ } else if (toPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY_ONGOING){
+ jam();
+ sendEndTo(signal, toPtr.i);
+ } else {
+ jam();
+ }//if
+ }//for
+}//Dbdih::checkToCopyCompleted()
+
+bool Dbdih::checkToInterrupted(TakeOverRecordPtr& takeOverPtr)
+{
+ if (checkNodeAlive(takeOverPtr.p->toStartingNode)) {
+ jam();
+ return false;
+ } else {
+ jam();
+ endTakeOver(takeOverPtr.i);
+ return true;
+ }//if
+}//Dbdih::checkToInterrupted()
+
+void Dbdih::sendStartTo(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ CRASH_INSERTION(7155);
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_startToLock != RNIL) || (ERROR_INSERTED(7158))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START;
+ signal->theData[0] = DihContinueB::ZSEND_START_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
+ return;
+ }//if
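+ // Only one take over at a time may run the START_TO protocol; c_startToLock
+ // serialises them and a blocked take over retries via the delayed CONTINUEB
+ // above. The lock is released again in execSTART_TOCONF().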
+ c_startToLock = takeOverPtrI;
+ StartToReq * const req = (StartToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->nodeTakenOver = takeOverPtr.p->toFailedNode;
+ req->nodeRestart = takeOverPtr.p->toNodeRestart;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::STARTING;
+ sendLoopMacro(START_TOREQ, sendSTART_TOREQ);
+}//Dbdih::sendStartTo()
+
+void Dbdih::execSTART_TOREQ(Signal* signal)
+{
+ TakeOverRecordPtr takeOverPtr;
+ jamEntry();
+ const StartToReq * const req = (StartToReq *)&signal->theData[0];
+ takeOverPtr.i = req->userPtr;
+ BlockReference ref = req->userRef;
+ Uint32 startingNode = req->startingNodeId;
+
+ CRASH_INSERTION(7133);
+ RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ allocateTakeOver(takeOverPtr);
+ initStartTakeOver(req, takeOverPtr);
+
+ StartToConf * const conf = (StartToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = startingNode;
+ sendSignal(ref, GSN_START_TOCONF, signal, StartToConf::SignalLength, JBB);
+}//Dbdih::execSTART_TOREQ()
+
+void Dbdih::execSTART_TOCONF(Signal* signal)
+{
+ TakeOverRecordPtr takeOverPtr;
+ jamEntry();
+ const StartToConf * const conf = (StartToConf *)&signal->theData[0];
+
+ CRASH_INSERTION(7147);
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::STARTING);
+ ndbrequire(takeOverPtr.p->toStartingNode == conf->startingNodeId);
+ receiveLoopMacro(START_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7134);
+ c_startToLock = RNIL;
+
+ startNextCopyFragment(signal, takeOverPtr.i);
+}//Dbdih::execSTART_TOCONF()
+
+void Dbdih::initStartTakeOver(const StartToReq * req,
+ TakeOverRecordPtr takeOverPtr)
+{
+ takeOverPtr.p->toCurrentTabref = 0;
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toStartingNode = req->startingNodeId;
+ takeOverPtr.p->toFailedNode = req->nodeTakenOver;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_STARTED;
+ takeOverPtr.p->toCopyNode = RNIL;
+ takeOverPtr.p->toCurrentReplica = RNIL;
+ takeOverPtr.p->toNodeRestart = req->nodeRestart;
+}//Dbdih::initStartTakeOver()
+
+void Dbdih::startNextCopyFragment(Signal* signal, Uint32 takeOverPtrI)
+{
+ TabRecordPtr tabPtr;
+ TakeOverRecordPtr takeOverPtr;
+ Uint32 loopCount;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
+ loopCount = 0;
+ if (ERROR_INSERTED(7159)) {
+ loopCount = 100;
+ }//if
+ while (loopCount++ < 100) {
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ if (tabPtr.i >= ctabFileSize) {
+ jam();
+ CRASH_INSERTION(7136);
+ sendUpdateTo(signal, takeOverPtr.i, UpdateToReq::TO_COPY_COMPLETED);
+ return;
+ }//if
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
+ jam();
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toCurrentTabref++;
+ continue;
+ }//if
+ Uint32 fragId = takeOverPtr.p->toCurrentFragid;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ takeOverPtr.p->toCurrentFragid = 0;
+ takeOverPtr.p->toCurrentTabref++;
+ if (ERROR_INSERTED(7135)) {
+ if (takeOverPtr.p->toCurrentTabref == 1) {
+ ndbrequire(false);
+ }//if
+ }//if
+ continue;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ ReplicaRecordPtr loopReplicaPtr;
+ loopReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (loopReplicaPtr.i != RNIL) {
+ ptrCheckGuard(loopReplicaPtr, creplicaFileSize, replicaRecord);
+ if (loopReplicaPtr.p->procNode == takeOverPtr.p->toFailedNode) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A REPLICA THAT BELONGED TO THE FAILED NODE AND NEEDS */
+ /* TAKE OVER. WE TAKE OVER THIS REPLICA TO THE NEW NODE. */
+ /* ----------------------------------------------------------------- */
+ takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
+ toCopyFragLab(signal, takeOverPtr.i);
+ return;
+ } else if (loopReplicaPtr.p->procNode == takeOverPtr.p->toStartingNode) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE OBVIOUSLY STARTED TAKING OVER THIS WITHOUT COMPLETING IT. */
+ /* WE NEED TO COMPLETE THE TAKE OVER OF THIS REPLICA. */
+ /* ----------------------------------------------------------------- */
+ takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i;
+ toCopyFragLab(signal, takeOverPtr.i);
+ return;
+ } else {
+ jam();
+ loopReplicaPtr.i = loopReplicaPtr.p->nextReplica;
+ }//if
+ }//while
+ takeOverPtr.p->toCurrentFragid++;
+ }//while
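+ // The scan examined 100 table/fragment slots without finding a replica to
+ // copy; yield here and presumably resume the scan in a new time slice via
+ // the ZTO_START_COPY_FRAG CONTINUEB.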
+ signal->theData[0] = DihContinueB::ZTO_START_COPY_FRAG;
+ signal->theData[1] = takeOverPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}//Dbdih::startNextCopyFragment()
+
+void Dbdih::toCopyFragLab(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ CreateReplicaRecordPtr createReplicaPtr;
+ createReplicaPtr.i = 0;
+ ptrAss(createReplicaPtr, createReplicaRecord);
+
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = takeOverPtr.p->toCurrentReplica;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE FOUND A REPLICA THAT NEEDS TAKE OVER. WE WILL START THIS TAKE */
+ /* OVER BY ADDING THE FRAGMENT WHEREAFTER WE WILL ORDER THE PRIMARY */
+ /* REPLICA TO COPY ITS CONTENT TO THE NEW STARTING REPLICA. */
+ /* THIS OPERATION IS A SINGLE USER OPERATION UNTIL WE HAVE SENT */
+ /* COPY_FRAGREQ. AFTER SENDING COPY_FRAGREQ WE ARE READY TO START A NEW */
+ /* FRAGMENT REPLICA. WE WILL NOT IMPLEMENT THIS IN THE FIRST PHASE. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfCreateReplicas = 1;
+ createReplicaPtr.p->hotSpareUse = true;
+ createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode;
+
+ prepareSendCreateFragReq(signal, takeOverPtrI);
+}//Dbdih::toCopyFragLab()
+
+void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ FragmentstorePtr fragPtr;
+
+ getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
+ Uint32 nodes[MAX_REPLICAS];
+ extractNodeInfo(fragPtr.p, nodes);
+ takeOverPtr.p->toCopyNode = nodes[0];
+ sendCreateFragReq(signal, 0, CreateFragReq::STORED, takeOverPtr.i);
+}//Dbdih::prepareSendCreateFragReq()
+
+void Dbdih::sendCreateFragReq(Signal* signal,
+ Uint32 startGci,
+ Uint32 replicaType,
+ Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_createFragmentLock != RNIL) ||
+ ((ERROR_INSERTED(7161))&&(replicaType == CreateFragReq::STORED)) ||
+ ((ERROR_INSERTED(7162))&&(replicaType == CreateFragReq::COMMIT_STORED))){
+ if (replicaType == CreateFragReq::STORED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_PREPARE_CREATE;
+ } else {
+ ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_COMMIT_CREATE;
+ }//if
+ signal->theData[0] = DihContinueB::ZSEND_CREATE_FRAG;
+ signal->theData[1] = takeOverPtr.i;
+ signal->theData[2] = replicaType;
+ signal->theData[3] = startGci;
+ signal->theData[4] = takeOverPtr.p->toStartingNode;
+ signal->theData[5] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 50, 6);
+ return;
+ }//if
+ c_createFragmentLock = takeOverPtr.i;
+ sendLoopMacro(CREATE_FRAGREQ, nullRoutine);
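+ // With nullRoutine the macro presumably only initialises the CREATE_FRAGREQ
+ // reply counter; the request itself is built below and sent explicitly to
+ // every alive node in the loop that follows.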
+
+ CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragId = takeOverPtr.p->toCurrentFragid;
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->copyNodeId = takeOverPtr.p->toCopyNode;
+ req->startGci = startGci;
+ req->replicaType = replicaType;
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_CREATE_FRAGREQ, signal,
+ CreateFragReq::SignalLength, JBB);
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+
+ if (replicaType == CreateFragReq::STORED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_CREATE;
+ } else {
+ ndbrequire(replicaType == CreateFragReq::COMMIT_STORED);
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
+ }
+}//Dbdih::sendCreateFragReq()
+
+/* --------------------------------------------------------------------------*/
+/* AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE */
+/* MASTER. */
+/* --------------------------------------------------------------------------*/
+void Dbdih::execCREATE_FRAGREQ(Signal* signal)
+{
+ jamEntry();
+ CreateFragReq * const req = (CreateFragReq *)&signal->theData[0];
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ BlockReference retRef = req->userRef;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ Uint32 fragId = req->fragId;
+ Uint32 tdestNodeid = req->startingNodeId;
+ Uint32 tsourceNodeid = req->copyNodeId;
+ Uint32 startGci = req->startGci;
+ Uint32 replicaType = req->replicaType;
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ RETURN_IF_NODE_NOT_ALIVE(tdestNodeid);
+ ReplicaRecordPtr frReplicaPtr;
+ findToReplica(takeOverPtr.p, replicaType, fragPtr, frReplicaPtr);
+ ndbrequire(frReplicaPtr.i != RNIL);
+
+ switch (replicaType) {
+ case CreateFragReq::STORED:
+ jam();
+ CRASH_INSERTION(7138);
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE ARE INSERTING THE NEW BACKUP NODE IN THE EXECUTION OF ALL */
+ /* OPERATIONS. FROM HERE ON ALL OPERATIONS ON THIS FRAGMENT WILL INCLUDE*/
+ /* USE OF THE NEW REPLICA. */
+ /* --------------------------------------------------------------------- */
+ insertBackup(fragPtr, tdestNodeid);
+ takeOverPtr.p->toCopyNode = tsourceNodeid;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_PREPARE;
+
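+ // The distribution key wraps modulo 256; it is bumped here because the
+ // replica set of this fragment has just changed.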
+ fragPtr.p->distributionKey++;
+ fragPtr.p->distributionKey &= 255;
+ break;
+ case CreateFragReq::COMMIT_STORED:
+ jam();
+ CRASH_INSERTION(7139);
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE ARE MOVING THE REPLICA TO THE STORED SECTION SINCE IT IS NOW */
+ /* FULLY LOADED WITH ALL DATA NEEDED. */
+ // We also update the order of the replicas here so that if the new
+ // replica is the desired primary we insert it as primary.
+ /* ----------------------------------------------------------------------*/
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_COMMIT;
+ removeOldStoredReplica(fragPtr, frReplicaPtr);
+ linkStoredReplica(fragPtr, frReplicaPtr);
+ updateNodeInfo(fragPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ /* ------------------------------------------------------------------------*/
+ /* THE NEW NODE OF THIS REPLICA IS THE STARTING NODE. */
+ /* ------------------------------------------------------------------------*/
+ if (frReplicaPtr.p->procNode != takeOverPtr.p->toStartingNode) {
+ jam();
+ /* ---------------------------------------------------------------------*/
+ /* IF WE ARE STARTING A TAKE OVER NODE WE MUST INVALIDATE ALL LCP'S. */
+ /* OTHERWISE WE WILL TRY TO START LCP'S THAT DO NOT EXIST. */
+ /* ---------------------------------------------------------------------*/
+ frReplicaPtr.p->procNode = takeOverPtr.p->toStartingNode;
+ frReplicaPtr.p->noCrashedReplicas = 0;
+ frReplicaPtr.p->createGci[0] = startGci;
+ ndbrequire(startGci != 0xF1F1F1F1);
+ frReplicaPtr.p->replicaLastGci[0] = (Uint32)-1;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ frReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ } else {
+ jam();
+ const Uint32 noCrashed = frReplicaPtr.p->noCrashedReplicas;
+ arrGuard(noCrashed, 8);
+ frReplicaPtr.p->createGci[noCrashed] = startGci;
+ ndbrequire(startGci != 0xF1F1F1F1);
+ frReplicaPtr.p->replicaLastGci[noCrashed] = (Uint32)-1;
+ }//if
+ takeOverPtr.p->toCurrentTabref = tabPtr.i;
+ takeOverPtr.p->toCurrentFragid = fragId;
+ CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->tableId = tabPtr.i;
+ conf->fragId = fragId;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = tdestNodeid;
+ sendSignal(retRef, GSN_CREATE_FRAGCONF, signal,
+ CreateFragConf::SignalLength, JBB);
+}//Dbdih::execCREATE_FRAGREQ()
+
+void Dbdih::execCREATE_FRAGCONF(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7148);
+ const CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ Uint32 fragId = conf->fragId;
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(tabPtr.i == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(fragId == takeOverPtr.p->toCurrentFragid);
+ receiveLoopMacro(CREATE_FRAGREQ, conf->sendingNodeId);
+ c_createFragmentLock = RNIL;
+
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_CREATE) {
+ jam();
+ CRASH_INSERTION(7140);
+ /* --------------------------------------------------------------------- */
+ /* ALL NODES HAVE PREPARED THE INTRODUCTION OF THIS NEW NODE AND IT IS */
+ /* ALREADY IN USE. WE CAN NOW START COPYING THE FRAGMENT. */
+ /*---------------------------------------------------------------------- */
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG;
+ BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toCopyNode);
+ CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
+ copyFragReq->userPtr = takeOverPtr.i;
+ copyFragReq->userRef = reference();
+ copyFragReq->tableId = tabPtr.i;
+ copyFragReq->fragId = fragId;
+ copyFragReq->nodeId = takeOverPtr.p->toStartingNode;
+ copyFragReq->schemaVersion = tabPtr.p->schemaVersion;
+ copyFragReq->distributionKey = fragPtr.p->distributionKey;
+ sendSignal(ref, GSN_COPY_FRAGREQ, signal, CopyFragReq::SignalLength, JBB);
+ } else {
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE);
+ jam();
+ CRASH_INSERTION(7141);
+ /* --------------------------------------------------------------------- */
+ // REPORT that copy of fragment has been completed.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NR_CopyFragDone;
+ signal->theData[1] = takeOverPtr.p->toStartingNode;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = takeOverPtr.p->toCurrentFragid;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE NOW CREATED THIS NEW REPLICA AND WE ARE READY TO TAKE THE */
+ /* THE NEXT REPLICA. */
+ /* --------------------------------------------------------------------- */
+
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ mutex.unlock(); // ignore result
+
+ takeOverPtr.p->toCurrentFragid++;
+ startNextCopyFragment(signal, takeOverPtr.i);
+ }//if
+}//Dbdih::execCREATE_FRAGCONF()
+
+void Dbdih::execCOPY_FRAGREF(Signal* signal)
+{
+ const CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ jamEntry();
+ Uint32 takeOverPtrI = ref->userPtr;
+ Uint32 startingNodeId = ref->startingNodeId;
+ Uint32 errorCode = ref->errorCode;
+
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ ndbrequire(errorCode != ZNODE_FAILURE_ERROR);
+ ndbrequire(ref->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(ref->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(ref->startingNodeId == takeOverPtr.p->toStartingNode);
+ ndbrequire(ref->sendingNodeId == takeOverPtr.p->toCopyNode);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
+ endTakeOver(takeOverPtrI);
+ //--------------------------------------------------------------------------
+ // For some reason we did not succeed in copying a fragment. We treat this
+ // as a serious failure and crash the starting node.
+ //--------------------------------------------------------------------------
+ BlockReference cntrRef = calcNdbCntrBlockRef(startingNodeId);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::CopyFragRefError;
+ sysErr->errorRef = reference();
+ sysErr->data1 = errorCode;
+ sysErr->data2 = 0;
+ sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBB);
+ return;
+}//Dbdih::execCOPY_FRAGREF()
+
+void Dbdih::execCOPY_FRAGCONF(Signal* signal)
+{
+ const CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ jamEntry();
+ CRASH_INSERTION(7142);
+
+ TakeOverRecordPtr takeOverPtr;
+ Uint32 takeOverPtrI = conf->userPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+
+ ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(conf->startingNodeId == takeOverPtr.p->toStartingNode);
+ ndbrequire(conf->sendingNodeId == takeOverPtr.p->toCopyNode);
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG);
+ sendUpdateTo(signal, takeOverPtr.i,
+ (Uint32)UpdateToReq::TO_COPY_FRAG_COMPLETED);
+}//Dbdih::execCOPY_FRAGCONF()
+
+void Dbdih::sendUpdateTo(Signal* signal,
+ Uint32 takeOverPtrI, Uint32 updateState)
+{
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_updateToLock != RNIL) ||
+ ((ERROR_INSERTED(7163)) &&
+ (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED)) ||
+ ((ERROR_INSERTED(7169)) &&
+ (updateState == UpdateToReq::TO_COPY_COMPLETED))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_UPDATE_TO;
+ signal->theData[0] = DihContinueB::ZSEND_UPDATE_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ signal->theData[4] = updateState;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 5);
+ return;
+ }//if
+ c_updateToLock = takeOverPtrI;
+ if (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_UPDATE_TO;
+ } else {
+ jam();
+ ndbrequire(updateState == UpdateToReq::TO_COPY_COMPLETED);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_COPY_COMPLETED;
+ }//if
+
+ UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->updateState = (UpdateToReq::UpdateState)updateState;
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragmentNo = takeOverPtr.p->toCurrentFragid;
+ sendLoopMacro(UPDATE_TOREQ, sendUPDATE_TOREQ);
+}//Dbdih::sendUpdateTo()
+
+void Dbdih::execUPDATE_TOREQ(Signal* signal)
+{
+ jamEntry();
+ const UpdateToReq * const req = (UpdateToReq *)&signal->theData[0];
+ BlockReference ref = req->userRef;
+ ndbrequire(cmasterdihref == ref);
+
+ CRASH_INSERTION(7154);
+ RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(req->startingNodeId == takeOverPtr.p->toStartingNode);
+ if (req->updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) {
+ jam();
+ ndbrequire(takeOverPtr.p->toSlaveStatus == TakeOverRecord::TO_SLAVE_CREATE_PREPARE);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED;
+ takeOverPtr.p->toCurrentTabref = req->tableId;
+ takeOverPtr.p->toCurrentFragid = req->fragmentNo;
+ } else {
+ jam();
+ ndbrequire(req->updateState == UpdateToReq::TO_COPY_COMPLETED);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_COMPLETED;
+ setNodeCopyCompleted(takeOverPtr.p->toStartingNode, true);
+ }//if
+
+
+ UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendSignal(ref, GSN_UPDATE_TOCONF, signal, UpdateToConf::SignalLength, JBB);
+}//Dbdih::execUPDATE_TOREQ()
+
+void Dbdih::execUPDATE_TOCONF(Signal* signal)
+{
+ const UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ CRASH_INSERTION(7152);
+
+ RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ receiveLoopMacro(UPDATE_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7153);
+ c_updateToLock = RNIL;
+
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_COPY_COMPLETED) {
+ jam();
+ toCopyCompletedLab(signal, takeOverPtr);
+ return;
+ } else {
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_UPDATE_TO);
+ }//if
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
+ BlockReference lqhRef = calcLqhBlockRef(takeOverPtr.p->toStartingNode);
+ CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragId = takeOverPtr.p->toCurrentFragid;
+ req->distributionKey = fragPtr.p->distributionKey;
+
+ sendSignal(lqhRef, GSN_COPY_ACTIVEREQ, signal,
+ CopyActiveReq::SignalLength, JBB);
+}//Dbdih::execUPDATE_TOCONF()
+
+void Dbdih::execCOPY_ACTIVECONF(Signal* signal)
+{
+ const CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
+ jamEntry();
+ CRASH_INSERTION(7143);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref);
+ ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid);
+ ndbrequire(checkNodeAlive(conf->startingNodeId));
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_ACTIVE);
+
+ takeOverPtr.p->startGci = conf->startGci;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::LOCK_MUTEX;
+
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ Callback c = { safe_cast(&Dbdih::switchPrimaryMutex_locked), takeOverPtr.i };
+ ndbrequire(mutex.lock(c));
+}//Dbdih::execCOPY_ACTIVECONF()
+
+void
+Dbdih::switchPrimaryMutex_locked(Signal* signal, Uint32 toPtrI, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = toPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::LOCK_MUTEX);
+
+ if (!checkNodeAlive((takeOverPtr.p->toStartingNode))) {
+ // We have mutex
+ Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ mutex.unlock(); // Ignore result
+
+ c_createFragmentLock = RNIL;
+ c_CREATE_FRAGREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ return;
+ }
+
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE;
+ sendCreateFragReq(signal, takeOverPtr.p->startGci,
+ CreateFragReq::COMMIT_STORED, takeOverPtr.i);
+}
+
+void Dbdih::toCopyCompletedLab(Signal * signal, TakeOverRecordPtr takeOverPtr)
+{
+ signal->theData[0] = NDB_LE_NR_CopyFragsCompleted;
+ signal->theData[1] = takeOverPtr.p->toStartingNode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ c_lcpState.immediateLcpStart = true;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
+
+ /*-----------------------------------------------------------------------*/
+ /* NOW WE CAN ALLOW THE NEW NODE TO PARTICIPATE IN LOCAL CHECKPOINTS. */
+ /* WHEN THE FIRST LOCAL CHECKPOINT IS READY WE DECLARE THE TAKE OVER AS */
+ /* COMPLETED. SINCE LOCAL CHECKPOINTS HAVE BEEN BLOCKED DURING THE COPY */
+ /* PROCESS WE MUST ALSO START A NEW LOCAL CHECKPOINT PROCESS BY ENSURING */
+ /* THAT IT LOOKS LIKE IT IS TIME FOR A NEW LOCAL CHECKPOINT AND BY */
+ /* UNBLOCKING THE LOCAL CHECKPOINT AGAIN. */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::toCopyCompletedLab()
+
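+/* ------------------------------------------------------------------------- */
+/* START THE FINAL PHASE OF THE TAKE OVER. ONLY ONE TAKE OVER AT A TIME MAY */
+/* RUN THE END_TOREQ PROTOCOL, SO IF THE LOCK IS BUSY WE RETRY LATER VIA A */
+/* DELAYED CONTINUEB SIGNAL. */
+/* ------------------------------------------------------------------------- */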
+void Dbdih::sendEndTo(Signal* signal, Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ CRASH_INSERTION(7156);
+ RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ if ((c_endToLock != RNIL) || (ERROR_INSERTED(7164))) {
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_ENDING;
+ signal->theData[0] = DihContinueB::ZSEND_END_TO;
+ signal->theData[1] = takeOverPtrI;
+ signal->theData[2] = takeOverPtr.p->toStartingNode;
+ signal->theData[3] = takeOverPtr.p->toFailedNode;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4);
+ return;
+ }//if
+ c_endToLock = takeOverPtr.i;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::ENDING;
+ EndToReq * const req = (EndToReq *)&signal->theData[0];
+ req->userPtr = takeOverPtr.i;
+ req->userRef = reference();
+ req->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendLoopMacro(END_TOREQ, sendEND_TOREQ);
+}//Dbdih::sendEndTo()
+
+void Dbdih::execEND_TOREQ(Signal* signal)
+{
+ jamEntry();
+ const EndToReq * const req = (EndToReq *)&signal->theData[0];
+ BlockReference ref = req->userRef;
+ Uint32 startingNodeId = req->startingNodeId;
+
+ CRASH_INSERTION(7144);
+ RETURN_IF_NODE_NOT_ALIVE(startingNodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = req->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(startingNodeId == takeOverPtr.p->toStartingNode);
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
+
+ if (!isMaster()) {
+ jam();
+ endTakeOver(takeOverPtr.i);
+ }//if
+
+ EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ conf->userPtr = takeOverPtr.i;
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = startingNodeId;
+ sendSignal(ref, GSN_END_TOCONF, signal, EndToConf::SignalLength, JBB);
+}//Dbdih::execEND_TOREQ()
+
+void Dbdih::execEND_TOCONF(Signal* signal)
+{
+ const EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ jamEntry();
+
+ const Uint32 nodeId = conf->startingNodeId;
+ CRASH_INSERTION(7145);
+
+ RETURN_IF_NODE_NOT_ALIVE(nodeId);
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = conf->userPtr;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::ENDING);
+ ndbrequire(nodeId == takeOverPtr.p->toStartingNode);
+
+ receiveLoopMacro(END_TOREQ, conf->sendingNodeId);
+ CRASH_INSERTION(7146);
+ c_endToLock = RNIL;
+
+ /* -----------------------------------------------------------------------*/
+ /* WE HAVE FINALLY COMPLETED THE TAKE OVER. WE RESET THE STATUS AND CHECK*/
+ /* IF ANY MORE TAKE OVERS ARE NEEDED AT THE MOMENT. */
+ /* FIRST WE CHECK IF A RESTART IS ONGOING. IN THAT CASE WE RESTART PHASE */
+ /* 4 AND CHECK IF ANY MORE TAKE OVERS ARE NEEDED BEFORE WE START NDB */
+ /* CLUSTER. THIS CAN ONLY HAPPEN IN A SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ if (takeOverPtr.p->toNodeRestart) {
+ jam();
+ /* ----------------------------------------------------------------------*/
+ /* THE TAKE OVER NODE WAS A STARTING NODE. WE WILL SEND START_COPYCONF */
+ /* TO THE STARTING NODE SUCH THAT THE NODE CAN COMPLETE THE START-UP. */
+ /* --------------------------------------------------------------------- */
+ BlockReference ref = calcDihBlockRef(takeOverPtr.p->toStartingNode);
+ signal->theData[0] = takeOverPtr.p->toStartingNode;
+ sendSignal(ref, GSN_START_COPYCONF, signal, 1,JBB);
+ }//if
+ endTakeOver(takeOverPtr.i);
+
+ ndbout_c("2 - endTakeOver");
+ if (cstartPhase == ZNDB_SPH4) {
+ jam();
+ ndbrequire(false);
+ if (anyActiveTakeOver()) {
+ jam();
+ ndbout_c("4 - anyActiveTakeOver == true");
+ return;
+ }//if
+ ndbout_c("5 - anyActiveTakeOver == false -> ndbsttorry10Lab");
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+ checkStartTakeOver(signal);
+}//Dbdih::execEND_TOCONF()
+
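+/* ------------------------------------------------------------------------- */
+/* ON NON-MASTER NODES THE TAKE OVER RECORD CHOSEN BY THE MASTER IS REMOVED */
+/* FROM THE FREE LIST, EITHER BY A NORMAL SEIZE FROM THE HEAD OR BY */
+/* UNLINKING IT FROM THE MIDDLE OF THE LIST. */
+/* ------------------------------------------------------------------------- */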
+void Dbdih::allocateTakeOver(TakeOverRecordPtr& takeOverPtr)
+{
+ if (isMaster()) {
+ jam();
+ //--------------------------------------------
+ // Master already seized the take over record.
+ //--------------------------------------------
+ return;
+ }//if
+ if (takeOverPtr.i == cfirstfreeTakeOver) {
+ jam();
+ seizeTakeOver(takeOverPtr);
+ } else {
+ TakeOverRecordPtr nextTakeOverptr;
+ TakeOverRecordPtr prevTakeOverptr;
+ nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
+ prevTakeOverptr.i = takeOverPtr.p->prevTakeOver;
+ if (prevTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ prevTakeOverptr.p->nextTakeOver = nextTakeOverptr.i;
+ }//if
+ if (nextTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ nextTakeOverptr.p->prevTakeOver = prevTakeOverptr.i;
+ }//if
+ }//if
+}//Dbdih::allocateTakeOver()
+
+void Dbdih::seizeTakeOver(TakeOverRecordPtr& takeOverPtr)
+{
+ TakeOverRecordPtr nextTakeOverptr;
+ ndbrequire(cfirstfreeTakeOver != RNIL);
+ takeOverPtr.i = cfirstfreeTakeOver;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ cfirstfreeTakeOver = takeOverPtr.p->nextTakeOver;
+ nextTakeOverptr.i = takeOverPtr.p->nextTakeOver;
+ if (nextTakeOverptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord);
+ nextTakeOverptr.p->prevTakeOver = RNIL;
+ }//if
+ takeOverPtr.p->nextTakeOver = RNIL;
+ takeOverPtr.p->prevTakeOver = RNIL;
+}//Dbdih::seizeTakeOver()
+
+void Dbdih::endTakeOver(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ releaseTakeOver(takeOverPtrI);
+ if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) &&
+ (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) {
+ jam();
+ NodeGroupRecordPtr NGPtr;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = takeOverPtr.p->toStartingNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ NGPtr.i = nodePtr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ NGPtr.p->activeTakeOver = false;
+ }//if
+ setAllowNodeStart(takeOverPtr.p->toStartingNode, true);
+ initTakeOver(takeOverPtr);
+}//Dbdih::endTakeOver()
+
+void Dbdih::releaseTakeOver(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ takeOverPtr.p->nextTakeOver = cfirstfreeTakeOver;
+ cfirstfreeTakeOver = takeOverPtr.i;
+}//Dbdih::releaseTakeOver()
+
+void Dbdih::initTakeOver(TakeOverRecordPtr takeOverPtr)
+{
+ takeOverPtr.p->toCopyNode = RNIL;
+ takeOverPtr.p->toCurrentFragid = RNIL;
+ takeOverPtr.p->toCurrentReplica = RNIL;
+ takeOverPtr.p->toCurrentTabref = RNIL;
+ takeOverPtr.p->toFailedNode = RNIL;
+ takeOverPtr.p->toStartingNode = RNIL;
+ takeOverPtr.p->prevTakeOver = RNIL;
+ takeOverPtr.p->nextTakeOver = RNIL;
+ takeOverPtr.p->toNodeRestart = false;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::IDLE;
+ takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE;
+}//Dbdih::initTakeOver()
+
+bool Dbdih::anyActiveTakeOver()
+{
+ TakeOverRecordPtr takeOverPtr;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) {
+ jam();
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbdih::anyActiveTakeOver()
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* WE HAVE BEEN REQUESTED TO PERFORM A SYSTEM RESTART. WE START BY */
+/* READING THE GCI FILES. THIS REQUEST WILL ONLY BE SENT TO THE MASTER */
+/* DIH. THAT MEANS WE HAVE TO REPLICATE THE INFORMATION WE READ FROM */
+/* OUR FILES TO ENSURE THAT ALL NODES HAVE THE SAME DISTRIBUTION */
+/* INFORMATION. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::readGciFileLab(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+
+ openFileRo(signal, filePtr);
+}//Dbdih::readGciFileLab()
+
+void Dbdih::openingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE SUCCESSFULLY OPENED A FILE CONTAINING INFORMATION ABOUT */
+ /* THE GLOBAL CHECKPOINTS THAT ARE POSSIBLE TO RESTART. */
+ /* ----------------------------------------------------------------------- */
+ readRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_GCP;
+}//Dbdih::openingGcpLab()
+
+void Dbdih::readingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW SUCCESSFULLY MANAGED TO READ IN THE GLOBAL CHECKPOINT */
+ /* INFORMATION FROM FILE. LATER WE WILL ADD SOME FUNCTIONALITY THAT */
+ /* CHECKS THE RESTART TIMERS TO DEDUCE FROM WHERE TO RESTART. */
+ /* NOW WE WILL SIMPLY RESTART FROM THE NEWEST GLOBAL CHECKPOINT */
+ /* POSSIBLE TO RESTORE. */
+ /* */
+ /* BEFORE WE INVOKE DICT WE NEED TO COPY CRESTART_INFO TO ALL NODES. */
+ /* WE ALSO COPY TO OUR OWN NODE. TO ENABLE US TO DO THIS PROPERLY WE */
+ /* START BY CLOSING THIS FILE. */
+ /* ----------------------------------------------------------------------- */
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_GCP;
+}//Dbdih::readingGcpLab()
+
+void Dbdih::closingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) == false){
+ jam();
+ selectMasterCandidateAndSend(signal);
+ return;
+ } else {
+ jam();
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::closingGcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* SELECT THE MASTER CANDIDATE TO BE USED IN SYSTEM RESTARTS. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::selectMasterCandidateAndSend(Signal* signal)
+{
+ Uint32 gci = 0;
+ Uint32 masterCandidateId = 0;
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci) {
+ jam();
+ masterCandidateId = nodePtr.i;
+ gci = SYSFILE->lastCompletedGCI[nodePtr.i];
+ }//if
+ }//for
+ ndbrequire(masterCandidateId != 0);
+ setNodeGroups();
+ signal->theData[0] = masterCandidateId;
+ signal->theData[1] = gci;
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB);
+
+ Uint32 node_groups[MAX_NDB_NODES];
+ memset(node_groups, 0, sizeof(node_groups));
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups);
+ if(ng != NO_NODE_GROUP_ID){
+ ndbrequire(ng < MAX_NDB_NODES);
+ node_groups[ng]++;
+ }
+ }
+
+ for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ Uint32 count = node_groups[nodePtr.i];
+ if(count != 0 && count != cnoReplicas){
+ char buf[255];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Illegal configuration change."
+ " Initial start needs to be performed "
+ " when changing no of replicas (%d != %d)",
+ node_groups[nodePtr.i], cnoReplicas);
+ progError(__LINE__,
+ ERR_INVALID_CONFIG,
+ buf);
+ }
+ }
+}//Dbdih::selectMasterCandidateAndSend()
+
+/* ------------------------------------------------------------------------- */
+/* ERROR HANDLING DURING READING RESTORABLE GCI FROM FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->fileStatus = FileRecord::CRASHED;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (crestartInfoFile[0] == filePtr.i) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* THE FIRST FILE WAS NOT ABLE TO BE OPENED. SET STATUS TO CRASHED AND */
+ /* TRY OPEN THE NEXT FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRo(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE FAILED IN OPENING THE SECOND FILE. BOTH FILES WERE CORRUPTED. WE */
+ /* CANNOT CONTINUE THE RESTART IN THIS CASE. TELL NDBCNTR OF OUR */
+ /* FAILURE. */
+ /*---------------------------------------------------------------------- */
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::openingGcpErrorLab()
+
+void Dbdih::readingGcpErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->fileStatus = FileRecord::CRASHED;
+ /* ----------------------------------------------------------------------- */
+ /* WE FAILED IN READING THE FILE AS WELL. WE WILL CLOSE THIS FILE. */
+ /* ----------------------------------------------------------------------- */
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_GCP_CRASH;
+}//Dbdih::readingGcpErrorLab()
+
+void Dbdih::closingGcpCrashLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (crestartInfoFile[0] == filePtr.i) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* ERROR IN FIRST FILE, TRY THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_GCP;
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* WE DISCOVERED A FAILURE WITH THE SECOND FILE AS WELL. THIS IS A */
+ /* SERIOUS PROBLEM. REPORT FAILURE TO NDBCNTR. */
+ /* ----------------------------------------------------------------------- */
+ sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB);
+}//Dbdih::closingGcpCrashLab()
+
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* THIS IS AN INITIAL RESTART. WE WILL CREATE THE TWO FILES DESCRIBING */
+/* THE GLOBAL CHECKPOINTS THAT ARE RESTORABLE. */
+/* ------------------------------------------------------------------------- */
+/*****************************************************************************/
+void Dbdih::initGciFilesLab(Signal* signal)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CREATING_GCP;
+}//Dbdih::initGciFilesLab()
+
+/* ------------------------------------------------------------------------- */
+/* A GLOBAL CHECKPOINT FILE HAS BEEN SUCCESSFULLY CREATED. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::creatingGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* IF CREATED FIRST THEN ALSO CREATE THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CREATING_GCP;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* BOTH FILES HAVE BEEN CREATED. NOW WRITE THE INITIAL DATA TO BOTH */
+ /* OF THE FILES. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
+ }//if
+}//Dbdih::creatingGcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* WE HAVE SUCCESSFULLY WRITTEN A GCI FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr)
+{
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE WRITTEN THE FIRST FILE NOW ALSO WRITE THE SECOND FILE. */
+ /* --------------------------------------------------------------------- */
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP;
+ } else {
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE WRITTEN BOTH FILES. LEAVE BOTH FILES OPEN AND CONFIRM OUR */
+ /* PART OF THE INITIAL START. */
+ /* --------------------------------------------------------------------- */
+ if (isMaster()) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ // IN MASTER NODES THE START REQUEST IS RECEIVED FROM NDBCNTR AND WE MUST
+ // RESPOND WHEN COMPLETED.
+ /*---------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ sendSignal(cndbStartReqBlockref, GSN_NDB_STARTCONF, signal, 1, JBB);
+ } else {
+ jam();
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+ }//if
+}//Dbdih::writeInitGcpLab()
+
+/*****************************************************************************/
+/* ********** NODES DELETION MODULE *************/
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/* LOGIC FOR NODE FAILURE */
+/*---------------------------------------------------------------------------*/
+void Dbdih::execNODE_FAILREP(Signal* signal)
+{
+ Uint32 i;
+ Uint32 failedNodes[MAX_NDB_NODES];
+ jamEntry();
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ cfailurenr = nodeFail->failNo;
+ Uint32 newMasterId = nodeFail->masterNodeId;
+ const Uint32 noOfFailedNodes = nodeFail->noOfNodes;
+
+ /*-------------------------------------------------------------------------*/
+ // The first step is to convert from a bit mask to an array of failed nodes.
+ /*-------------------------------------------------------------------------*/
+ Uint32 index = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ jam();
+ failedNodes[index] = i;
+ index++;
+ }//if
+ }//for
+ ndbrequire(noOfFailedNodes == index);
+ ndbrequire(noOfFailedNodes - 1 < MAX_NDB_NODES);
+
+ /*-------------------------------------------------------------------------*/
+ // The second step is to update the node status of the failed nodes, remove
+ // them from the alive node list and put them into the dead node list. Also
+ // update the number of nodes on-line.
+ // We also set certain state variables ensuring that the node is no longer
+ // used in transactions, and we mark that we received this signal.
+ /*-------------------------------------------------------------------------*/
+ for (i = 0; i < noOfFailedNodes; i++) {
+ jam();
+ NodeRecordPtr TNodePtr;
+ TNodePtr.i = failedNodes[i];
+ ptrCheckGuard(TNodePtr, MAX_NDB_NODES, nodeRecord);
+ TNodePtr.p->useInTransactions = false;
+ TNodePtr.p->m_inclDihLcp = false;
+ TNodePtr.p->recNODE_FAILREP = ZTRUE;
+ if (TNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ con_lineNodes--;
+ TNodePtr.p->nodeStatus = NodeRecord::DIED_NOW;
+ removeAlive(TNodePtr);
+ insertDeadNode(TNodePtr);
+ }//if
+ }//for
+
+ /*-------------------------------------------------------------------------*/
+ // Verify that we can continue to operate the cluster. If we cannot we will
+ // not return from checkEscalation.
+ /*-------------------------------------------------------------------------*/
+ checkEscalation();
+
+ /*------------------------------------------------------------------------*/
+ // Verify that a starting node has also crashed. Reset the node start record.
+ /*-------------------------------------------------------------------------*/
+ if (c_nodeStartMaster.startNode != RNIL) {
+ ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)!= NodeRecord::ALIVE);
+ }//if
+
+ /*--------------------------------------------------*/
+ /* */
+ /* WE CHANGE THE REFERENCE TO MASTER DIH */
+ /* BLOCK AND POINTER AT THIS PLACE IN THE CODE*/
+ /*--------------------------------------------------*/
+ Uint32 oldMasterId = cmasterNodeId;
+ BlockReference oldMasterRef = cmasterdihref;
+ cmasterdihref = calcDihBlockRef(newMasterId);
+ cmasterNodeId = newMasterId;
+
+ const bool masterTakeOver = (oldMasterId != newMasterId);
+
+ for(i = 0; i < noOfFailedNodes; i++) {
+ NodeRecordPtr failedNodePtr;
+ failedNodePtr.i = failedNodes[i];
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ Uint32 activeTakeOverPtr = findTakeOver(failedNodes[i]);
+ if (oldMasterRef == reference()) {
+ /*-------------------------------------------------------*/
+ // Functions that need to be called only for master nodes.
+ /*-------------------------------------------------------*/
+ checkCopyTab(failedNodePtr);
+ checkStopPermMaster(signal, failedNodePtr);
+ checkWaitGCPMaster(signal, failedNodes[i]);
+ checkTakeOverInMasterAllNodeFailure(signal, failedNodePtr);
+ checkTakeOverInMasterCopyNodeFailure(signal, failedNodePtr.i);
+ checkTakeOverInMasterStartNodeFailure(signal, activeTakeOverPtr);
+ checkGcpOutstanding(signal, failedNodePtr.i);
+ } else {
+ jam();
+ /*-----------------------------------------------------------*/
+ // Functions that need to be called only for nodes that were
+ // not master before these failures.
+ /*-----------------------------------------------------------*/
+ checkStopPermProxy(signal, failedNodes[i]);
+ checkWaitGCPProxy(signal, failedNodes[i]);
+ if (isMaster()) {
+ /*-----------------------------------------------------------*/
+ // We take over as master since old master has failed
+ /*-----------------------------------------------------------*/
+ handleTakeOverNewMaster(signal, activeTakeOverPtr);
+ } else {
+ /*-----------------------------------------------------------*/
+ // We are not master and will not become master.
+ /*-----------------------------------------------------------*/
+ checkTakeOverInNonMasterStartNodeFailure(signal, activeTakeOverPtr);
+ }//if
+ }//if
+ /*--------------------------------------------------*/
+ // Functions that need to be called for all nodes.
+ /*--------------------------------------------------*/
+ checkStopMe(signal, failedNodePtr);
+ failedNodeLcpHandling(signal, failedNodePtr);
+ checkWaitDropTabFailedLqh(signal, failedNodePtr.i, 0); // 0 = start w/ tab 0
+ startRemoveFailedNode(signal, failedNodePtr);
+
+ /**
+ * This is the last function called
+ * It modifies failedNodePtr.p->nodeStatus
+ */
+ failedNodeSynchHandling(signal, failedNodePtr);
+ }//for
+
+ if(masterTakeOver){
+ jam();
+ startLcpMasterTakeOver(signal, oldMasterId);
+ startGcpMasterTakeOver(signal, oldMasterId);
+
+ if(getNodeState().getNodeRestartInProgress()){
+ jam();
+ progError(__LINE__,
+ ERR_SYSTEM_ERROR,
+ "Unhandle master failure during node restart");
+ }
+ }
+
+
+ if (isMaster()) {
+ jam();
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::execNODE_FAILREP()
+
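+/* ------------------------------------------------------------------------- */
+/* ABORT AN ONGOING NODE START-UP IF THE FAILED NODE WAS THE NODE CURRENTLY */
+/* BEING STARTED BY THIS MASTER AND RESET THE NODE START RECORD. */
+/* ------------------------------------------------------------------------- */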
+void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
+{
+ jam();
+
+ if(c_nodeStartMaster.startNode != failedNodePtr.i){
+ jam();
+ return;
+ }
+
+ switch(c_nodeStartMaster.m_outstandingGsn){
+ case GSN_COPY_TABREQ:
+ jam();
+ ndbrequire(c_COPY_TABREQ_Counter.isWaitingFor(failedNodePtr.i));
+ releaseTabPages(failedNodePtr.p->activeTabptr);
+ c_COPY_TABREQ_Counter.clearWaitingFor(failedNodePtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ break;
+ case GSN_START_INFOREQ:
+ case GSN_START_PERMCONF:
+ case GSN_DICTSTARTREQ:
+ case GSN_START_MECONF:
+ jam();
+ break;
+ default:
+ ndbout_c("outstanding gsn: %s(%d)",
+ getSignalName(c_nodeStartMaster.m_outstandingGsn),
+ c_nodeStartMaster.m_outstandingGsn);
+ ndbrequire(false);
+ }
+
+ nodeResetStart();
+}//Dbdih::checkCopyTab()
+
+void Dbdih::checkStopMe(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ jam();
+ if (c_STOP_ME_REQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ ndbrequire(c_stopMe.clientRef != 0);
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderRef = calcDihBlockRef(failedNodePtr.i);
+ stopMeConf->senderData = c_stopMe.clientData;
+ sendSignal(reference(), GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ }//if
+}//Dbdih::checkStopMe()
+
+void Dbdih::checkStopPermMaster(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ jam();
+ if (c_DIH_SWITCH_REPLICA_REQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ ndbrequire(c_stopPermMaster.clientRef != 0);
+ ref->senderNode = failedNodePtr.i;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REF, signal,
+ DihSwitchReplicaRef::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::checkStopPermMaster()
+
+void Dbdih::checkStopPermProxy(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ if(c_stopPermProxy.clientRef != 0 &&
+ refToNode(c_stopPermProxy.masterRef) == failedNodeId){
+
+ /**
+ * The master has failed, report this to the proxy client
+ */
+ jam();
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+
+ ref->senderData = c_stopPermProxy.clientData;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ c_stopPermProxy.clientRef = 0;
+ }//if
+}//Dbdih::checkStopPermProxy()
+
+void
+Dbdih::checkTakeOverInMasterAllNodeFailure(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ //------------------------------------------------------------------------
+ // This code is used to handle the failure of "all" nodes during the
+ // take over when "all" nodes are informed about state changes in
+ // the take over protocol.
+ //--------------------------------------------------------------------------
+ if (c_START_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ StartToConf * const conf = (StartToConf *)&signal->theData[0];
+ conf->userPtr = c_startToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_startToLock);
+ sendSignal(reference(), GSN_START_TOCONF, signal,
+ StartToConf::SignalLength, JBB);
+ }//if
+ if (c_CREATE_FRAGREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0];
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = c_createFragmentLock;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ conf->userPtr = takeOverPtr.i;
+ conf->tableId = takeOverPtr.p->toCurrentTabref;
+ conf->fragId = takeOverPtr.p->toCurrentFragid;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = takeOverPtr.p->toStartingNode;
+ sendSignal(reference(), GSN_CREATE_FRAGCONF, signal,
+ CreateFragConf::SignalLength, JBB);
+ }//if
+ if (c_UPDATE_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0];
+ conf->userPtr = c_updateToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_updateToLock);
+ sendSignal(reference(), GSN_UPDATE_TOCONF, signal,
+ UpdateToConf::SignalLength, JBB);
+ }//if
+
+ if (c_END_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){
+ jam();
+ EndToConf * const conf = (EndToConf *)&signal->theData[0];
+ conf->userPtr = c_endToLock;
+ conf->sendingNodeId = failedNodePtr.i;
+ conf->startingNodeId = getStartNode(c_endToLock);
+ sendSignal(reference(), GSN_END_TOCONF, signal,
+ EndToConf::SignalLength, JBB);
+ }//if
+}//Dbdih::checkTakeOverInMasterAllNodeFailure()
+
+void Dbdih::checkTakeOverInMasterCopyNodeFailure(Signal* signal,
+ Uint32 failedNodeId)
+{
+ //---------------------------------------------------------------------------
+ // This code is used to handle failure of the copying node during a take over
+ //---------------------------------------------------------------------------
+ TakeOverRecordPtr takeOverPtr;
+ for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ takeOverPtr.i = i;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
+ (takeOverPtr.p->toCopyNode == failedNodeId)) {
+ jam();
+ /**
+ * The copying node failed but the system is still operational.
+ * We restart the copy process by selecting a new copy node.
+ * We do not need to add a fragment however since it is already added.
+ * We start again from the prepare create fragment phase.
+ */
+ prepareSendCreateFragReq(signal, takeOverPtr.i);
+ }//if
+ }//for
+}//Dbdih::checkTakeOverInMasterCopyNodeFailure()
+
+void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI == RNIL) {
+ jam();
+ return;
+ }
+ //-----------------------------------------------------------------------
+ // We are the master and the starting node has failed during a take over.
+ // We need to handle this failure in different ways depending on the state.
+ //-----------------------------------------------------------------------
+
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+
+ bool ok = false;
+ switch (takeOverPtr.p->toMasterStatus) {
+ case TakeOverRecord::IDLE:
+ //-----------------------------------------------------------------------
+ // The state cannot be idle when it has a starting node.
+ //-----------------------------------------------------------------------
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_WAIT_START_TAKE_OVER:
+ jam();
+ case TakeOverRecord::TO_START_COPY:
+ jam();
+ case TakeOverRecord::TO_START_COPY_ONGOING:
+ jam();
+ case TakeOverRecord::TO_WAIT_START:
+ jam();
+ case TakeOverRecord::TO_WAIT_PREPARE_CREATE:
+ jam();
+ case TakeOverRecord::TO_WAIT_UPDATE_TO:
+ jam();
+ case TakeOverRecord::TO_WAIT_COMMIT_CREATE:
+ jam();
+ case TakeOverRecord::TO_END_COPY:
+ jam();
+ case TakeOverRecord::TO_END_COPY_ONGOING:
+ jam();
+ case TakeOverRecord::TO_WAIT_ENDING:
+ jam();
+ //-----------------------------------------------------------------------
+ // We will not do anything since an internal signal process is outstanding.
+ // When the signal arrives the take over will be released.
+ //-----------------------------------------------------------------------
+ ok = true;
+ break;
+ case TakeOverRecord::STARTING:
+ jam();
+ ok = true;
+ c_startToLock = RNIL;
+ c_START_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::TO_UPDATE_TO:
+ jam();
+ ok = true;
+ c_updateToLock = RNIL;
+ c_UPDATE_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::ENDING:
+ jam();
+ ok = true;
+ c_endToLock = RNIL;
+ c_END_TOREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::COMMIT_CREATE:
+ ok = true;
+ jam();
+ {// We have mutex
+ Mutex m(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle);
+ m.unlock(); // Ignore result
+ }
+ // Fall through
+ case TakeOverRecord::PREPARE_CREATE:
+ ok = true;
+ jam();
+ c_createFragmentLock = RNIL;
+ c_CREATE_FRAGREQ_Counter.clearWaitingFor();
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::LOCK_MUTEX:
+ ok = true;
+ jam();
+ // Lock mutex will return and do endTakeOver
+ break;
+
+ //-----------------------------------------------------------------------
+ // Signals are outstanding to external nodes. These signals carry the node
+ // id of the starting node and will not use the take over record if the
+ // starting node has failed.
+ //-----------------------------------------------------------------------
+ case TakeOverRecord::COPY_FRAG:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // The starting node will discover the problem. We will receive either
+ // COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over
+ // record and end the process. If the copying node should also die then
+ // we will try to send prepare create fragment and will then discover
+ // that the starting node has failed.
+ //-----------------------------------------------------------------------
+ break;
+ case TakeOverRecord::COPY_ACTIVE:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // In this state we are waiting for a signal from the starting node. Thus we
+ // can release the take over record and end the process.
+ //-----------------------------------------------------------------------
+ endTakeOver(takeOverPtr.i);
+ break;
+ case TakeOverRecord::WAIT_LCP:
+ ok = true;
+ jam();
+ //-----------------------------------------------------------------------
+ // The starting node failed while we were waiting for the LCP that
+ // completes the take over. Simply release the take over record.
+ //-----------------------------------------------------------------------
+ endTakeOver(takeOverPtr.i);
+ break;
+ /**
+ * The following are states that it should not be possible to "be" in
+ */
+ case TakeOverRecord::SELECTING_NEXT:
+ jam();
+ case TakeOverRecord::TO_COPY_COMPLETED:
+ jam();
+ ndbrequire(false);
+ }
+ if(!ok){
+ jamLine(takeOverPtr.p->toMasterStatus);
+ ndbrequire(ok);
+ }
+}//Dbdih::checkTakeOverInMasterStartNodeFailure()
+
+void Dbdih::checkTakeOverInNonMasterStartNodeFailure(Signal* signal,
+ Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI == RNIL) {
+ jam();
+ return;
+ }
+ //-----------------------------------------------------------------------
+ // We are not master and not taking over as master. A take over was ongoing
+ // but the starting node has now failed. Handle it according to the state
+ // of the take over.
+ //-----------------------------------------------------------------------
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ bool ok = false;
+ switch (takeOverPtr.p->toSlaveStatus) {
+ case TakeOverRecord::TO_SLAVE_IDLE:
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_SLAVE_STARTED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
+ jam();
+ ok = true;
+ endTakeOver(takeOverPtr.i);
+ break;
+ }//switch
+ if(!ok){
+ jamLine(takeOverPtr.p->toSlaveStatus);
+ ndbrequire(ok);
+ }
+}//Dbdih::checkTakeOverInNonMasterStartNodeFailure()
+
+void Dbdih::failedNodeSynchHandling(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ jam();
+ /*----------------------------------------------------*/
+ /* INITIALISE THE VARIABLES THAT KEEP TRACK OF */
+ /* WHEN A NODE FAILURE IS COMPLETED. */
+ /*----------------------------------------------------*/
+ failedNodePtr.p->dbdictFailCompleted = ZFALSE;
+ failedNodePtr.p->dbtcFailCompleted = ZFALSE;
+ failedNodePtr.p->dbdihFailCompleted = ZFALSE;
+ failedNodePtr.p->dblqhFailCompleted = ZFALSE;
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor();
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ /**
+ * We're waiting for nodePtr.i to complete
+ * handling of failedNodePtr.i's death
+ */
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.setWaitingFor(nodePtr.i);
+ } else {
+ jam();
+ if ((nodePtr.p->nodeStatus == NodeRecord::DYING) &&
+ (nodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(failedNodePtr.i))){
+ jam();
+ /*----------------------------------------------------*/
+ /* THE NODE FAILED BEFORE REPORTING THE FAILURE */
+ /* HANDLING COMPLETED ON THIS FAILED NODE. */
+ /* REPORT THAT NODE FAILURE HANDLING WAS */
+ /* COMPLETED ON THE NEW FAILED NODE FOR THIS */
+ /* PARTICULAR OLD FAILED NODE. */
+ /*----------------------------------------------------*/
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = 0;
+ nf->nodeId = failedNodePtr.i;
+ nf->failedNodeId = nodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+ }//if
+ }//for
+ if (failedNodePtr.p->nodeStatus == NodeRecord::DIED_NOW) {
+ jam();
+ failedNodePtr.p->nodeStatus = NodeRecord::DYING;
+ } else {
+ jam();
+ /*----------------------------------------------------*/
+ // No more processing is needed when the node had not even started
+ // yet. We set the node status to DEAD since we do not
+ // care whether all nodes complete the node failure
+ // handling. The node has not been included in the
+ // node failure protocols.
+ /*----------------------------------------------------*/
+ failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
+ /**-----------------------------------------------------------------------
+ * WE HAVE COMPLETED HANDLING THE NODE FAILURE IN DIH. WE CAN REPORT THIS
+ * TO DIH, WHICH WAITS FOR THE OTHER BLOCKS TO BE CONCLUDED AS WELL.
+ *-----------------------------------------------------------------------*/
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = DBDIH;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+}//Dbdih::failedNodeSynchHandling()
+
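+/* ------------------------------------------------------------------------- */
+/* RETURN THE TAKE OVER RECORD WHOSE STARTING NODE IS THE FAILED NODE, OR */
+/* RNIL IF NO SUCH TAKE OVER IS ACTIVE. */
+/* ------------------------------------------------------------------------- */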
+Uint32 Dbdih::findTakeOver(Uint32 failedNodeId)
+{
+ for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = i;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toStartingNode == failedNodeId) {
+ jam();
+ return i;
+ }//if
+ }//for
+ return RNIL;
+}//Dbdih::findTakeOver()
+
+Uint32 Dbdih::getStartNode(Uint32 takeOverPtrI)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ return takeOverPtr.p->toStartingNode;
+}//Dbdih::getStartNode()
+
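+/* ------------------------------------------------------------------------- */
+/* UPDATE THE ACTIVE STATUS OF THE FAILED NODE AND ANSWER ANY OUTSTANDING */
+/* LCP RELATED SIGNALS ON ITS BEHALF SO THAT THE LCP PROTOCOLS CAN PROCEED. */
+/* ------------------------------------------------------------------------- */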
+void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ jam();
+ const Uint32 nodeId = failedNodePtr.i;
+
+ if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
+ /*----------------------------------------------------*/
+ /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */
+ /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */
+ /* THE NODE HAS MISSED A LOCAL CHECKPOINT. */
+ /*----------------------------------------------------*/
+ switch (failedNodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ default:
+ ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
+ ndbout << " at failure after NODE_FAILREP of node = ";
+ ndbout << failedNodePtr.i << endl;
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+
+ c_lcpState.m_participatingDIH.clear(failedNodePtr.i);
+ c_lcpState.m_participatingLQH.clear(failedNodePtr.i);
+
+ if(c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.isWaitingFor(failedNodePtr.i)){
+ jam();
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = failedNodePtr.i;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBDIH;
+ sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }
+
+ /**
+ * Check if we're waiting for the failed node's LQH to complete
+ *
+ * Note that this runs "before" the LCP master take over
+ */
+ if(c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId)){
+ jam();
+
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = nodeId;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBLQH;
+ sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+
+ if(c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId)){
+ jam();
+ /**
+ * Make sure we're ready to accept it
+ */
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodeId);
+ }
+ }
+
+ if (c_TCGETOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_TCGETOPSIZECONF, signal, 2, JBB);
+ }//if
+
+ if (c_TC_CLOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ sendSignal(reference(), GSN_TC_CLOPSIZECONF, signal, 1, JBB);
+ }//if
+
+ if (c_START_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
+ conf->senderRef = numberToRef(DBLQH, failedNodePtr.i);
+ conf->lcpId = SYSFILE->latestLCP_ID;
+ sendSignal(reference(), GSN_START_LCP_CONF, signal,
+ StartLcpConf::SignalLength, JBB);
+ }//if
+
+ if (c_EMPTY_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ EmptyLcpConf * const rep = (EmptyLcpConf *)&signal->theData[0];
+ rep->senderNodeId = failedNodePtr.i;
+ rep->tableId = ~0;
+ rep->fragmentId = ~0;
+ rep->lcpNo = 0;
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->idle = true;
+ sendSignal(reference(), GSN_EMPTY_LCP_CONF, signal,
+ EmptyLcpConf::SignalLength, JBB);
+ }//if
+
+ if (c_MASTER_LCPREQ_Counter.isWaitingFor(failedNodePtr.i)) {
+ jam();
+ MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
+ ref->senderNodeId = failedNodePtr.i;
+ ref->failedNodeId = cmasterTakeOverNode;
+ sendSignal(reference(), GSN_MASTER_LCPREF, signal,
+ MasterLCPRef::SignalLength, JBB);
+ }//if
+
+}//Dbdih::failedNodeLcpHandling()
+
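+/* ------------------------------------------------------------------------- */
+/* ANSWER ANY OUTSTANDING GCP PROTOCOL SIGNALS ON BEHALF OF THE FAILED NODE */
+/* SO THAT THE GLOBAL CHECKPOINT PROTOCOL IS NOT BLOCKED. */
+/* ------------------------------------------------------------------------- */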
+void Dbdih::checkGcpOutstanding(Signal* signal, Uint32 failedNodeId){
+ if (c_GCP_PREPARE_Counter.isWaitingFor(failedNodeId)){
+ jam();
+ signal->theData[0] = failedNodeId;
+ signal->theData[1] = cnewgcp;
+ sendSignal(reference(), GSN_GCP_PREPARECONF, signal, 2, JBB);
+ }//if
+
+ if (c_GCP_COMMIT_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ signal->theData[0] = failedNodeId;
+ signal->theData[1] = coldgcp;
+ signal->theData[2] = cfailurenr;
+ sendSignal(reference(), GSN_GCP_NODEFINISH, signal, 3, JBB);
+ }//if
+
+ if (c_GCP_SAVEREQ_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = failedNodeId;
+ saveRef->nodeId = failedNodeId;
+ saveRef->gci = coldgcp;
+ saveRef->errorCode = GCPSaveRef::FakedSignalDueToNodeFailure;
+ sendSignal(reference(), GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ }//if
+
+ if (c_COPY_GCIREQ_Counter.isWaitingFor(failedNodeId)) {
+ jam();
+ signal->theData[0] = failedNodeId;
+ sendSignal(reference(), GSN_COPY_GCICONF, signal, 1, JBB);
+ }//if
+
+ if (c_MASTER_GCPREQ_Counter.isWaitingFor(failedNodeId)){
+ jam();
+ MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
+ ref->senderNodeId = failedNodeId;
+ ref->failedNodeId = cmasterTakeOverNode;
+ sendSignal(reference(), GSN_MASTER_GCPREF, signal,
+ MasterGCPRef::SignalLength, JBB);
+ }//if
+}//Dbdih::checkGcpOutstanding()
+
+
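+/* ------------------------------------------------------------------------- */
+/* AS NEW MASTER WE COLLECT THE LCP STATE OF ALL NODES BY SENDING */
+/* EMPTY_LCP_REQ, UNLESS A PREVIOUS ROUND OF REQUESTS IS STILL OUTSTANDING. */
+/* ------------------------------------------------------------------------- */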
+void
+Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
+ jam();
+
+ c_lcpMasterTakeOverState.minTableId = ~0;
+ c_lcpMasterTakeOverState.minFragId = ~0;
+ c_lcpMasterTakeOverState.failedNodeId = nodeId;
+
+ c_lcpMasterTakeOverState.set(LMTOS_WAIT_EMPTY_LCP, __LINE__);
+
+ if(c_EMPTY_LCP_REQ_Counter.done()){
+ jam();
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor();
+
+ EmptyLcpReq* req = (EmptyLcpReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ sendLoopMacro(EMPTY_LCP_REQ, sendEMPTY_LCP_REQ);
+ ndbrequire(!c_EMPTY_LCP_REQ_Counter.done());
+ } else {
+ /**
+ * Node failure during master take over...
+ */
+ ndbout_c("Nodefail during master take over");
+ }
+
+ setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
+}
+
+void Dbdih::startGcpMasterTakeOver(Signal* signal, Uint32 oldMasterId){
+ jam();
+ /*--------------------------------------------------*/
+ /* */
+ /* THE MASTER HAS FAILED AND WE WERE ELECTED */
+ /* TO BE THE NEW MASTER NODE. WE NEED TO QUERY*/
+ /* ALL THE OTHER NODES ABOUT THEIR STATUS IN */
+ /* ORDER TO BE ABLE TO TAKE OVER CONTROL OF */
+ /* THE GLOBAL CHECKPOINT PROTOCOL AND THE */
+ /* LOCAL CHECKPOINT PROTOCOL. */
+ /*--------------------------------------------------*/
+ if(!isMaster()){
+ jam();
+ return;
+ }
+ cmasterState = MASTER_TAKE_OVER_GCP;
+ cmasterTakeOverNode = oldMasterId;
+ MasterGCPReq * const req = (MasterGCPReq *)&signal->theData[0];
+ req->masterRef = reference();
+ req->failedNodeId = oldMasterId;
+ sendLoopMacro(MASTER_GCPREQ, sendMASTER_GCPREQ);
+ cgcpMasterTakeOverState = GMTOS_INITIAL;
+
+ signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
+}//Dbdih::startGcpMasterTakeOver()
+
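+/* ------------------------------------------------------------------------- */
+/* THE NEW MASTER TAKES OVER RESPONSIBILITY FOR AN ONGOING TAKE OVER. */
+/* DEPENDING ON THE SLAVE STATE THE STARTING NODE IS EITHER STOPPED WITH A */
+/* SYSTEM ERROR OR THE TAKE OVER IS LEFT WAITING FOR THE NEXT LCP. */
+/* ------------------------------------------------------------------------- */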
+void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI)
+{
+ jam();
+ if (takeOverPtrI != RNIL) {
+ jam();
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = takeOverPtrI;
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ bool ok = false;
+ switch (takeOverPtr.p->toSlaveStatus) {
+ case TakeOverRecord::TO_SLAVE_IDLE:
+ ndbrequire(false);
+ break;
+ case TakeOverRecord::TO_SLAVE_STARTED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_PREPARE:
+ jam();
+ case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED:
+ jam();
+ case TakeOverRecord::TO_SLAVE_CREATE_COMMIT:
+ jam();
+ ok = true;
+ infoEvent("Unhandled MasterTO of TO slaveStatus=%d killing node %d",
+ takeOverPtr.p->toSlaveStatus,
+ takeOverPtr.p->toStartingNode);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE;
+
+ {
+ BlockReference cntrRef = calcNdbCntrBlockRef(takeOverPtr.p->toStartingNode);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::CopyFragRefError;
+ sysErr->errorRef = reference();
+ sysErr->data1= 0;
+ sysErr->data2= __LINE__;
+ sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBB);
+ }
+ break;
+ case TakeOverRecord::TO_SLAVE_COPY_COMPLETED:
+ ok = true;
+ jam();
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP;
+ break;
+ }
+ ndbrequire(ok);
+ }//if
+}//Dbdih::handleTakeOverNewMaster()
+
+void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
+{
+ Uint32 nodeId = failedNodePtr.i;
+ if(failedNodePtr.p->nodeStatus != NodeRecord::DIED_NOW){
+ jam();
+ /**
+ * If the node wasn't alive it can't have been part of the LCP
+ */
+ ndbrequire(!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId));
+
+ /**
+ * And there is no point in removing any replicas
+ * It's dead...
+ */
+ return;
+ }
+
+ jam();
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = failedNodePtr.i;
+ signal->theData[2] = 0; // Tab id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+
+ setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
+}//Dbdih::startRemoveFailedNode()
+
+/*--------------------------------------------------*/
+/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
+/* QUERYING THIS NODE ABOUT THE STATE OF THE */
+/* GLOBAL CHECKPOINT PROTOCOL */
+/*--------------------------------------------------*/
+void Dbdih::execMASTER_GCPREQ(Signal* signal)
+{
+ NodeRecordPtr failedNodePtr;
+ MasterGCPReq * const masterGCPReq = (MasterGCPReq *)&signal->theData[0];
+ jamEntry();
+ const BlockReference newMasterBlockref = masterGCPReq->masterRef;
+ const Uint32 failedNodeId = masterGCPReq->failedNodeId;
+ if (c_copyGCISlave.m_copyReason != CopyGCIReq::IDLE) {
+ jam();
+ /*--------------------------------------------------*/
+ /* WE ARE CURRENTLY WRITING THE RESTART INFO */
+ /* IN THIS NODE. SINCE ONLY ONE PROCESS IS */
+ /* ALLOWED TO DO THIS AT A TIME WE MUST ENSURE*/
+ /* THAT THIS IS NOT ONGOING WHEN THE NEW */
+ /* MASTER TAKES OVER CONTROL. IF NOT ALL NODES*/
+ /* RECEIVE THE SAME RESTART INFO DUE TO THE */
+ /* FAILURE OF THE MASTER IT IS TAKEN CARE OF */
+ /* BY THE NEW MASTER. */
+ /*--------------------------------------------------*/
+ sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
+ signal, 10, MasterGCPReq::SignalLength);
+ return;
+ }//if
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ if (failedNodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ /*--------------------------------------------------*/
+ /* ENSURE THAT WE HAVE PROCESSED THE SIGNAL */
+ /* NODE_FAILURE BEFORE WE PROCESS THIS REQUEST*/
+ /* FROM THE NEW MASTER. THIS ENSURES THAT WE */
+ /* HAVE REMOVED THE FAILED NODE FROM THE LIST */
+ /* OF ACTIVE NODES AND SO FORTH. */
+ /*--------------------------------------------------*/
+ sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ,
+ signal, 10, MasterGCPReq::SignalLength);
+ return;
+ } else {
+ ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING);
+ }//if
+ MasterGCPConf::State gcpState;
+ switch (cgcpParticipantState) {
+ case GCP_PARTICIPANT_READY:
+ jam();
+ /*--------------------------------------------------*/
+ /* THE GLOBAL CHECKPOINT IS NOT ACTIVE SINCE */
+ /* THE PREVIOUS GLOBAL CHECKPOINT IS COMPLETED*/
+ /* AND THE NEW ONE HAS NOT STARTED YET. */
+ /*--------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_READY;
+ break;
+ case GCP_PARTICIPANT_PREPARE_RECEIVED:
+ jam();
+ /*--------------------------------------------------*/
+ /* GCP_PREPARE HAS BEEN RECEIVED AND A RESPONSE*/
+ /* HAS BEEN SENT. */
+ /*--------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_PREPARE_RECEIVED;
+ break;
+ case GCP_PARTICIPANT_COMMIT_RECEIVED:
+ jam();
+ /*------------------------------------------------*/
+ /* GCP_COMMIT HAS BEEN RECEIVED BUT GCP_TCFINISHED*/
+ /* HAS NOT YET BEEN RECEIVED FROM LOCAL TC. */
+ /*------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_COMMIT_RECEIVED;
+ break;
+ case GCP_PARTICIPANT_TC_FINISHED:
+ jam();
+ /*------------------------------------------------*/
+ /* GCP_COMMIT HAS BEEN RECEIVED AND ALSO */
+ /* GCP_TCFINISHED HAS BEEN RECEIVED. */
+ /*------------------------------------------------*/
+ gcpState = MasterGCPConf::GCP_TC_FINISHED;
+ break;
+ case GCP_PARTICIPANT_COPY_GCI_RECEIVED:
+ /*--------------------------------------------------*/
+ /* COPY RESTART INFORMATION HAS BEEN RECEIVED */
+ /* BUT NOT YET COMPLETED. */
+ /*--------------------------------------------------*/
+ ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
+ break;
+ default:
+ /*------------------------------------------------*/
+ /* */
+ /* THIS SHOULD NOT OCCUR SINCE THE ABOVE */
+ /* STATES ARE THE ONLY POSSIBLE STATES AT A */
+ /* NODE WHICH WAS NOT A MASTER NODE. */
+ /*------------------------------------------------*/
+ ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
+ break;
+ }//switch
+ MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
+ masterGCPConf->gcpState = gcpState;
+ masterGCPConf->senderNodeId = cownNodeId;
+ masterGCPConf->failedNodeId = failedNodeId;
+ masterGCPConf->newGCP = cnewgcp;
+ masterGCPConf->latestLCP = SYSFILE->latestLCP_ID;
+ masterGCPConf->oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
+ masterGCPConf->keepGCI = SYSFILE->keepGCI;
+ for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
+ masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i];
+ sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal,
+ MasterGCPConf::SignalLength, JBB);
+}//Dbdih::execMASTER_GCPREQ()
+
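+/* ------------------------------------------------------------------------- */
+/* THE NEW MASTER RECORDS THE GCP STATE REPORTED BY EACH NODE AND COMBINES */
+/* THEM INTO THE OVERALL GCP MASTER TAKE OVER STATE. WHEN ALL NODES HAVE */
+/* REPLIED THE GCP PROTOCOL IS RESUMED FROM THE DEDUCED STATE. */
+/* ------------------------------------------------------------------------- */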
+void Dbdih::execMASTER_GCPCONF(Signal* signal)
+{
+ NodeRecordPtr senderNodePtr;
+ MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
+ jamEntry();
+ senderNodePtr.i = masterGCPConf->senderNodeId;
+ ptrCheckGuard(senderNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ MasterGCPConf::State gcpState = (MasterGCPConf::State)masterGCPConf->gcpState;
+ const Uint32 failedNodeId = masterGCPConf->failedNodeId;
+ const Uint32 newGcp = masterGCPConf->newGCP;
+ const Uint32 latestLcpId = masterGCPConf->latestLCP;
+ const Uint32 oldestRestorableGci = masterGCPConf->oldestRestorableGCI;
+ const Uint32 oldestKeepGci = masterGCPConf->keepGCI;
+ if (latestLcpId > SYSFILE->latestLCP_ID) {
+ jam();
+#if 0
+ ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
+ SYSFILE->latestLCP_ID = latestLcpId;
+#endif
+ SYSFILE->keepGCI = oldestKeepGci;
+ SYSFILE->oldestRestorableGCI = oldestRestorableGci;
+ for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
+ SYSFILE->lcpActive[i] = masterGCPConf->lcpActive[i];
+ }//if
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::READY;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::PREPARE_RECEIVED;
+ cnewgcp = newGcp;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::COMMIT_SENT;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ senderNodePtr.p->gcpstate = NodeRecord::NODE_FINISHED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ switch (cgcpMasterTakeOverState) {
+ case GMTOS_INITIAL:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ cgcpMasterTakeOverState = ALL_READY;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = ALL_PREPARED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case ALL_READY:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ /*empty*/;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case PREPARE_STARTED_NOT_COMMITTED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ ndbrequire(false);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case ALL_PREPARED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case COMMIT_STARTED_NOT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case COMMIT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ jam();
+ cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED;
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ case SAVE_STARTED_NOT_COMPLETED:
+ switch (gcpState) {
+ case MasterGCPConf::GCP_READY:
+ jam();
+ break;
+ case MasterGCPConf::GCP_PREPARE_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_COMMIT_RECEIVED:
+ ndbrequire(false);
+ break;
+ case MasterGCPConf::GCP_TC_FINISHED:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ receiveLoopMacro(MASTER_GCPREQ, senderNodePtr.i);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the GCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_GCPhandling(signal, failedNodeId);
+ return;
+}//Dbdih::execMASTER_GCPCONF()
+
+void Dbdih::execMASTER_GCPREF(Signal* signal)
+{
+ const MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0];
+ jamEntry();
+ receiveLoopMacro(MASTER_GCPREQ, ref->senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the GCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_GCPhandling(signal, ref->failedNodeId);
+}//Dbdih::execMASTER_GCPREF()
+
+void Dbdih::MASTER_GCPhandling(Signal* signal, Uint32 failedNodeId)
+{
+ NodeRecordPtr failedNodePtr;
+ cmasterState = MASTER_ACTIVE;
+ /*----------------------------------------------------------*/
+ /* REMOVE ALL ACTIVE STATUS ON ALREADY FAILED NODES */
+ /* THIS IS PERFORMED HERE SINCE WE GET THE LCP ACTIVE */
+ /* STATUS AS PART OF THE COPY RESTART INFO AND THIS IS*/
+ /* HANDLED BY THE MASTER GCP TAKE OVER PROTOCOL. */
+ /*----------------------------------------------------------*/
+
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ switch (cgcpMasterTakeOverState) {
+ case ALL_READY:
+ jam();
+ startGcp(signal);
+ break;
+ case PREPARE_STARTED_NOT_COMMITTED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
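+ /**
+ * Some nodes may already have received GCP_PREPARE; resend it only to
+ * the nodes still in READY state and wait for their answers before
+ * moving on to the commit phase.
+ */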
+ c_GCP_PREPARE_Counter.clearWaitingFor();
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->gcpstate == NodeRecord::READY) {
+ jam();
+ c_GCP_PREPARE_Counter.setWaitingFor(nodePtr.i);
+ sendGCP_PREPARE(signal, nodePtr.i);
+ }//if
+ nodePtr.i = nodePtr.p->nextNode;
+ } while(nodePtr.i != RNIL);
+ if (c_GCP_PREPARE_Counter.done()) {
+ jam();
+ gcpcommitreqLab(signal);
+ }//if
+ break;
+ }
+ case ALL_PREPARED:
+ jam();
+ gcpcommitreqLab(signal);
+ break;
+ case COMMIT_STARTED_NOT_COMPLETED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
+ c_GCP_COMMIT_Counter.clearWaitingFor();
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->gcpstate == NodeRecord::PREPARE_RECEIVED) {
+ jam();
+ sendGCP_COMMIT(signal, nodePtr.i);
+ c_GCP_COMMIT_Counter.setWaitingFor(nodePtr.i);
+ } else {
+ ndbrequire((nodePtr.p->gcpstate == NodeRecord::NODE_FINISHED) ||
+ (nodePtr.p->gcpstate == NodeRecord::COMMIT_SENT));
+ }//if
+ nodePtr.i = nodePtr.p->nextNode;
+ } while(nodePtr.i != RNIL);
+ if (c_GCP_COMMIT_Counter.done()){
+ jam();
+ gcpsavereqLab(signal);
+ }//if
+ break;
+ }
+ case COMMIT_COMPLETED:
+ jam();
+ gcpsavereqLab(signal);
+ break;
+ case SAVE_STARTED_NOT_COMPLETED:
+ {
+ NodeRecordPtr nodePtr;
+ jam();
+ SYSFILE->newestRestorableGCI = coldgcp;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ SYSFILE->lastCompletedGCI[nodePtr.i] = coldgcp;
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+ /**-------------------------------------------------------------------
+ * THE FAILED NODE ALSO PARTICIPATED IN THIS GLOBAL CHECKPOINT,
+ * WHICH IS RECORDED HERE.
+ *-------------------------------------------------------------------*/
+ SYSFILE->lastCompletedGCI[failedNodeId] = coldgcp;
+ copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ signal->theData[0] = NDB_LE_GCP_TakeoverCompleted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ /*--------------------------------------------------*/
+ /* WE SEPARATE HANDLING OF GLOBAL CHECKPOINTS */
+ /* AND LOCAL CHECKPOINTS HERE. LCP'S HAVE TO */
+ /* REMOVE ALL FAILED FRAGMENTS BEFORE WE CAN */
+ /* HANDLE THE LCP PROTOCOL. */
+ /*--------------------------------------------------*/
+ checkLocalNodefailComplete(signal, failedNodeId, NF_GCP_TAKE_OVER);
+
+ return;
+}//Dbdih::MASTER_GCPhandling()
+
+void
+Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, Uint32 tableId)
+{
+ jamEntry();
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ const Uint32 RT_BREAK = 64;
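+ /**
+ * Real-time break: scan at most RT_BREAK tables per invocation and
+ * continue via a CONTINUEB signal to ourselves (see end of function)
+ * so that the job buffer is not blocked for too long.
+ */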
+ if (ERROR_INSERTED(7125)) {
+ return;
+ }//if
+ for (Uint32 i = 0; i<RT_BREAK; i++) {
+ jam();
+ if (tabPtr.i >= ctabFileSize){
+ jam();
+ /**
+ * Done with the entire loop.
+ * Return to master
+ */
+ setAllowNodeStart(nodeId, true);
+ if (getNodeStatus(nodeId) == NodeRecord::STARTING) {
+ jam();
+ StartInfoConf * conf = (StartInfoConf*)&signal->theData[0];
+ conf->sendingNodeId = cownNodeId;
+ conf->startingNodeId = nodeId;
+ sendSignal(cmasterdihref, GSN_START_INFOCONF, signal,
+ StartInfoConf::SignalLength, JBB);
+ }//if
+ return;
+ }//if
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ jam();
+ invalidateNodeLCP(signal, nodeId, tabPtr);
+ return;
+ }//if
+ tabPtr.i++;
+ }//for
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}//Dbdih::invalidateNodeLCP()
+
+void
+Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, TabRecordPtr tabPtr)
+{
+ /**
+ * Check that no one else is using the tab descriptor
+ */
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * For each fragment
+ */
+ bool modified = false;
+ FragmentstorePtr fragPtr;
+ for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
+ jam();
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+ /**
+ * For each replica record
+ */
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->oldStoredReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == nodeId){
+ jam();
+ /**
+ * Found one with correct node id
+ */
+ /**
+ * Invalidate all LCP's
+ */
+ modified = true;
+ for(int i = 0; i < MAX_LCP_STORED; i++) {
+ replicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ /**
+ * And reset nextLcp
+ */
+ replicaPtr.p->nextLcp = 0;
+ }//if
+ }//for
+ }//for
+
+ if (modified) {
+ jam();
+ /**
+ * Save table description to disk
+ */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_INVALIDATE_NODE_LCP;
+ tabPtr.p->tabUpdateState = TabRecord::US_INVALIDATE_NODE_LCP;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
+
+ jam();
+ /**
+ * Move to next table
+ */
+ tabPtr.i++;
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::invalidateNodeLCP()
+
+/*------------------------------------------------*/
+/* INPUT: TABPTR */
+/* TNODEID */
+/*------------------------------------------------*/
+void Dbdih::removeNodeFromTables(Signal* signal,
+ Uint32 nodeId, Uint32 tableId)
+{
+ jamEntry();
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ const Uint32 RT_BREAK = 64;
+ for (Uint32 i = 0; i<RT_BREAK; i++) {
+ jam();
+ if (tabPtr.i >= ctabFileSize){
+ jam();
+ removeNodeFromTablesComplete(signal, nodeId);
+ return;
+ }//if
+
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ jam();
+ removeNodeFromTable(signal, nodeId, tabPtr);
+ return;
+ }//if
+ tabPtr.i++;
+ }//for
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void Dbdih::removeNodeFromTable(Signal* signal,
+ Uint32 nodeId, TabRecordPtr tabPtr){
+
+ /**
+ * Check that no one else is using the tab descriptor
+ */
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * For each fragment
+ */
+ Uint32 noOfRemovedReplicas = 0; // No of replicas removed
+ Uint32 noOfRemovedLcpReplicas = 0; // No of replicas in LCP removed
+ Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining
+
+ //const Uint32 lcpId = SYSFILE->latestLCP_ID;
+ const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
+
+ FragmentstorePtr fragPtr;
+ for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){
+ jam();
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ /**
+ * For each replica record
+ */
+ Uint32 replicaNo = 0;
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica, replicaNo++) {
+ jam();
+
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == nodeId){
+ jam();
+ noOfRemovedReplicas++;
+ removeNodeFromStored(nodeId, fragPtr, replicaPtr);
+ if(replicaPtr.p->lcpOngoingFlag){
+ jam();
+ /**
+ * This replica is currently LCP:ed
+ */
+ ndbrequire(fragPtr.p->noLcpReplicas > 0);
+ fragPtr.p->noLcpReplicas --;
+
+ noOfRemovedLcpReplicas ++;
+ replicaPtr.p->lcpOngoingFlag = false;
+ }
+ }
+ }
+ noOfRemainingLcpReplicas += fragPtr.p->noLcpReplicas;
+ }
+
+ if(noOfRemovedReplicas == 0){
+ jam();
+ /**
+ * The table had no replica on the failed node
+ * continue with next table
+ */
+ tabPtr.i++;
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ /**
+ * We did remove at least one replica
+ */
+ bool ok = false;
+ switch(tabPtr.p->tabLcpStatus){
+ case TabRecord::TLS_COMPLETED:
+ ok = true;
+ jam();
+ /**
+ * WE WILL WRITE THE TABLE DESCRIPTION TO DISK AT THIS TIME
+ * INDEPENDENT OF WHAT THE LOCAL CHECKPOINT NEEDED.
+ * THIS IS TO ENSURE THAT THE FAILED NODES ARE ALSO UPDATED ON DISK
+ * IN THE DIH DATA STRUCTURES BEFORE WE COMPLETE HANDLING OF THE
+ * NODE FAILURE.
+ */
+ ndbrequire(noOfRemovedLcpReplicas == 0);
+
+ tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
+ tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::TLS_ACTIVE:
+ ok = true;
+ jam();
+ /**
+ * The table is participating in an LCP currently
+ */
+ // Fall through
+ break;
+ case TabRecord::TLS_WRITING_TO_FILE:
+ ok = true;
+ jam();
+ /**
+ * This should never happen since we check the tabCopyStatus at the
+ * beginning of this function
+ */
+ ndbrequire(lcpOngoingFlag);
+ ndbrequire(false);
+ break;
+ }
+ ndbrequire(ok);
+
+ /**
+ * The table is participating in an LCP currently
+ * and we removed some replicas that should have been checkpointed
+ */
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+ ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE);
+
+ /**
+ * Save the table
+ */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE;
+ tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE;
+ tabPtr.p->tabRemoveNode = nodeId;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ if(noOfRemainingLcpReplicas == 0){
+ jam();
+ /**
+ * The removal on the failed node made the LCP complete
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
+ checkLcpAllTablesDoneInLqh();
+ }
+}
+
+void
+Dbdih::removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId){
+ jam();
+
+ /**
+ * Check if we "accidentally" completed an LCP
+ */
+ checkLcpCompletedLab(signal);
+
+ /**
+ * Check if we (DIH) are finished with node fail handling
+ */
+ checkLocalNodefailComplete(signal, nodeId, NF_REMOVE_NODE_FROM_TABLE);
+}
+
+void
+Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
+ NodefailHandlingStep step){
+ jam();
+
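+ /**
+ * Each failed node carries a bitmask of outstanding node fail handling
+ * steps. Clear this step and, once no steps remain, report our own
+ * completion by sending NF_COMPLETEREP to ourselves.
+ */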
+ NodeRecordPtr nodePtr;
+ nodePtr.i = failedNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(nodePtr.p->m_nodefailSteps.get(step));
+ nodePtr.p->m_nodefailSteps.clear(step);
+
+ if(nodePtr.p->m_nodefailSteps.count() > 0){
+ jam();
+ return;
+ }
+
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = DBDIH;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodeId;
+ nf->from = __LINE__;
+ sendSignal(reference(), GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+}
+
+
+void
+Dbdih::setLocalNodefailHandling(Signal* signal, Uint32 failedNodeId,
+ NodefailHandlingStep step){
+ jam();
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = failedNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(!nodePtr.p->m_nodefailSteps.get(step));
+ nodePtr.p->m_nodefailSteps.set(step);
+}
+
+void Dbdih::startLcpTakeOverLab(Signal* signal, Uint32 failedNodeId)
+{
+ /*--------------------------------------------------------------------*/
+ // Start LCP master take over process. Consists of the following steps.
+ // 1) Ensure that all LQH's have reported all fragments they have been
+ // told to checkpoint. Can be a fairly long step time-wise.
+ // 2) Query all nodes about their LCP status.
+ // During the query process we do not want our own state to change.
+ // This can change due to delayed reception of LCP_REPORT, completed
+ // save of table on disk or reception of DIH_LCPCOMPLETE from other
+ // node.
+ /*--------------------------------------------------------------------*/
+}//Dbdih::startLcpTakeOverLab()
+
+void Dbdih::execEMPTY_LCP_CONF(Signal* signal)
+{
+ jamEntry();
+
+ ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_EMPTY_LCP);
+
+ const EmptyLcpConf * const conf = (EmptyLcpConf *)&signal->theData[0];
+ Uint32 nodeId = conf->senderNodeId;
+
+ if(!conf->idle){
+ jam();
+ if (conf->tableId < c_lcpMasterTakeOverState.minTableId) {
+ jam();
+ c_lcpMasterTakeOverState.minTableId = conf->tableId;
+ c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
+ } else if (conf->tableId == c_lcpMasterTakeOverState.minTableId &&
+ conf->fragmentId < c_lcpMasterTakeOverState.minFragId) {
+ jam();
+ c_lcpMasterTakeOverState.minFragId = conf->fragmentId;
+ }//if
+ if(isMaster()){
+ jam();
+ c_lcpState.m_LAST_LCP_FRAG_ORD.setWaitingFor(nodeId);
+ }
+ }
+
+ receiveLoopMacro(EMPTY_LCP_REQ, nodeId);
+ /*--------------------------------------------------------------------*/
+ // Received all EMPTY_LCPCONF. We can continue with next phase of the
+ // take over LCP master process.
+ /*--------------------------------------------------------------------*/
+ c_lcpMasterTakeOverState.set(LMTOS_WAIT_LCP_FRAG_REP, __LINE__);
+ checkEmptyLcpComplete(signal);
+ return;
+}//Dbdih::execEMPTY_LCP_CONF()
+
+void
+Dbdih::checkEmptyLcpComplete(Signal *signal){
+
+ ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_LCP_FRAG_REP);
+
+ if(c_lcpState.noOfLcpFragRepOutstanding > 0){
+ jam();
+ return;
+ }
+
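+ /**
+ * As new master we report the take over start and query all nodes
+ * about their LCP state with MASTER_LCPREQ; as a participant we answer
+ * the new master with MASTER_LCPCONF.
+ */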
+ if(isMaster()){
+ jam();
+
+ signal->theData[0] = NDB_LE_LCP_TakeoverStarted;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ signal->theData[0] = 7012;
+ execDUMP_STATE_ORD(signal);
+
+ c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
+ MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
+ req->masterRef = reference();
+ req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
+ sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
+ } else {
+ sendMASTER_LCPCONF(signal);
+ }
+}
+
+/*--------------------------------------------------*/
+/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/
+/* QUERYING THIS NODE ABOUT THE STATE OF THE */
+/* LOCAL CHECKPOINT PROTOCOL. */
+/*--------------------------------------------------*/
+void Dbdih::execMASTER_LCPREQ(Signal* signal)
+{
+ const MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
+ jamEntry();
+ const BlockReference newMasterBlockref = req->masterRef;
+
+ Uint32 failedNodeId = req->failedNodeId;
+
+ /**
+ * There can be no take over with the same master
+ */
+ ndbrequire(c_lcpState.m_masterLcpDihRef != newMasterBlockref);
+ c_lcpState.m_masterLcpDihRef = newMasterBlockref;
+ c_lcpState.m_MASTER_LCPREQ_Received = true;
+ c_lcpState.m_MASTER_LCPREQ_FailedNodeId = failedNodeId;
+
+ if(newMasterBlockref != cmasterdihref){
+ jam();
+ ndbrequire(0);
+ }
+
+ sendMASTER_LCPCONF(signal);
+}//Dbdih::execMASTER_LCPREQ()
+
+void
+Dbdih::sendMASTER_LCPCONF(Signal * signal){
+
+ if(!c_EMPTY_LCP_REQ_Counter.done()){
+ /**
+ * Have not yet received all EMPTY_LCP_CONF;
+ * dare not answer with MASTER_LCPCONF yet
+ */
+ jam();
+ return;
+ }
+
+ if(!c_lcpState.m_MASTER_LCPREQ_Received){
+ jam();
+ /**
+ * Has not received MASTER_LCPREQ yet
+ */
+ return;
+ }
+
+ if(c_lcpState.lcpStatus == LCP_INIT_TABLES){
+ jam();
+ /**
+ * Still aborting old initLcpLab
+ */
+ return;
+ }
+
+ if(c_lcpState.lcpStatus == LCP_COPY_GCI){
+ jam();
+ /**
+ * Restart it
+ */
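+ /**
+ * The checkpoint had only reached the copy-GCI phase locally; step back
+ * latestLCP_ID and return to idle so that the new master can restart
+ * the checkpoint from the beginning.
+ */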
+ //Uint32 lcpId = SYSFILE->latestLCP_ID;
+ SYSFILE->latestLCP_ID--;
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+#if 0
+ if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
+ ndbout_c("Dbdih: Also resetting c_copyGCISlave");
+ c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
+ c_copyGCISlave.m_expectedNextWord = 0;
+ }
+#endif
+ }
+
+ bool ok = false;
+ MasterLCPConf::State lcpState;
+ switch (c_lcpState.lcpStatus) {
+ case LCP_STATUS_IDLE:
+ ok = true;
+ jam();
+ /*------------------------------------------------*/
+ /* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */
+ /* SINCE NO COPY OF RESTART INFORMATION HAS */
+ /* BEEN RECEIVED YET. ALSO THE PREVIOUS */
+ /* CHECKPOINT HAS BEEN FULLY COMPLETED. */
+ /*------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_STATUS_IDLE;
+ break;
+ case LCP_STATUS_ACTIVE:
+ ok = true;
+ jam();
+ /*--------------------------------------------------*/
+ /* COPY OF RESTART INFORMATION HAS BEEN */
+ /* PERFORMED AND ALSO A RESPONSE HAS BEEN SENT. */
+ /*--------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_STATUS_ACTIVE;
+ break;
+ case LCP_TAB_COMPLETED:
+ ok = true;
+ jam();
+ /*--------------------------------------------------------*/
+ /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
+ /* ALL TABLES. SAVE OF AT LEAST ONE TABLE IS */
+ /* STILL ONGOING. */
+ /*--------------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_TAB_COMPLETED;
+ break;
+ case LCP_TAB_SAVED:
+ ok = true;
+ jam();
+ /*--------------------------------------------------------*/
+ /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
+ /* ALL TABLES. ALL TABLES HAVE ALSO BEEN SAVED */
+ /* ALL OTHER NODES ARE NOT YET FINISHED WITH */
+ /* THE LOCAL CHECKPOINT. */
+ /*--------------------------------------------------------*/
+ lcpState = MasterLCPConf::LCP_TAB_SAVED;
+ break;
+ case LCP_TCGET:
+ case LCP_CALCULATE_KEEP_GCI:
+ case LCP_TC_CLOPSIZE:
+ case LCP_START_LCP_ROUND:
+ /**
+ * These states should only exist on the master,
+ * but since this is a master take over
+ * they are not allowed here
+ */
+ ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
+ break;
+ case LCP_COPY_GCI:
+ case LCP_INIT_TABLES:
+ ok = true;
+ /**
+ * These two states are handled by if statements above
+ */
+ ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
+ break;
+ }//switch
+ ndbrequire(ok);
+
+ Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
+ MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
+ conf->senderNodeId = cownNodeId;
+ conf->lcpState = lcpState;
+ conf->failedNodeId = failedNodeId;
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_MASTER_LCPCONF,
+ signal, MasterLCPConf::SignalLength, JBB);
+
+ // Answer to MASTER_LCPREQ sent, reset flag so
+ // that it's not sent again before another request comes in
+ c_lcpState.m_MASTER_LCPREQ_Received = false;
+
+ if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
+#ifdef VM_TRACE
+ ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
+#endif
+ sendLCP_COMPLETE_REP(signal);
+ }
+
+ if(!isMaster()){
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+ checkLocalNodefailComplete(signal, failedNodeId, NF_LCP_TAKE_OVER);
+ }
+
+ return;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbdih::LcpMasterTakeOverState state){
+ switch(state){
+ case Dbdih::LMTOS_IDLE:
+ out << "LMTOS_IDLE";
+ break;
+ case Dbdih::LMTOS_WAIT_EMPTY_LCP:
+ out << "LMTOS_WAIT_EMPTY_LCP";
+ break;
+ case Dbdih::LMTOS_WAIT_LCP_FRAG_REP:
+ out << "LMTOS_WAIT_EMPTY_LCP";
+ break;
+ case Dbdih::LMTOS_INITIAL:
+ out << "LMTOS_INITIAL";
+ break;
+ case Dbdih::LMTOS_ALL_IDLE:
+ out << "LMTOS_ALL_IDLE";
+ break;
+ case Dbdih::LMTOS_ALL_ACTIVE:
+ out << "LMTOS_ALL_ACTIVE";
+ break;
+ case Dbdih::LMTOS_LCP_CONCLUDING:
+ out << "LMTOS_LCP_CONCLUDING";
+ break;
+ case Dbdih::LMTOS_COPY_ONGOING:
+ out << "LMTOS_COPY_ONGOING";
+ break;
+ }
+ return out;
+}
+
+struct MASTERLCP_StateTransitions {
+ Dbdih::LcpMasterTakeOverState CurrentState;
+ MasterLCPConf::State ParticipantState;
+ Dbdih::LcpMasterTakeOverState NewState;
+};
+
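+/**
+ * Table of valid LCP master take over state transitions. Each row maps
+ * (current take over state, participant LCP state) to the new take over
+ * state; a combination not listed here is treated as an error in
+ * execMASTER_LCPCONF (ndbrequire(found)).
+ */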
+static const
+MASTERLCP_StateTransitions g_masterLCPTakeoverStateTransitions[] = {
+ /**
+ * Current = LMTOS_INITIAL
+ */
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_ALL_IDLE },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_ALL_ACTIVE },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_INITIAL,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_ALL_IDLE
+ */
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_ALL_IDLE },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_ALL_IDLE,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_COPY_ONGOING
+ */
+ { Dbdih::LMTOS_COPY_ONGOING,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_COPY_ONGOING,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ /**
+ * Current = LMTOS_ALL_ACTIVE
+ */
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_COPY_ONGOING },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_ALL_ACTIVE },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_ALL_ACTIVE,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ /**
+ * Current = LMTOS_LCP_CONCLUDING
+ */
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_STATUS_IDLE,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_STATUS_ACTIVE,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_TAB_COMPLETED,
+ Dbdih::LMTOS_LCP_CONCLUDING },
+
+ { Dbdih::LMTOS_LCP_CONCLUDING,
+ MasterLCPConf::LCP_TAB_SAVED,
+ Dbdih::LMTOS_LCP_CONCLUDING }
+};
+
+const Uint32 g_masterLCPTakeoverStateTransitionsRows =
+sizeof(g_masterLCPTakeoverStateTransitions) / sizeof(struct MASTERLCP_StateTransitions);
+
+void Dbdih::execMASTER_LCPCONF(Signal* signal)
+{
+ const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
+ jamEntry();
+ Uint32 senderNodeId = conf->senderNodeId;
+ MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
+ const Uint32 failedNodeId = conf->failedNodeId;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = senderNodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->lcpStateAtTakeOver = lcpState;
+
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPCONF");
+ printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
+#endif
+
+ bool found = false;
+ for(Uint32 i = 0; i<g_masterLCPTakeoverStateTransitionsRows; i++){
+ const struct MASTERLCP_StateTransitions * valid =
+ &g_masterLCPTakeoverStateTransitions[i];
+
+ if(valid->CurrentState == c_lcpMasterTakeOverState.state &&
+ valid->ParticipantState == lcpState){
+ jam();
+ found = true;
+ c_lcpMasterTakeOverState.set(valid->NewState, __LINE__);
+ break;
+ }
+ }
+ ndbrequire(found);
+
+ bool ok = false;
+ switch(lcpState){
+ case MasterLCPConf::LCP_STATUS_IDLE:
+ ok = true;
+ break;
+ case MasterLCPConf::LCP_STATUS_ACTIVE:
+ case MasterLCPConf::LCP_TAB_COMPLETED:
+ case MasterLCPConf::LCP_TAB_SAVED:
+ ok = true;
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.setWaitingFor(nodePtr.i);
+ break;
+ }
+ ndbrequire(ok);
+
+ receiveLoopMacro(MASTER_LCPREQ, senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the LCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_LCPhandling(signal, failedNodeId);
+}//Dbdih::execMASTER_LCPCONF()
+
+void Dbdih::execMASTER_LCPREF(Signal* signal)
+{
+ const MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0];
+ jamEntry();
+ receiveLoopMacro(MASTER_LCPREQ, ref->senderNodeId);
+ /*-------------------------------------------------------------------------*/
+ // We have now received all responses and are ready to take over the LCP
+ // protocol as master.
+ /*-------------------------------------------------------------------------*/
+ MASTER_LCPhandling(signal, ref->failedNodeId);
+}//Dbdih::execMASTER_LCPREF()
+
+void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
+{
+ /*-------------------------------------------------------------------------
+ *
+ * WE ARE NOW READY TO CONCLUDE THE TAKE OVER AS MASTER.
+ * WE HAVE ENOUGH INFO TO START UP ACTIVITIES IN THE PROPER PLACE.
+ * ALSO SET THE PROPER STATE VARIABLES.
+ *------------------------------------------------------------------------*/
+ c_lcpState.currentFragment.tableId = c_lcpMasterTakeOverState.minTableId;
+ c_lcpState.currentFragment.fragmentId = c_lcpMasterTakeOverState.minFragId;
+ c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH;
+
+ NodeRecordPtr failedNodePtr;
+ failedNodePtr.i = failedNodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ switch (c_lcpMasterTakeOverState.state) {
+ case LMTOS_ALL_IDLE:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // All nodes were idle in the LCP protocol. Start checking for start of LCP
+ // protocol.
+ /* --------------------------------------------------------------------- */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
+#endif
+ checkLcpStart(signal, __LINE__);
+ break;
+ case LMTOS_COPY_ONGOING:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We were in the starting process of the LCP protocol. We will restart the
+ // protocol by calculating the keep gci and storing the new lcp id.
+ /* --------------------------------------------------------------------- */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
+#endif
+ if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* WE NEED TO DECREASE THE LATEST LCP ID SINCE WE HAVE ALREADY */
+ /* STARTED THIS */
+ /* LOCAL CHECKPOINT. */
+ /*---------------------------------------------------------------------*/
+ Uint32 lcpId = SYSFILE->latestLCP_ID;
+#ifdef VM_TRACE
+ ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
+#endif
+ SYSFILE->latestLCP_ID--;
+ }//if
+ storeNewLcpIdLab(signal);
+ break;
+ case LMTOS_ALL_ACTIVE:
+ {
+ jam();
+ /* -------------------------------------------------------------------
+ * Everybody was in the active phase. We will restart sending
+ * LCP_FRAGORD to the nodes from the new master.
+ * We also need to set dihLcpStatus to ZACTIVE
+ * in the master node since the master will wait for all nodes to
+ * complete before finalising the LCP process.
+ * ------------------------------------------------------------------ */
+#ifdef VM_TRACE
+ ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
+ "startLcpRoundLoopLab(table=%u, fragment=%u)",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
+#endif
+
+ c_lcpState.keepGci = SYSFILE->keepGCI;
+ c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
+ startLcpRoundLoopLab(signal, 0, 0);
+ break;
+ }
+ case LMTOS_LCP_CONCLUDING:
+ {
+ jam();
+ /* ------------------------------------------------------------------- */
+ // The LCP process is in the finalisation phase. We simply wait for it to
+ // complete with signals arriving in. We need to check also if we should
+ // change state due to table write completion during state
+ // collection phase.
+ /* ------------------------------------------------------------------- */
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+ startLcpRoundLoopLab(signal, 0, 0);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ signal->theData[0] = NDB_LE_LCP_TakeoverCompleted;
+ signal->theData[1] = c_lcpMasterTakeOverState.state;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ signal->theData[0] = 7012;
+ execDUMP_STATE_ORD(signal);
+
+ signal->theData[0] = 7015;
+ execDUMP_STATE_ORD(signal);
+
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+
+ checkLocalNodefailComplete(signal, failedNodePtr.i, NF_LCP_TAKE_OVER);
+}
+
+/* ------------------------------------------------------------------------- */
+/* A BLOCK OR A NODE HAS COMPLETED THE HANDLING OF THE NODE FAILURE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execNF_COMPLETEREP(Signal* signal)
+{
+ NodeRecordPtr failedNodePtr;
+ NFCompleteRep * const nfCompleteRep = (NFCompleteRep *)&signal->theData[0];
+ jamEntry();
+ const Uint32 blockNo = nfCompleteRep->blockNo;
+ Uint32 nodeId = nfCompleteRep->nodeId;
+ failedNodePtr.i = nfCompleteRep->failedNodeId;
+
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord);
+ switch (blockNo) {
+ case DBTC:
+ jam();
+ ndbrequire(failedNodePtr.p->dbtcFailCompleted == ZFALSE);
+ /* -------------------------------------------------------------------- */
+ // Report the event that DBTC completed node failure handling.
+ /* -------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBTC;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbtcFailCompleted = ZTRUE;
+ break;
+ case DBDICT:
+ jam();
+ ndbrequire(failedNodePtr.p->dbdictFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBDICT completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBDICT;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbdictFailCompleted = ZTRUE;
+ break;
+ case DBDIH:
+ jam();
+ ndbrequire(failedNodePtr.p->dbdihFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBDIH completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBDIH;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dbdihFailCompleted = ZTRUE;
+ break;
+ case DBLQH:
+ jam();
+ ndbrequire(failedNodePtr.p->dblqhFailCompleted == ZFALSE);
+ /* --------------------------------------------------------------------- */
+ // Report the event that DBLQH completed node failure handling.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = DBLQH;
+ signal->theData[2] = failedNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ failedNodePtr.p->dblqhFailCompleted = ZTRUE;
+ break;
+ case 0: /* Node has finished */
+ jam();
+ ndbrequire(nodeId < MAX_NDB_NODES);
+
+ if (failedNodePtr.p->recNODE_FAILREP == ZFALSE) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ // We received a report about completion of node failure before we
+ // received the message about the NODE failure ourselves.
+ // We will send the signal to ourselves with a small delay
+ // (10 milliseconds).
+ /* ------------------------------------------------------------------- */
+ //nf->from = __LINE__;
+ sendSignalWithDelay(reference(), GSN_NF_COMPLETEREP, signal, 10,
+ signal->length());
+ return;
+ }//if
+
+ if (!failedNodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(nodeId)){
+ jam();
+ return;
+ }
+
+ failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor(nodeId);
+
+ /* -------------------------------------------------------------------- */
+ // Report the event that nodeId has completed node failure handling.
+ /* -------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = 0;
+ signal->theData[2] = failedNodePtr.i;
+ signal->theData[3] = nodeId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ nodeFailCompletedCheckLab(signal, failedNodePtr);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ if (failedNodePtr.p->dbtcFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dbdictFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dbdihFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ if (failedNodePtr.p->dblqhFailCompleted == ZFALSE) {
+ jam();
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* ALL BLOCKS IN THIS NODE HAVE COMPLETED THEIR PART OF HANDLING THE */
+ /* NODE FAILURE. WE CAN NOW REPORT THIS COMPLETION TO ALL OTHER NODES. */
+ /* ----------------------------------------------------------------------- */
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0];
+ nf->blockNo = 0;
+ nf->nodeId = cownNodeId;
+ nf->failedNodeId = failedNodePtr.i;
+ nf->from = __LINE__;
+ sendSignal(ref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//if
+ }//for
+ return;
+}//Dbdih::execNF_COMPLETEREP()
+
+void Dbdih::nodeFailCompletedCheckLab(Signal* signal,
+ NodeRecordPtr failedNodePtr)
+{
+ jam();
+ if (!failedNodePtr.p->m_NF_COMPLETE_REP.done()){
+ jam();
+ return;
+ }//if
+ /* ---------------------------------------------------------------------- */
+ /* ALL BLOCKS IN ALL NODES HAVE NOW REPORTED COMPLETION OF THE NODE */
+ /* FAILURE HANDLING. WE ARE NOW READY TO ACCEPT THAT THIS NODE STARTS */
+ /* AGAIN. */
+ /* ---------------------------------------------------------------------- */
+ jam();
+ failedNodePtr.p->nodeStatus = NodeRecord::DEAD;
+ failedNodePtr.p->recNODE_FAILREP = ZFALSE;
+
+ /* ---------------------------------------------------------------------- */
+ // Report the event that all nodes completed node failure handling.
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
+ signal->theData[1] = 0;
+ signal->theData[2] = failedNodePtr.i;
+ signal->theData[3] = 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ /* ---------------------------------------------------------------------- */
+ // Report to QMGR that we have concluded recovery handling of this node.
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = failedNodePtr.i;
+ sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB);
+
+ if (isMaster()) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* IF WE ARE MASTER WE MUST CHECK IF COPY FRAGMENT WAS INTERRUPTED */
+ /* BY THE FAILED NODES. */
+ /* --------------------------------------------------------------------- */
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = 0;
+ ptrAss(takeOverPtr, takeOverRecord);
+ if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) &&
+ (failedNodePtr.i == takeOverPtr.p->toCopyNode)) {
+ jam();
+#ifdef VM_TRACE
+ ndbrequire("Tell jonas" == 0);
+#endif
+ /*------------------------------------------------------------------*/
+ /* WE ARE CURRENTLY IN THE PROCESS OF COPYING A FRAGMENT. WE */
+ /* WILL CHECK IF THE COPY NODE HAVE FAILED. */
+ /*------------------------------------------------------------------*/
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT;
+ startNextCopyFragment(signal, takeOverPtr.i);
+ return;
+ }//if
+ checkStartTakeOver(signal);
+ }//if
+ return;
+}//Dbdih::nodeFailCompletedCheckLab()
+
+/*****************************************************************************/
+/* ********** SEIZING / RELEASING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.4 L O C A L N O D E S E I Z E
+ ************************************
+ */
+/*
+ 3.4.1 L O C A L N O D E S E I Z E R E Q U E S T
+ ******************************************************
+ */
+void Dbdih::execDISEIZEREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ jamEntry();
+ Uint32 userPtr = signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+ ndbrequire(cfirstconnect != RNIL);
+ connectPtr.i = cfirstconnect;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ cfirstconnect = connectPtr.p->nfConnect;
+ connectPtr.p->nfConnect = RNIL;
+ connectPtr.p->userpointer = userPtr;
+ connectPtr.p->userblockref = userRef;
+ connectPtr.p->connectState = ConnectRecord::INUSE;
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = connectPtr.i;
+ sendSignal(userRef, GSN_DISEIZECONF, signal, 2, JBB);
+}//Dbdih::execDISEIZEREQ()
+
+/*
+ 3.5 L O C A L N O D E R E L E A S E
+ ****************************************
+ */
+/*
+ 3.5.1 L O C A L N O D E R E L E A S E R E Q U E S T
+ *******************************************************=
+ */
+void Dbdih::execDIRELEASEREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ jamEntry();
+ connectPtr.i = signal->theData[0];
+ Uint32 userRef = signal->theData[2];
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ ndbrequire(connectPtr.p->connectState != ConnectRecord::FREE);
+ ndbrequire(connectPtr.p->userblockref == userRef);
+ signal->theData[0] = connectPtr.p->userpointer;
+ sendSignal(connectPtr.p->userblockref, GSN_DIRELEASECONF, signal, 1, JBB);
+ release_connect(connectPtr);
+}//Dbdih::execDIRELEASEREQ()
+
+/*
+ 3.7 A D D T A B L E
+ **********************=
+ */
+/*****************************************************************************/
+/* ********** TABLE ADDING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.7.1 A D D T A B L E M A I N L Y
+ ***************************************
+ */
+void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
+ jamEntry();
+ CreateFragmentationReq * const req =
+ (CreateFragmentationReq*)signal->getDataPtr();
+
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 fragmentNode = req->fragmentNode;
+ const Uint32 fragmentType = req->fragmentationType;
+ //const Uint32 fragmentCount = req->noOfFragments;
+ const Uint32 primaryTableId = req->primaryTableId;
+
+ Uint32 err = 0;
+
+ do {
+ Uint32 noOfFragments = 0;
+ Uint32 noOfReplicas = cnoReplicas;
+ switch(fragmentType){
+ case DictTabInfo::AllNodesSmallTable:
+ jam();
+ noOfFragments = csystemnodes;
+ break;
+ case DictTabInfo::AllNodesMediumTable:
+ jam();
+ noOfFragments = 2 * csystemnodes;
+ break;
+ case DictTabInfo::AllNodesLargeTable:
+ jam();
+ noOfFragments = 4 * csystemnodes;
+ break;
+ case DictTabInfo::SingleFragment:
+ jam();
+ noOfFragments = 1;
+ break;
+#if 0
+ case DictTabInfo::SpecifiedFragmentCount:
+ noOfFragments = (fragmentCount == 0 ? 1 : (fragmentCount + 1)/ 2);
+ break;
+#endif
+ default:
+ jam();
+ err = CreateFragmentationRef::InvalidFragmentationType;
+ break;
+ }
+ if(err)
+ break;
+
+ NodeGroupRecordPtr NGPtr;
+ TabRecordPtr primTabPtr;
+ if (primaryTableId == RNIL) {
+ if(fragmentNode == 0){
+ jam();
+ NGPtr.i = 0;
+ if(noOfFragments < csystemnodes)
+ {
+ NGPtr.i = c_nextNodeGroup;
+ c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
+ }
+ } else if(! (fragmentNode < MAX_NDB_NODES)) {
+ jam();
+ err = CreateFragmentationRef::InvalidNodeId;
+ } else {
+ jam();
+ const Uint32 stat = Sysfile::getNodeStatus(fragmentNode,
+ SYSFILE->nodeStatus);
+ switch (stat) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ case Sysfile::NS_TakeOver:
+ jam();
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ case Sysfile::NS_NotDefined:
+ jam();
+ default:
+ jam();
+ err = CreateFragmentationRef::InvalidNodeType;
+ break;
+ }
+ if(err)
+ break;
+ NGPtr.i = Sysfile::getNodeGroup(fragmentNode,
+ SYSFILE->nodeGroups);
+ break;
+ }
+ } else {
+ if (primaryTableId >= ctabFileSize) {
+ jam();
+ err = CreateFragmentationRef::InvalidPrimaryTable;
+ break;
+ }
+ primTabPtr.i = primaryTableId;
+ ptrAss(primTabPtr, tabRecord);
+ if (primTabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ err = CreateFragmentationRef::InvalidPrimaryTable;
+ break;
+ }
+ if (noOfFragments != primTabPtr.p->totalfragments) {
+ jam();
+ err = CreateFragmentationRef::InvalidFragmentationType;
+ break;
+ }
+ }
+
+ Uint32 count = 2;
+ Uint16 *fragments = (Uint16*)(signal->theData+25);
+ if (primaryTableId == RNIL) {
+ jam();
+ Uint8 next_replica_node[MAX_NDB_NODES];
+ memset(next_replica_node,0,sizeof(next_replica_node));
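+ /**
+ * New table: replicas are assigned round robin within each node group,
+ * starting at next_replica_node[NGPtr.i], and the node group is stepped
+ * for every new fragment so that fragments spread evenly over all node
+ * groups.
+ */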
+ for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
+ jam();
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ const Uint32 max = NGPtr.p->nodeCount;
+
+ Uint32 tmp= next_replica_node[NGPtr.i];
+ for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++)
+ {
+ jam();
+ const Uint32 nodeId = NGPtr.p->nodesInGroup[tmp++];
+ fragments[count++] = nodeId;
+ tmp = (tmp >= max ? 0 : tmp);
+ }
+ tmp++;
+ next_replica_node[NGPtr.i]= (tmp >= max ? 0 : tmp);
+
+ /**
+ * Next node group for next fragment
+ */
+ NGPtr.i++;
+ NGPtr.i = (NGPtr.i == cnoOfNodeGroups ? 0 : NGPtr.i);
+ }
+ } else {
+ for (Uint32 fragNo = 0;
+ fragNo < primTabPtr.p->totalfragments; fragNo++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ ReplicaRecordPtr replicaPtr;
+ getFragstore(primTabPtr.p, fragNo, fragPtr);
+ fragments[count++] = fragPtr.p->preferredPrimary;
+ for (replicaPtr.i = fragPtr.p->storedReplicas;
+ replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
+ jam();
+ fragments[count++] = replicaPtr.p->procNode;
+ }//if
+ }//for
+ for (replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
+ jam();
+ fragments[count++] = replicaPtr.p->procNode;
+ }//if
+ }//for
+ }
+ }
+ ndbrequire(count == (2 + noOfReplicas * noOfFragments));
+
+ CreateFragmentationConf * const conf =
+ (CreateFragmentationConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->noOfReplicas = noOfReplicas;
+ conf->noOfFragments = noOfFragments;
+
+ fragments[0] = noOfReplicas;
+ fragments[1] = noOfFragments;
+
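+ /**
+ * The fragments array holds count Uint16 entries and is shipped as a
+ * long signal section; (count + 1) / 2 rounds up to whole 32-bit words.
+ */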
+ if(senderRef != 0)
+ {
+ LinearSectionPtr ptr[3];
+ ptr[0].p = (Uint32*)&fragments[0];
+ ptr[0].sz = (count + 1) / 2;
+ sendSignal(senderRef,
+ GSN_CREATE_FRAGMENTATION_CONF,
+ signal,
+ CreateFragmentationConf::SignalLength,
+ JBB,
+ ptr,
+ 1);
+ }
+ else
+ {
+ // Execute direct
+ signal->theData[0] = 0;
+ }
+ return;
+ } while(false);
+
+ if(senderRef != 0)
+ {
+ CreateFragmentationRef * const ref =
+ (CreateFragmentationRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
+ CreateFragmentationRef::SignalLength, JBB);
+ }
+ else
+ {
+ // Execute direct
+ signal->theData[0] = err;
+ }
+}
+
+void Dbdih::execDIADDTABREQ(Signal* signal)
+{
+ jamEntry();
+
+ DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr();
+
+ // Seize connect record
+ ndbrequire(cfirstconnect != RNIL);
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = cfirstconnect;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ cfirstconnect = connectPtr.p->nfConnect;
+
+ const Uint32 userPtr = req->connectPtr;
+ const BlockReference userRef = signal->getSendersBlockRef();
+ connectPtr.p->nfConnect = RNIL;
+ connectPtr.p->userpointer = userPtr;
+ connectPtr.p->userblockref = userRef;
+ connectPtr.p->connectState = ConnectRecord::INUSE;
+ connectPtr.p->table = req->tableId;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->connectrec = connectPtr.i;
+ tabPtr.p->tableType = req->tableType;
+ tabPtr.p->schemaVersion = req->schemaVersion;
+ tabPtr.p->primaryTableId = req->primaryTableId;
+
+ if(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE){
+ jam();
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+ return;
+ }
+
+ if(getNodeState().getSystemRestartInProgress() &&
+ tabPtr.p->tabStatus == TabRecord::TS_IDLE){
+ jam();
+
+ ndbrequire(cmasterNodeId == getOwnNodeId());
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+
+ initTableFile(tabPtr);
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+ return;
+ }
+
+ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+ /* AT INITIALISATION THE FILE OF TABLE DESCRIPTIONS */
+ /* IS CREATED WITH AN APPROPRIATE SIZE. EACH RECORD */
+ /* IN THIS FILE HAS THE INFORMATION ABOUT ONE TABLE. */
+ /* THE POINTER TO THIS RECORD IS THE TABLE REFERENCE. */
+ /* IN THE BEGINNING ALL RECORDS ARE CREATED BUT THEY */
+ /* DO NOT HAVE ANY INFORMATION ABOUT ANY TABLE. */
+ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+ tabPtr.p->tabStatus = TabRecord::TS_CREATING;
+ tabPtr.p->storedTable = req->storedTable;
+ tabPtr.p->method = TabRecord::HASH;
+ tabPtr.p->kvalue = req->kValue;
+
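+ /**
+ * The fragmentation data arrives as a Uint32 section but is consumed as
+ * an array of Uint16 node ids; the anonymous union merely keeps the
+ * Uint16 array 32-bit aligned for the copy() below.
+ */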
+ union {
+ Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
+ Uint32 align;
+ };
+ SegmentedSectionPtr fragDataPtr;
+ signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
+ copy((Uint32*)fragments, fragDataPtr);
+ releaseSections(signal);
+
+ const Uint32 noReplicas = fragments[0];
+ const Uint32 noFragments = fragments[1];
+
+ tabPtr.p->noOfBackups = noReplicas - 1;
+ tabPtr.p->totalfragments = noFragments;
+ ndbrequire(noReplicas == cnoReplicas); // Only allowed
+
+ if (ERROR_INSERTED(7173)) {
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }
+ if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
+ jam();
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }//if
+ if (noFragments > cremainingfrags) {
+ jam();
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }//if
+
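+ /**
+ * Set up the linear hashing parameters: logTotalFragments becomes the
+ * largest power of two not exceeding totalfragments, and mask and
+ * hashpointer are then used to map hash values to fragment ids.
+ */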
+ Uint32 logTotalFragments = 1;
+ while (logTotalFragments <= tabPtr.p->totalfragments) {
+ jam();
+ logTotalFragments <<= 1;
+ }
+ logTotalFragments >>= 1;
+ tabPtr.p->mask = logTotalFragments - 1;
+ tabPtr.p->hashpointer = tabPtr.p->totalfragments - logTotalFragments;
+ allocFragments(tabPtr.p->totalfragments, tabPtr);
+
+ Uint32 index = 2;
+ for (Uint32 fragId = 0; fragId < noFragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ Uint32 activeIndex = 0;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ fragPtr.p->preferredPrimary = fragments[index];
+ for (Uint32 i = 0; i<noReplicas; i++) {
+ const Uint32 nodeId = fragments[index++];
+ ReplicaRecordPtr replicaPtr;
+ allocStoredReplica(fragPtr, replicaPtr, nodeId);
+ if (getNodeStatus(nodeId) == NodeRecord::ALIVE) {
+ jam();
+ ndbrequire(activeIndex < MAX_REPLICAS);
+ fragPtr.p->activeNodes[activeIndex] = nodeId;
+ activeIndex++;
+ } else {
+ jam();
+ removeStoredReplica(fragPtr, replicaPtr);
+ linkOldStoredReplica(fragPtr, replicaPtr);
+ }//if
+ }//for
+ fragPtr.p->fragReplicas = activeIndex;
+ ndbrequire(activeIndex > 0 && fragPtr.p->storedReplicas != RNIL);
+ }
+ initTableFile(tabPtr);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_ADD_TABLE_MASTER;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}
+
+void
+Dbdih::addTable_closeConf(Signal * signal, Uint32 tabPtrI){
+ TabRecordPtr tabPtr;
+ tabPtr.i = tabPtrI;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = tabPtr.p->connectrec;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+}
+
+void
+Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
+ TabRecordPtr tabPtr, Uint32 fragId){
+ jam();
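+ /**
+ * Scan fragments from fragId upwards and stop at the first one that has
+ * a stored or old stored replica on this node; an ADD_FRAGREQ is sent
+ * for that fragment. If none remains, the add table is confirmed with
+ * DIADDTABCONF and the connect record is released.
+ */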
+ const Uint32 fragCount = tabPtr.p->totalfragments;
+ ReplicaRecordPtr replicaPtr; replicaPtr.i = RNIL;
+ for(; fragId<fragCount; fragId++){
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ while(replicaPtr.i != RNIL){
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == getOwnNodeId()){
+ break;
+ }
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ break;
+ }
+
+ replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while(replicaPtr.i != RNIL){
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if(replicaPtr.p->procNode == getOwnNodeId()){
+ break;
+ }
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ break;
+ }
+ }
+
+ if(replicaPtr.i != RNIL){
+ jam();
+ ndbrequire(fragId < fragCount);
+ ndbrequire(replicaPtr.p->procNode == getOwnNodeId());
+
+ Uint32 requestInfo = 0;
+ if(!tabPtr.p->storedTable){
+ requestInfo |= LqhFragReq::TemporaryTable;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ requestInfo |= LqhFragReq::CreateInRunning;
+ }
+
+ AddFragReq* const req = (AddFragReq*)signal->getDataPtr();
+ req->dihPtr = connectPtr.i;
+ req->senderData = connectPtr.p->userpointer;
+ req->fragmentId = fragId;
+ req->requestInfo = requestInfo;
+ req->tableId = tabPtr.i;
+ req->nextLCP = 0;
+ req->nodeId = getOwnNodeId();
+ req->totalFragments = fragCount;
+ req->startGci = SYSFILE->newestRestorableGCI;
+ sendSignal(DBDICT_REF, GSN_ADD_FRAGREQ, signal,
+ AddFragReq::SignalLength, JBB);
+ return;
+ }
+
+ // Done
+ DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
+ conf->senderData = connectPtr.p->userpointer;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABCONF, signal,
+ DiAddTabConf::SignalLength, JBB);
+
+ // Release
+ release_connect(connectPtr);
+}
+void
+Dbdih::release_connect(ConnectRecordPtr ptr)
+{
+ ptr.p->userblockref = ZNIL;
+ ptr.p->userpointer = RNIL;
+ ptr.p->connectState = ConnectRecord::FREE;
+ ptr.p->nfConnect = cfirstconnect;
+ cfirstconnect = ptr.i;
+}
+
+void
+Dbdih::execADD_FRAGCONF(Signal* signal){
+ jamEntry();
+ AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = conf->dihPtr;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = connectPtr.p->table;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, conf->fragId + 1);
+}
+
+void
+Dbdih::execADD_FRAGREF(Signal* signal){
+ jamEntry();
+ AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = ref->dihPtr;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ {
+ DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
+ ref->senderData = connectPtr.p->userpointer;
+ ref->errorCode = ~0;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal,
+ DiAddTabRef::SignalLength, JBB);
+ }
+
+ // Release
+ release_connect(connectPtr);
+}
+
+/*
+ 3.7.1.3 R E F U S E
+ *********************
+ */
+void Dbdih::addtabrefuseLab(Signal* signal, ConnectRecordPtr connectPtr, Uint32 errorCode)
+{
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = errorCode;
+ sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal, 2, JBB);
+ release_connect(connectPtr);
+ return;
+}//Dbdih::addtabrefuseLab()
+
+/*
+ 3.7.2 A D D T A B L E D U P L I C A T I O N
+ *************************************************
+ */
+/*
+ 3.7.2.1 A D D T A B L E D U P L I C A T I O N R E Q U E S T
+ *******************************************************************=
+ */
+
+/*
+ D E L E T E T A B L E
+ **********************=
+ */
+/*****************************************************************************/
+/*********** DELETE TABLE MODULE *************/
+/*****************************************************************************/
+void
+Dbdih::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ tabPtr.p->m_dropTab.tabUserRef = req->senderRef;
+ tabPtr.p->m_dropTab.tabUserPtr = req->senderData;
+
+ DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
+
+ switch(rt){
+ case DropTabReq::OnlineDropTab:
+ jam();
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ releaseTable(tabPtr);
+ break;
+ case DropTabReq::CreateTabDrop:
+ jam();
+ releaseTable(tabPtr);
+ break;
+ case DropTabReq::RestartDropTab:
+ break;
+ }
+
+ startDeleteFile(signal, tabPtr);
+}
+
+void Dbdih::startDeleteFile(Signal* signal, TabRecordPtr tabPtr)
+{
+ if (tabPtr.p->tabFile[0] == RNIL) {
+ jam();
+ initTableFile(tabPtr);
+ }//if
+ openTableFileForDelete(signal, tabPtr.p->tabFile[0]);
+}//Dbdih::startDeleteFile()
+
+void Dbdih::openTableFileForDelete(Signal* signal, Uint32 fileIndex)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = fileIndex;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_OPEN_FOR_DELETE;
+}//Dbdih::openTableFileForDelete()
+
+void Dbdih::tableOpenLab(Signal* signal, FileRecordPtr filePtr)
+{
+ closeFileDelete(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CLOSE_DELETE;
+ return;
+}//Dbdih::tableOpenLab()
+
+void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (filePtr.i == tabPtr.p->tabFile[0]) {
+ jam();
+ openTableFileForDelete(signal, tabPtr.p->tabFile[1]);
+ return;
+ }//if
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[1]);
+
+ releaseFile(tabPtr.p->tabFile[0]);
+ releaseFile(tabPtr.p->tabFile[1]);
+ tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
+
+ tabPtr.p->tabStatus = TabRecord::TS_IDLE;
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = tabPtr.p->m_dropTab.tabUserPtr;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(tabPtr.p->m_dropTab.tabUserRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+
+ tabPtr.p->m_dropTab.tabUserPtr = RNIL;
+ tabPtr.p->m_dropTab.tabUserRef = 0;
+}//Dbdih::tableDeleteLab()
+
+
+void Dbdih::releaseTable(TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ if (tabPtr.p->noOfFragChunks > 0) {
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ releaseReplicas(fragPtr.p->storedReplicas);
+ releaseReplicas(fragPtr.p->oldStoredReplicas);
+ }//for
+ releaseFragments(tabPtr);
+ }
+ if (tabPtr.p->tabFile[0] != RNIL) {
+ jam();
+ releaseFile(tabPtr.p->tabFile[0]);
+ releaseFile(tabPtr.p->tabFile[1]);
+ tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL;
+ }//if
+}//Dbdih::releaseTable()
+
+void Dbdih::releaseReplicas(Uint32 replicaPtrI)
+{
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = replicaPtrI;
+ jam();
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 tmp = replicaPtr.p->nextReplica;
+ replicaPtr.p->nextReplica = cfirstfreeReplica;
+ cfirstfreeReplica = replicaPtr.i;
+ replicaPtr.i = tmp;
+ cnoFreeReplicaRec++;
+ }//while
+}//Dbdih::releaseReplicas()
+
+void Dbdih::seizeReplicaRec(ReplicaRecordPtr& replicaPtr)
+{
+ replicaPtr.i = cfirstfreeReplica;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ cfirstfreeReplica = replicaPtr.p->nextReplica;
+ cnoFreeReplicaRec--;
+ replicaPtr.p->nextReplica = RNIL;
+}//Dbdih::seizeReplicaRec()
+
+void Dbdih::releaseFile(Uint32 fileIndex)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = fileIndex;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ filePtr.p->nextFile = cfirstfreeFile;
+ cfirstfreeFile = filePtr.i;
+}//Dbdih::releaseFile()
+
+
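+/*
+ ALTER_TAB_REQ: DIH only records the new schema version for the table and
+ confirms the request immediately with ALTER_TAB_CONF.
+ */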
+void Dbdih::execALTER_TAB_REQ(Signal * signal)
+{
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->schemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/*
+ G E T N O D E S
+ **********************
+ */
+/*****************************************************************************/
+/* ********** TRANSACTION HANDLING MODULE *************/
+/*****************************************************************************/
+/*
+ 3.8.1 G E T N O D E S R E Q U E S T
+ ******************************************
+ Asks what nodes should be part of a transaction.
+*/
+void Dbdih::execDIGETNODESREQ(Signal* signal)
+{
+ const DiGetNodesReq * const req = (DiGetNodesReq *)&signal->theData[0];
+ FragmentstorePtr fragPtr;
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ Uint32 hashValue = req->hashValue;
+ Uint32 ttabFileSize = ctabFileSize;
+ TabRecord* regTabDesc = tabRecord;
+ jamEntry();
+ ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
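+ // Linear hashing: the mask selects one of the base fragments. Fragments
+ // below hashpointer have already been split, so for those one extra hash
+ // bit ((mask << 1) + 1) decides which of the two halves to use.
+ // Illustrative example: mask = 3, hashpointer = 2, hashValue = 13 gives
+ // 13 & 3 = 1 < 2, so fragId = 13 & 7 = 5.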
+ Uint32 fragId = hashValue & tabPtr.p->mask;
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ if (fragId < tabPtr.p->hashpointer) {
+ jam();
+ fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
+ }//if
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
+ Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes);
+ Uint32 sig2 = (nodeCount - 1) +
+ (fragPtr.p->distributionKey << 16);
+ conf->zero = 0;
+ conf->reqinfo = sig2;
+ conf->fragId = fragId;
+}//Dbdih::execDIGETNODESREQ()
+
+Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
+{
+ Uint32 nodeCount = 0;
+ for (Uint32 i = 0; i < fragPtr->fragReplicas; i++) {
+ jam();
+ NodeRecordPtr nodePtr;
+ ndbrequire(i < MAX_REPLICAS);
+ nodePtr.i = fragPtr->activeNodes[i];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->useInTransactions) {
+ jam();
+ nodes[nodeCount] = nodePtr.i;
+ nodeCount++;
+ }//if
+ }//for
+ ndbrequire(nodeCount > 0);
+ return nodeCount;
+}//Dbdih::extractNodeInfo()
+
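+/*
+ Fragments are stored in chunks of NO_OF_FRAGS_PER_CHUNK records. The
+ fragment number is split into a chunk number (looked up in startFid[])
+ and an index within that chunk.
+ */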
+void
+Dbdih::getFragstore(TabRecord * tab, //In parameter
+ Uint32 fragNo, //In parameter
+ FragmentstorePtr & fragptr) //Out parameter
+{
+ FragmentstorePtr fragPtr;
+ Uint32 chunkNo = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ Uint32 chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1);
+ Uint32 TfragstoreFileSize = cfragstoreFileSize;
+ Fragmentstore* TfragStore = fragmentstore;
+ if (chunkNo < MAX_NDB_NODES) {
+ fragPtr.i = tab->startFid[chunkNo] + chunkIndex;
+ ptrCheckGuard(fragPtr, TfragstoreFileSize, TfragStore);
+ fragptr = fragPtr;
+ return;
+ }//if
+ ndbrequire(false);
+}//Dbdih::getFragstore()
+
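+/*
+ Take the needed number of fragment chunks from the free chunk list headed
+ by cfirstfragstore, remember each chunk's base index in startFid[] and
+ initialise every fragment record in the chunk.
+ */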
+void Dbdih::allocFragments(Uint32 noOfFragments, TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ Uint32 noOfChunks = (noOfFragments + (NO_OF_FRAGS_PER_CHUNK - 1)) >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ ndbrequire(cremainingfrags >= noOfFragments);
+ for (Uint32 i = 0; i < noOfChunks; i++) {
+ jam();
+ Uint32 baseFrag = cfirstfragstore;
+ tabPtr.p->startFid[i] = baseFrag;
+ fragPtr.i = baseFrag;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ cfirstfragstore = fragPtr.p->nextFragmentChunk;
+ cremainingfrags -= NO_OF_FRAGS_PER_CHUNK;
+ for (Uint32 j = 0; j < NO_OF_FRAGS_PER_CHUNK; j++) {
+ jam();
+ fragPtr.i = baseFrag + j;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ initFragstore(fragPtr);
+ }//for
+ }//for
+ tabPtr.p->noOfFragChunks = noOfChunks;
+}//Dbdih::allocFragments()
+
+void Dbdih::releaseFragments(TabRecordPtr tabPtr)
+{
+ FragmentstorePtr fragPtr;
+ for (Uint32 i = 0; i < tabPtr.p->noOfFragChunks; i++) {
+ jam();
+ Uint32 baseFrag = tabPtr.p->startFid[i];
+ fragPtr.i = baseFrag;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ fragPtr.p->nextFragmentChunk = cfirstfragstore;
+ cfirstfragstore = baseFrag;
+ tabPtr.p->startFid[i] = RNIL;
+ cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
+ }//for
+ tabPtr.p->noOfFragChunks = 0;
+}//Dbdih::releaseFragments()
+
+void Dbdih::initialiseFragstore()
+{
+ Uint32 i;
+ FragmentstorePtr fragPtr;
+ for (i = 0; i < cfragstoreFileSize; i++) {
+ fragPtr.i = i;
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ initFragstore(fragPtr);
+ }//for
+ Uint32 noOfChunks = cfragstoreFileSize >> LOG_NO_OF_FRAGS_PER_CHUNK;
+ fragPtr.i = 0;
+ cfirstfragstore = RNIL;
+ cremainingfrags = 0;
+ for (i = 0; i < noOfChunks; i++) {
+ refresh_watch_dog();
+ ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore);
+ fragPtr.p->nextFragmentChunk = cfirstfragstore;
+ cfirstfragstore = fragPtr.i;
+ fragPtr.i += NO_OF_FRAGS_PER_CHUNK;
+ cremainingfrags += NO_OF_FRAGS_PER_CHUNK;
+ }//for
+}//Dbdih::initialiseFragstore()
+
+/*
+ 3.9 V E R I F I C A T I O N
+ ****************************
+ */
+/****************************************************************************/
+/* ********** VERIFICATION SUB-MODULE *************/
+/****************************************************************************/
+/*
+ 3.9.1 R E C E I V I N G O F V E R I F I C A T I O N R E Q U E S T
+ *************************************************************************
+ */
+void Dbdih::execDIVERIFYREQ(Signal* signal)
+{
+
+ jamEntry();
+ if ((getBlockCommit() == false) &&
+ (cfirstVerifyQueue == RNIL)) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // We are not blocked and the verify queue is currently empty, so we can
+ // simply reply back to TC immediately. The method was called with
+ // EXECUTE_DIRECT so we reply by setting the signal data and returning.
+ // theData[0] already contains the correct information, so
+ // we need not touch it.
+ /*-----------------------------------------------------------------------*/
+ signal->theData[1] = currentgcp;
+ signal->theData[2] = 0;
+ return;
+ }//if
+ /*-------------------------------------------------------------------------*/
+ // Since we are blocked we need to put this operation last in the verify
+ // queue to ensure that operation starts up in the correct order.
+ /*-------------------------------------------------------------------------*/
+ ApiConnectRecordPtr tmpApiConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ cverifyQueueCounter++;
+ localApiConnectptr.i = signal->theData[0];
+ tmpApiConnectptr.i = clastVerifyQueue;
+ ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ localApiConnectptr.p->apiGci = cnewgcp;
+ localApiConnectptr.p->nextApi = RNIL;
+ clastVerifyQueue = localApiConnectptr.i;
+ if (tmpApiConnectptr.i == RNIL) {
+ jam();
+ cfirstVerifyQueue = localApiConnectptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ tmpApiConnectptr.p->nextApi = localApiConnectptr.i;
+ }//if
+ emptyverificbuffer(signal, false);
+ signal->theData[2] = 1; // Indicate no immediate return
+ return;
+}//Dbdih::execDIVERIFYREQ()
+
+void Dbdih::execDI_FCOUNTREQ(Signal* signal)
+{
+ ConnectRecordPtr connectPtr;
+ TabRecordPtr tabPtr;
+ jamEntry();
+ connectPtr.i = signal->theData[0];
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+
+ if(connectPtr.i != RNIL){
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ if (connectPtr.p->connectState == ConnectRecord::INUSE) {
+ jam();
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = tabPtr.p->totalfragments;
+ sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal,2, JBB);
+ return;
+ }//if
+ signal->theData[0] = connectPtr.p->userpointer;
+ signal->theData[1] = ZERRONOUSSTATE;
+ sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, 2, JBB);
+ return;
+ }//if
+
+ //connectPtr.i == RNIL -> request without a connect record
+ const Uint32 senderData = signal->theData[2];
+ const BlockReference senderRef = signal->senderBlockRef();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tabPtr.p->totalfragments;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = senderData;
+ signal->theData[4] = tabPtr.p->noOfBackups;
+ sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, 5, JBB);
+}//Dbdih::execDI_FCOUNTREQ()
+
+void Dbdih::execDIGETPRIMREQ(Signal* signal)
+{
+ FragmentstorePtr fragPtr;
+ ConnectRecordPtr connectPtr;
+ TabRecordPtr tabPtr;
+ jamEntry();
+ Uint32 passThrough = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (DictTabInfo::isOrderedIndex(tabPtr.p->tableType)) {
+ jam();
+ tabPtr.i = tabPtr.p->primaryTableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ }
+ Uint32 fragId = signal->theData[3];
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ connectPtr.i = signal->theData[0];
+ if(connectPtr.i != RNIL)
+ {
+ jam();
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+ signal->theData[0] = connectPtr.p->userpointer;
+ }
+ else
+ {
+ jam();
+ signal->theData[0] = RNIL;
+ }
+
+ Uint32 nodes[MAX_REPLICAS];
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ Uint32 count = extractNodeInfo(fragPtr.p, nodes);
+
+ signal->theData[1] = passThrough;
+ signal->theData[2] = nodes[0];
+ signal->theData[3] = nodes[1];
+ signal->theData[4] = nodes[2];
+ signal->theData[5] = nodes[3];
+ signal->theData[6] = count;
+ signal->theData[7] = tabPtr.i;
+ signal->theData[8] = fragId;
+
+ const BlockReference senderRef = signal->senderBlockRef();
+ sendSignal(senderRef, GSN_DIGETPRIMCONF, signal, 9, JBB);
+}//Dbdih::execDIGETPRIMREQ()
+
+/****************************************************************************/
+/* ********** GLOBAL-CHECK-POINT HANDLING MODULE *************/
+/****************************************************************************/
+/*
+ 3.10 G L O B A L C H E C K P O I N T ( IN M A S T E R R O L E)
+ *******************************************************************
+ */
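+/*
+ GCP stop watchdog: this check re-sends itself via CONTINUEB every 100 ms.
+ If the GCP state has not advanced for 1200 consecutive checks (roughly
+ two minutes) the system is deliberately crashed by crashSystemAtGcpStop.
+ */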
+void Dbdih::checkGcpStopLab(Signal* signal)
+{
+ Uint32 tgcpStatus;
+
+ tgcpStatus = cgcpStatus;
+ if (tgcpStatus == coldGcpStatus) {
+ jam();
+ if (coldGcpId == cnewgcp) {
+ jam();
+ if (cgcpStatus != GCP_READY) {
+ jam();
+ cgcpSameCounter++;
+ if (cgcpSameCounter == 1200) {
+ jam();
+#ifdef VM_TRACE
+ ndbout << "System crash due to GCP Stop in state = ";
+ ndbout << (Uint32) cgcpStatus << endl;
+#endif
+ crashSystemAtGcpStop(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ if (cgcpOrderBlocked == 0) {
+ jam();
+ cgcpSameCounter++;
+ if (cgcpSameCounter == 1200) {
+ jam();
+#ifdef VM_TRACE
+ ndbout << "System crash due to GCP Stop in state = ";
+ ndbout << (Uint32) cgcpStatus << endl;
+#endif
+ crashSystemAtGcpStop(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ } else {
+ jam();
+ cgcpSameCounter = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
+ signal->theData[1] = coldGcpStatus;
+ signal->theData[2] = cgcpStatus;
+ signal->theData[3] = coldGcpId;
+ signal->theData[4] = cnewgcp;
+ signal->theData[5] = cgcpSameCounter;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 6);
+ coldGcpStatus = cgcpStatus;
+ coldGcpId = cnewgcp;
+ return;
+}//Dbdih::checkGcpStopLab()
+
+void Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
+{
+ if ((cgcpOrderBlocked == 1) ||
+ (c_nodeStartMaster.blockGcp == true) ||
+ (cfirstVerifyQueue != RNIL)) {
+ /*************************************************************************/
+ // 1: Global Checkpoint has been stopped by management command
+ // 2: Global Checkpoint is blocked by node recovery activity
+ // 3: Previous global checkpoint is not yet completed.
+ // All this means that global checkpoint cannot start now.
+ /*************************************************************************/
+ jam();
+ cgcpStartCounter++;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = aWaitTime > 100 ? (aWaitTime - 100) : 0;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }//if
+ if (cstartGcpNow == false && aWaitTime > 100){
+ /*************************************************************************/
+ // We still have more than 100 milliseconds before we start the next
+ // global checkpoint and nobody has ordered an immediate start.
+ // During initial start we will use continuous global checkpoints to
+ // speed it up since we need to complete a global checkpoint after
+ // inserting a lot of records.
+ /*************************************************************************/
+ jam();
+ cgcpStartCounter++;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = (aWaitTime - 100);
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }//if
+ cgcpStartCounter = 0;
+ cstartGcpNow = false;
+ /***************************************************************************/
+ // Report the event that a global checkpoint has started.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_GlobalCheckpointStarted; //Event type
+ signal->theData[1] = cnewgcp;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7000);
+ cnewgcp++;
+ signal->setTrace(TestOrd::TraceGlobalCheckpoint);
+ sendLoopMacro(GCP_PREPARE, sendGCP_PREPARE);
+ cgcpStatus = GCP_PREPARE_SENT;
+}//Dbdih::startGcpLab()
+
+void Dbdih::execGCP_PREPARECONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ ndbrequire(gci == cnewgcp);
+ receiveLoopMacro(GCP_PREPARE, senderNodeId);
+ //-------------------------------------------------------------
+ // We have now received all replies. We are ready to continue
+ // with committing the global checkpoint.
+ //-------------------------------------------------------------
+ gcpcommitreqLab(signal);
+}//Dbdih::execGCP_PREPARECONF()
+
+void Dbdih::gcpcommitreqLab(Signal* signal)
+{
+ CRASH_INSERTION(7001);
+ sendLoopMacro(GCP_COMMIT, sendGCP_COMMIT);
+ cgcpStatus = GCP_COMMIT_SENT;
+ return;
+}//Dbdih::gcpcommitreqLab()
+
+void Dbdih::execGCP_NODEFINISH(Signal* signal)
+{
+ jamEntry();
+ const Uint32 senderNodeId = signal->theData[0];
+ const Uint32 gci = signal->theData[1];
+ const Uint32 failureNr = signal->theData[2];
+ if (!isMaster()) {
+ jam();
+ ndbrequire(failureNr > cfailurenr);
+ //-------------------------------------------------------------
+ // Another node thinks we are master. This could happen when that node
+ // has heard of a node failure which we have not yet heard of. Ignore the
+ // signal in this case since we will discover it by sending
+ // MASTER_GCPREQ to the node.
+ //-------------------------------------------------------------
+ return;
+ } else if (cmasterState == MASTER_TAKE_OVER_GCP) {
+ jam();
+ //-------------------------------------------------------------
+ // We are currently taking over as master. We will delay the
+ // signal until we have completed the take over gcp handling.
+ //-------------------------------------------------------------
+ sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3);
+ return;
+ } else {
+ ndbrequire(cmasterState == MASTER_ACTIVE);
+ }//if
+ ndbrequire(gci == coldgcp);
+ receiveLoopMacro(GCP_COMMIT, senderNodeId);
+ //-------------------------------------------------------------
+ // We have now received all replies. We are ready to continue
+ // with saving the global checkpoint to disk.
+ //-------------------------------------------------------------
+ CRASH_INSERTION(7002);
+ gcpsavereqLab(signal);
+ return;
+}//Dbdih::execGCP_NODEFINISH()
+
+void Dbdih::gcpsavereqLab(Signal* signal)
+{
+ sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ);
+ cgcpStatus = GCP_NODE_FINISHED;
+}//Dbdih::gcpsavereqLab()
+
+void Dbdih::execGCP_SAVECONF(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
+ ndbrequire(saveConf->gci == coldgcp);
+ ndbrequire(saveConf->nodeId == saveConf->dihPtr);
+ SYSFILE->lastCompletedGCI[saveConf->nodeId] = saveConf->gci;
+ GCP_SAVEhandling(signal, saveConf->nodeId);
+}//Dbdih::execGCP_SAVECONF()
+
+void Dbdih::execGCP_SAVEREF(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ ndbrequire(saveRef->gci == coldgcp);
+ ndbrequire(saveRef->nodeId == saveRef->dihPtr);
+ /**
+ * These are the only allowed reasons for not saving the GCP
+ */
+ ndbrequire(saveRef->errorCode == GCPSaveRef::NodeShutdownInProgress ||
+ saveRef->errorCode == GCPSaveRef::FakedSignalDueToNodeFailure ||
+ saveRef->errorCode == GCPSaveRef::NodeRestartInProgress);
+ GCP_SAVEhandling(signal, saveRef->nodeId);
+}//Dbdih::execGCP_SAVEREF()
+
+void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
+{
+ receiveLoopMacro(GCP_SAVEREQ, nodeId);
+ /*-------------------------------------------------------------------------*/
+ // All nodes have replied. We are ready to update the system file.
+ /*-------------------------------------------------------------------------*/
+ cgcpStatus = GCP_SAVE_LQH_FINISHED;
+ CRASH_INSERTION(7003);
+ checkToCopy();
+ /**------------------------------------------------------------------------
+ * SET NEW RECOVERABLE GCI. ALSO RESET RESTART COUNTER TO ZERO.
+ * THIS INDICATES THAT THE SYSTEM HAS BEEN RECOVERED AND SURVIVED AT
+ * LEAST ONE GLOBAL CHECKPOINT PERIOD. WE WILL USE THIS PARAMETER TO
+ * SET BACK THE RESTART GCI IF WE ENCOUNTER MORE THAN ONE UNSUCCESSFUL
+ * RESTART.
+ *------------------------------------------------------------------------*/
+ SYSFILE->newestRestorableGCI = coldgcp;
+ if(Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) &&
+ getNodeState().startLevel == NodeState::SL_STARTED){
+ jam();
+#if 0
+ ndbout_c("Dbdih: Clearing initial start ongoing");
+#endif
+ Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
+ }
+ copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT);
+}//Dbdih::GCP_SAVEhandling()
+
+/*
+ 3.11 G L O B A L C H E C K P O I N T (N O T - M A S T E R)
+ *************************************************************
+ */
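+/*
+ Participant side of a GCP round: GCP_PREPARE sets cgckptflag and answers
+ with GCP_PREPARECONF, GCP_COMMIT switches to the new GCI and asks the
+ local TC to finish via GCP_NOMORETRANS, and GCP_TCFINISHED reports back
+ to the master with GCP_NODEFINISH.
+ */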
+void Dbdih::execGCP_PREPARE(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7005);
+ Uint32 masterNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ BlockReference retRef = calcDihBlockRef(masterNodeId);
+
+ ndbrequire (cmasterdihref == retRef);
+ ndbrequire (cgcpParticipantState == GCP_PARTICIPANT_READY);
+ ndbrequire (gci == (currentgcp + 1));
+
+ cgckptflag = true;
+ cgcpParticipantState = GCP_PARTICIPANT_PREPARE_RECEIVED;
+ cnewgcp = gci;
+
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = gci;
+ sendSignal(retRef, GSN_GCP_PREPARECONF, signal, 2, JBA);
+ return;
+}//Dbdih::execGCP_PREPARE()
+
+void Dbdih::execGCP_COMMIT(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7006);
+ Uint32 masterNodeId = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+
+ ndbrequire(gci == (currentgcp + 1));
+ ndbrequire(masterNodeId == cmasterNodeId);
+ ndbrequire(cgcpParticipantState == GCP_PARTICIPANT_PREPARE_RECEIVED);
+
+ coldgcp = currentgcp;
+ currentgcp = cnewgcp;
+ cgckptflag = false;
+ emptyverificbuffer(signal, true);
+ cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED;
+ signal->theData[1] = coldgcp;
+ sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB);
+ return;
+}//Dbdih::execGCP_COMMIT()
+
+void Dbdih::execGCP_TCFINISHED(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(7007);
+ Uint32 gci = signal->theData[1];
+ ndbrequire(gci == coldgcp);
+
+ cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED;
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = coldgcp;
+ signal->theData[2] = cfailurenr;
+ sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB);
+}//Dbdih::execGCP_TCFINISHED()
+
+/*****************************************************************************/
+//****** RECEIVING TAMPER REQUEST FROM NDBAPI ******
+/*****************************************************************************/
+void Dbdih::execDIHNDBTAMPER(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcgcpblocked = signal->theData[0];
+ /* ACTION TO BE TAKEN BY DIH */
+ Uint32 tuserpointer = signal->theData[1];
+ BlockReference tuserblockref = signal->theData[2];
+ switch (tcgcpblocked) {
+ case 1:
+ jam();
+ if (isMaster()) {
+ jam();
+ cgcpOrderBlocked = 1;
+ } else {
+ jam();
+ /* TRANSFER THE REQUEST */
+ /* TO MASTER*/
+ signal->theData[0] = tcgcpblocked;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ }//if
+ break;
+ case 2:
+ jam();
+ if (isMaster()) {
+ jam();
+ cgcpOrderBlocked = 0;
+ } else {
+ jam();
+ /* TRANSFER THE REQUEST */
+ /* TO MASTER*/
+ signal->theData[0] = tcgcpblocked;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ }//if
+ break;
+ case 3:
+ ndbrequire(false);
+ return;
+ break;
+ case 4:
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = crestartGci;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 2, JBB);
+ break;
+#ifdef ERROR_INSERT
+ case 5:
+ jam();
+ if(tuserpointer == 0)
+ {
+ jam();
+ signal->theData[0] = 0;
+ sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ return;
+ }
+ /*----------------------------------------------------------------------*/
+ // Insert errors.
+ /*----------------------------------------------------------------------*/
+ if (tuserpointer < 1000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into QMGR.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = QMGR_REF;
+ } else if (tuserpointer < 2000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into NDBCNTR.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = NDBCNTR_REF;
+ } else if (tuserpointer < 3000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into NDBFS.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = NDBFS_REF;
+ } else if (tuserpointer < 4000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBACC.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBACC_REF;
+ } else if (tuserpointer < 5000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBTUP.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBTUP_REF;
+ } else if (tuserpointer < 6000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBLQH.
+ /*---------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBLQH_REF;
+ } else if (tuserpointer < 7000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBDICT.
+ /*---------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBDICT_REF;
+ } else if (tuserpointer < 8000) {
+ /*---------------------------------------------------------------------*/
+ // Insert errors into DBDIH.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBDIH_REF;
+ } else if (tuserpointer < 9000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into DBTC.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = DBTC_REF;
+ } else if (tuserpointer < 10000) {
+ /*--------------------------------------------------------------------*/
+ // Insert errors into CMVMI.
+ /*--------------------------------------------------------------------*/
+ jam();
+ tuserblockref = CMVMI_REF;
+ } else if (tuserpointer < 11000) {
+ jam();
+ tuserblockref = BACKUP_REF;
+ } else if (tuserpointer < 12000) {
+ // DBUTIL_REF ?
+ jam();
+ } else if (tuserpointer < 13000) {
+ jam();
+ tuserblockref = DBTUX_REF;
+ } else if (tuserpointer < 14000) {
+ jam();
+ tuserblockref = SUMA_REF;
+ } else if (tuserpointer < 15000) {
+ jam();
+ tuserblockref = DBDICT_REF;
+ } else if (tuserpointer < 30000) {
+ /*--------------------------------------------------------------------*/
+ // Ignore errors in the 20000-range.
+ /*--------------------------------------------------------------------*/
+ jam();
+ return;
+ } else if (tuserpointer < 40000) {
+ jam();
+ /*--------------------------------------------------------------------*/
+ // Redirect errors to master DIH in the 30000-range.
+ /*--------------------------------------------------------------------*/
+ tuserblockref = cmasterdihref;
+ tuserpointer -= 30000;
+ signal->theData[0] = 5;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ return;
+ } else if (tuserpointer < 50000) {
+ NodeRecordPtr localNodeptr;
+ Uint32 Tfound = 0;
+ jam();
+ /*--------------------------------------------------------------------*/
+ // Redirect errors to non-master DIH in the 40000-range.
+ /*--------------------------------------------------------------------*/
+ tuserpointer -= 40000;
+ for (localNodeptr.i = 1;
+ localNodeptr.i < MAX_NDB_NODES;
+ localNodeptr.i++) {
+ jam();
+ ptrAss(localNodeptr, nodeRecord);
+ if ((localNodeptr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (localNodeptr.i != cmasterNodeId)) {
+ jam();
+ tuserblockref = calcDihBlockRef(localNodeptr.i);
+ Tfound = 1;
+ break;
+ }//if
+ }//for
+ if (Tfound == 0) {
+ jam();
+ /*-------------------------------------------------------------------*/
+ // Ignore since no non-master node existed.
+ /*-------------------------------------------------------------------*/
+ return;
+ }//if
+ signal->theData[0] = 5;
+ signal->theData[1] = tuserpointer;
+ signal->theData[2] = tuserblockref;
+ sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ return;
+ }//if
+ signal->theData[0] = tuserpointer;
+ if (tuserpointer != 0) {
+ sendSignal(tuserblockref, GSN_NDB_TAMPER, signal, 1, JBB);
+ } else {
+ sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ }//if
+ break;
+#endif
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbdih::execDIHNDBTAMPER()
+
+/*****************************************************************************/
+/* ********** FILE HANDLING MODULE *************/
+/*****************************************************************************/
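+/*
+ copyGciLab starts a COPY_GCIREQ round towards all nodes for the given
+ reason. At most one further reason can be queued in m_waiting while a
+ round is in flight; execCOPY_GCICONF pops it via CONTINUEB when done.
+ */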
+void Dbdih::copyGciLab(Signal* signal, CopyGCIReq::CopyReason reason)
+{
+ if(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE){
+ /**
+ * There can currently only be one waiting
+ */
+ ndbrequire(c_copyGCIMaster.m_waiting == CopyGCIReq::IDLE);
+ c_copyGCIMaster.m_waiting = reason;
+ return;
+ }
+ c_copyGCIMaster.m_copyReason = reason;
+ sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ);
+
+}//Dbdih::copyGciLab()
+
+/* ------------------------------------------------------------------------- */
+/* COPY_GCICONF RESPONSE TO COPY_GCIREQ */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execCOPY_GCICONF(Signal* signal)
+{
+ jamEntry();
+ NodeRecordPtr senderNodePtr;
+ senderNodePtr.i = signal->theData[0];
+ receiveLoopMacro(COPY_GCIREQ, senderNodePtr.i);
+
+ CopyGCIReq::CopyReason waiting = c_copyGCIMaster.m_waiting;
+ CopyGCIReq::CopyReason current = c_copyGCIMaster.m_copyReason;
+
+ c_copyGCIMaster.m_copyReason = CopyGCIReq::IDLE;
+ c_copyGCIMaster.m_waiting = CopyGCIReq::IDLE;
+
+ bool ok = false;
+ switch(current){
+ case CopyGCIReq::RESTART:{
+ ok = true;
+ jam();
+ DictStartReq * req = (DictStartReq*)&signal->theData[0];
+ req->restartGci = SYSFILE->newestRestorableGCI;
+ req->senderRef = reference();
+ sendSignal(cdictblockref, GSN_DICTSTARTREQ,
+ signal, DictStartReq::SignalLength, JBB);
+ break;
+ }
+ case CopyGCIReq::LOCAL_CHECKPOINT:{
+ ok = true;
+ jam();
+ startLcpRoundLab(signal);
+ break;
+ }
+ case CopyGCIReq::GLOBAL_CHECKPOINT:
+ ok = true;
+ jam();
+ checkToCopyCompleted(signal);
+
+ /************************************************************************/
+ // Report the event that a global checkpoint has completed.
+ /************************************************************************/
+ signal->setTrace(0);
+ signal->theData[0] = NDB_LE_GlobalCheckpointCompleted; //Event type
+ signal->theData[1] = coldgcp;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ CRASH_INSERTION(7004);
+ emptyWaitGCPMasterQueue(signal);
+ cgcpStatus = GCP_READY;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = cgcpDelay;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ if (c_nodeStartMaster.blockGcp == true) {
+ jam();
+ /* ------------------------------------------------------------------ */
+ /* A NEW NODE WANTS IN AND WE MUST ALLOW IT TO COME IN NOW SINCE THE */
+ /* GCP IS COMPLETED. */
+ /* ------------------------------------------------------------------ */
+ gcpBlockedLab(signal);
+ }//if
+ break;
+ case CopyGCIReq::INITIAL_START_COMPLETED:
+ ok = true;
+ jam();
+ initialStartCompletedLab(signal);
+ break;
+ case CopyGCIReq::IDLE:
+ ok = false;
+ jam();
+ }
+ ndbrequire(ok);
+
+ /**
+ * Pop queue
+ */
+ if(waiting != CopyGCIReq::IDLE){
+ c_copyGCIMaster.m_copyReason = waiting;
+ signal->theData[0] = DihContinueB::ZCOPY_GCI;
+ signal->theData[1] = waiting;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ }
+}//Dbdih::execCOPY_GCICONF()
+
+void Dbdih::invalidateLcpInfoAfterSr()
+{
+ NodeRecordPtr nodePtr;
+ SYSFILE->latestLCP_ID--;
+ Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (!NdbNodeBitmask::get(SYSFILE->lcpActive, nodePtr.i)){
+ jam();
+ /* ------------------------------------------------------------------- */
+ // The node was not active in the local checkpoint.
+ // To avoid stepping the active status too quickly towards not
+ // active, we step it back one step from Sysfile::NS_ActiveMissed_x.
+ /* ------------------------------------------------------------------- */
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ /* ----------------------------------------------------------------- */
+ // When not active in ongoing LCP and still active is a contradiction.
+ /* ----------------------------------------------------------------- */
+ ndbrequire(false);
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ }//if
+ }//for
+ setNodeRestartInfoBits();
+}//Dbdih::invalidateLcpInfoAfterSr()
+
+/* ------------------------------------------------------------------------- */
+/* THE NEXT STEP IS TO WRITE THE FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingCopyGciSkipInitLab(Signal* signal, FileRecordPtr filePtr)
+{
+ writeRestorableGci(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::WRITING_COPY_GCI;
+ return;
+}//Dbdih::openingCopyGciSkipInitLab()
+
+void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW WRITTEN THIS FILE. WRITE ALSO NEXT FILE IF THIS IS NOT */
+ /* ALREADY THE LAST. */
+ /* ----------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ if (filePtr.i == crestartInfoFile[0]) {
+ jam();
+ filePtr.i = crestartInfoFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ if (filePtr.p->fileStatus == FileRecord::OPEN) {
+ jam();
+ openingCopyGciSkipInitLab(signal, filePtr);
+ return;
+ }//if
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI;
+ return;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE COMPLETED WRITING BOTH FILES SUCCESSFULLY. NOW REPORT OUR */
+ /* SUCCESS TO THE MASTER DIH. BUT FIRST WE NEED TO RESET A NUMBER OF */
+ /* VARIABLES USED BY THE LOCAL CHECKPOINT PROCESS (ONLY IF TRIGGERED */
+ /* BY THE LOCAL CHECKPOINT PROCESS). */
+ /* ----------------------------------------------------------------------- */
+ CopyGCIReq::CopyReason reason = c_copyGCISlave.m_copyReason;
+
+ if (reason == CopyGCIReq::GLOBAL_CHECKPOINT) {
+ jam();
+ cgcpParticipantState = GCP_PARTICIPANT_READY;
+
+ SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr();
+ rep->gci = coldgcp;
+ rep->senderData = 0;
+ sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+ }
+
+ jam();
+ c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
+
+ if(c_copyGCISlave.m_senderRef == cmasterdihref){
+ jam();
+ /**
+ * Only if same master
+ */
+ signal->theData[0] = c_copyGCISlave.m_senderData;
+ sendSignal(c_copyGCISlave.m_senderRef, GSN_COPY_GCICONF, signal, 1, JBB);
+
+ }
+ return;
+}//Dbdih::writingCopyGciLab()
+
+void Dbdih::execSTART_LCP_REQ(Signal* signal){
+ StartLcpReq * req = (StartLcpReq*)signal->getDataPtr();
+
+ CRASH_INSERTION2(7021, isMaster());
+ CRASH_INSERTION2(7022, !isMaster());
+
+ ndbrequire(c_lcpState.m_masterLcpDihRef = req->senderRef);
+ c_lcpState.m_participatingDIH = req->participatingDIH;
+ c_lcpState.m_participatingLQH = req->participatingLQH;
+
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH = req->participatingLQH;
+ if(isMaster()){
+ jam();
+ ndbrequire(isActiveMaster());
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH = req->participatingDIH;
+
+ } else {
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor();
+ }
+
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = false;
+
+ c_lcpState.setLcpStatus(LCP_INIT_TABLES, __LINE__);
+
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = c_lcpState.m_masterLcpDihRef;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ if(c_lcpState.m_masterLcpDihRef != senderRef){
+ jam();
+ /**
+ * This is LCP master takeover
+ */
+#ifdef VM_TRACE
+ ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
+#endif
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ sendMASTER_LCPCONF(signal);
+ return;
+ }
+
+ if(c_lcpState.m_masterLcpDihRef != cmasterdihref){
+ jam();
+ /**
+ * Master take over but has not yet received MASTER_LCPREQ
+ */
+#ifdef VM_TRACE
+ ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
+#endif
+ return;
+ }
+
+ //const Uint32 lcpId = SYSFILE->latestLCP_ID;
+
+ for(; tabPtr.i < ctabFileSize; tabPtr.i++){
+
+ ptrAss(tabPtr, tabRecord);
+
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ continue;
+ }
+
+ if (tabPtr.p->storedTable == 0) {
+ /**
+ * Temporary table
+ */
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ continue;
+ }
+
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ /* ----------------------------------------------------------------- */
+ // We protect the updates of table data structures by this variable.
+ /* ----------------------------------------------------------------- */
+ jam();
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3);
+ return;
+ }//if
+
+ /**
+ * Found a table
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_ACTIVE;
+
+ /**
+ * For each fragment
+ */
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ /**
+ * For each of replica record
+ */
+ Uint32 replicaCount = 0;
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL;
+ replicaPtr.i = replicaPtr.p->nextReplica) {
+ jam();
+
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 nodeId = replicaPtr.p->procNode;
+ if(c_lcpState.m_participatingLQH.get(nodeId)){
+ jam();
+ replicaCount++;
+ replicaPtr.p->lcpOngoingFlag = true;
+ }
+ }
+
+ fragPtr.p->noLcpReplicas = replicaCount;
+ }//for
+
+ signal->theData[0] = DihContinueB::ZINIT_LCP;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ /**
+ * No more tables
+ */
+ jam();
+
+ if (c_lcpState.m_masterLcpDihRef != reference()){
+ jam();
+ ndbrequire(!isMaster());
+ c_lcpState.setLcpStatus(LCP_STATUS_ACTIVE, __LINE__);
+ } else {
+ jam();
+ ndbrequire(isMaster());
+ }
+
+ CRASH_INSERTION2(7023, isMaster());
+ CRASH_INSERTION2(7024, !isMaster());
+
+ jam();
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_START_LCP_CONF, signal,
+ StartLcpConf::SignalLength, JBB);
+ return;
+}//Dbdih::initLcpLab()
+
+/* ------------------------------------------------------------------------- */
+/* ERROR HANDLING FOR COPY RESTORABLE GCI FILE. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::openingCopyGciErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ createFileRw(signal, filePtr);
+ /* ------------------------------------------------------------------------- */
+ /* ERROR IN OPENING FILE. WE WILL TRY BY CREATING FILE INSTEAD. */
+ /* ------------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::CREATING_COPY_GCI;
+ return;
+}//Dbdih::openingCopyGciErrorLab()
+
+/* ------------------------------------------------------------------------- */
+/* ENTER DICTSTARTCONF WITH */
+/* TBLOCKREF */
+/* ------------------------------------------------------------------------- */
+void Dbdih::dictStartConfLab(Signal* signal)
+{
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW RECEIVED ALL THE TABLES TO RESTART. */
+ /* ----------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = 0; /* START WITH TABLE 0 */
+ signal->theData[2] = 0; /* AND FRAGMENT 0 */
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::dictStartConfLab()
+
+
+void Dbdih::openingTableLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /* ---------------------------------------------------------------------- */
+ /* SUCCESSFULLY OPENED A FILE. READ THE FIRST PAGE OF THIS FILE. */
+ /* ---------------------------------------------------------------------- */
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->noPages = 1;
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[0] = pagePtr.i;
+ readTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_TABLE;
+ return;
+}//Dbdih::openingTableLab()
+
+void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ---------------------------------------------------------------------- */
+ /* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */
+ /* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+}//Dbdih::openingTableErrorLab()
+
+void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE SUCCESSFULLY READ A NUMBER OF PAGES IN THE TABLE FILE. IF */
+ /* MORE PAGES EXIST IN THE FILE THEN READ ALL PAGES IN THE FILE. */
+ /* ---------------------------------------------------------------------- */
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ Uint32 noOfStoredPages = pagePtr.p->word[33];
+ if (tabPtr.p->noPages < noOfStoredPages) {
+ jam();
+ ndbrequire(noOfStoredPages <= 8);
+ for (Uint32 i = tabPtr.p->noPages; i < noOfStoredPages; i++) {
+ jam();
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[i] = pagePtr.i;
+ }//for
+ tabPtr.p->noPages = noOfStoredPages;
+ readTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::READING_TABLE;
+ } else {
+ ndbrequire(tabPtr.p->noPages == pagePtr.p->word[33]);
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE READ ALL PAGES. NOW READ FROM PAGES INTO TABLE AND FRAGMENT */
+ /* DATA STRUCTURES. */
+ /* --------------------------------------------------------------------- */
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE1_READ_PAGES;
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ return;
+}//Dbdih::readingTableLab()
+
+void Dbdih::readTableFromPagesLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE NOW COPIED TO OUR NODE. WE HAVE NOW COMPLETED RESTORING */
+ /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
+ /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
+ /* ---------------------------------------------------------------------- */
+ if (filePtr.p->fileStatus != FileRecord::OPEN) {
+ jam();
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ }//if
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_SR;
+ return;
+}//Dbdih::readTableFromPagesLab()
+
+void Dbdih::closingTableSrLab(Signal* signal, FileRecordPtr filePtr)
+{
+ /**
+ * Update table/fragment info
+ */
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ resetReplicaSr(tabPtr);
+
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE;
+ signal->theData[1] = filePtr.p->tabRef;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ return;
+}//Dbdih::closingTableSrLab()
+
+void
+Dbdih::resetReplicaSr(TabRecordPtr tabPtr){
+
+ const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
+
+ for(Uint32 i = 0; i<tabPtr.p->totalfragments; i++){
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, i, fragPtr);
+
+ /**
+ * 1) Start by moving all replicas into oldStoredReplicas
+ */
+ prepareReplicas(fragPtr);
+
+ /**
+ * 2) Move all "alive" replicas into storedReplicas
+ * + update noCrashedReplicas...
+ */
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ const Uint32 nextReplicaPtrI = replicaPtr.p->nextReplica;
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ const Uint32 noCrashedReplicas = replicaPtr.p->noCrashedReplicas;
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:{
+ jam();
+ /* --------------------------------------------------------------- */
+ /* THE NODE IS ALIVE AND KICKING AND ACTIVE, LET'S USE IT. */
+ /* --------------------------------------------------------------- */
+ arrGuard(noCrashedReplicas, 8);
+ Uint32 lastGci = replicaPtr.p->replicaLastGci[noCrashedReplicas];
+ if(lastGci >= newestRestorableGCI){
+ jam();
+ /** -------------------------------------------------------------
+ * THE REPLICA WAS ALIVE AT THE SYSTEM FAILURE. WE WILL SET THE
+ * LAST REPLICA GCI TO MINUS ONE SINCE IT HASN'T FAILED YET IN THE
+ * NEW SYSTEM.
+ *-------------------------------------------------------------- */
+ replicaPtr.p->replicaLastGci[noCrashedReplicas] = (Uint32)-1;
+ } else {
+ jam();
+ /*--------------------------------------------------------------
+ * SINCE IT WAS NOT ALIVE AT THE TIME OF THE SYSTEM CRASH THIS IS
+ * A COMPLETELY NEW REPLICA. WE WILL SET THE CREATE GCI TO BE THE
+ * NEXT GCI TO BE EXECUTED.
+ *-------------------------------------------------------------- */
+ const Uint32 nextCrashed = noCrashedReplicas + 1;
+ replicaPtr.p->noCrashedReplicas = nextCrashed;
+ arrGuard(nextCrashed, 8);
+ replicaPtr.p->createGci[nextCrashed] = newestRestorableGCI + 1;
+ ndbrequire(newestRestorableGCI + 1 != 0xF1F1F1F1);
+ replicaPtr.p->replicaLastGci[nextCrashed] = (Uint32)-1;
+ }//if
+
+ resetReplicaLcp(replicaPtr.p, newestRestorableGCI);
+
+ /* -----------------------------------------------------------------
+ * LINK THE REPLICA INTO THE STORED REPLICA LIST. WE WILL USE THIS
+ * NODE AS A STORED REPLICA.
+ * WE MUST FIRST LINK IT OUT OF THE LIST OF OLD STORED REPLICAS.
+ * --------------------------------------------------------------- */
+ removeOldStoredReplica(fragPtr, replicaPtr);
+ linkStoredReplica(fragPtr, replicaPtr);
+
+ }
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }
+ }
+ replicaPtr.i = nextReplicaPtrI;
+ }//while
+ }
+}
+
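+/*
+ Walk backwards from nextLcp and invalidate every stored LCP that started
+ after stopGci, stopping at the first LCP that is still usable for
+ restarting this fragment replica.
+ */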
+void
+Dbdih::resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci){
+
+ Uint32 lcpNo = replicaP->nextLcp;
+ const Uint32 startLcpNo = lcpNo;
+ do {
+ lcpNo = prevLcpNo(lcpNo);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (replicaP->lcpStatus[lcpNo] == ZVALID) {
+ if (replicaP->maxGciStarted[lcpNo] < stopGci) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
+ /* RESTARTING THIS FRAGMENT REPLICA. */
+ /* ----------------------------------------------------------------- */
+ return ;
+ }//if
+ }//if
+
+ /**
+ * WE COULD NOT USE THIS LOCAL CHECKPOINT. IT WAS TOO
+ * RECENT OR SIMPLY NOT A VALID CHECKPOINT.
+ * WE SHOULD THUS REMOVE THIS LOCAL CHECKPOINT SINCE IT WILL NEVER
+ * AGAIN BE USED. SET LCP_STATUS TO INVALID.
+ */
+ replicaP->nextLcp = lcpNo;
+ replicaP->lcpId[lcpNo] = 0;
+ replicaP->lcpStatus[lcpNo] = ZINVALID;
+ } while (lcpNo != startLcpNo);
+
+ replicaP->nextLcp = 0;
+}
+
+void Dbdih::readingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ /* ---------------------------------------------------------------------- */
+ /* READING THIS FILE FAILED. CLOSE IT AFTER RELEASING ALL PAGES. */
+ /* ---------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->noPages <= 8);
+ for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
+ jam();
+ releasePage(tabPtr.p->pageRef[i]);
+ }//for
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_CRASH;
+ return;
+}//Dbdih::readingTableErrorLab()
+
+void Dbdih::closingTableCrashLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ /* ---------------------------------------------------------------------- */
+ /* WE HAVE NOW CLOSED A FILE WHICH WE HAD A READ ERROR WITH. PROCEED */
+ /* WITH NEXT FILE IF NOT THE LAST OTHERWISE REPORT ERROR. */
+ /* ---------------------------------------------------------------------- */
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+}//Dbdih::closingTableCrashLab()
+
+/*****************************************************************************/
+/* ********** COPY TABLE MODULE *************/
+/*****************************************************************************/
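+/*
+ COPY_TABREQ carries the packed table description from the master in
+ 16-word chunks. Chunks are appended to 2048-word pages; a signal with
+ noOfWords <= 16 marks the final chunk and triggers unpacking via
+ ZREAD_PAGES_INTO_TABLE.
+ */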
+void Dbdih::execCOPY_TABREQ(Signal* signal)
+{
+ CRASH_INSERTION(7172);
+
+ TabRecordPtr tabPtr;
+ PageRecordPtr pagePtr;
+ jamEntry();
+ BlockReference ref = signal->theData[0];
+ Uint32 reqinfo = signal->theData[1];
+ tabPtr.i = signal->theData[2];
+ Uint32 schemaVersion = signal->theData[3];
+ Uint32 noOfWords = signal->theData[4];
+ ndbrequire(ref == cmasterdihref);
+ ndbrequire(!isMaster());
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (reqinfo == 1) {
+ jam();
+ tabPtr.p->schemaVersion = schemaVersion;
+ initTableFile(tabPtr);
+ }//if
+ ndbrequire(tabPtr.p->noPages < 8);
+ if (tabPtr.p->noOfWords == 0) {
+ jam();
+ allocpage(pagePtr);
+ tabPtr.p->pageRef[tabPtr.p->noPages] = pagePtr.i;
+ tabPtr.p->noPages++;
+ } else {
+ jam();
+ pagePtr.i = tabPtr.p->pageRef[tabPtr.p->noPages - 1];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ }//if
+ ndbrequire(tabPtr.p->noOfWords + 15 < 2048);
+ ndbrequire(tabPtr.p->noOfWords < 2048);
+ MEMCOPY_NO_WORDS(&pagePtr.p->word[tabPtr.p->noOfWords], &signal->theData[5], 16);
+ tabPtr.p->noOfWords += 16;
+ if (tabPtr.p->noOfWords == 2048) {
+ jam();
+ tabPtr.p->noOfWords = 0;
+ }//if
+ if (noOfWords > 16) {
+ jam();
+ return;
+ }//if
+ tabPtr.p->noOfWords = 0;
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}//Dbdih::execCOPY_TABREQ()
+
+void
+Dbdih::copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr){
+ if (!isMaster()) {
+ jam();
+ //----------------------------------------------------------------------------
+ // In this particular case we do not release table pages if we are master. The
+ // reason is that the master could still be sending the table info to another
+ // node.
+ //----------------------------------------------------------------------------
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabStatus = TabRecord::TS_ACTIVE;
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ updateNodeInfo(fragPtr);
+ }//for
+ }//if
+ signal->theData[0] = cownNodeId;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(cmasterdihref, GSN_COPY_TABCONF, signal, 2, JBB);
+}
+
+/*****************************************************************************/
+/* ****** READ FROM A NUMBER OF PAGES INTO THE TABLE DATA STRUCTURES ********/
+/*****************************************************************************/
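+/*
+ The page layout read here mirrors what packTableIntoPagesLab() writes:
+ word 33 holds the page count and the table header starts at word 35
+ (totalfragments, noOfBackups, hashpointer, kvalue, mask, method,
+ storedTable), followed by the per-fragment data.
+ */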
+void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId)
+{
+ RWFragment rf;
+ rf.wordIndex = 35;
+ rf.pageIndex = 0;
+ rf.rwfTabPtr.i = tableId;
+ ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord);
+ rf.rwfPageptr.i = rf.rwfTabPtr.p->pageRef[0];
+ ptrCheckGuard(rf.rwfPageptr, cpageFileSize, pageRecord);
+ rf.rwfTabPtr.p->totalfragments = readPageWord(&rf);
+ rf.rwfTabPtr.p->noOfBackups = readPageWord(&rf);
+ rf.rwfTabPtr.p->hashpointer = readPageWord(&rf);
+ rf.rwfTabPtr.p->kvalue = readPageWord(&rf);
+ rf.rwfTabPtr.p->mask = readPageWord(&rf);
+ ndbrequire(readPageWord(&rf) == TabRecord::HASH);
+ rf.rwfTabPtr.p->method = TabRecord::HASH;
+ /* ---------------------------------- */
+ /* Type of table, 2 = temporary table */
+ /* ---------------------------------- */
+ rf.rwfTabPtr.p->storedTable = readPageWord(&rf);
+
+ Uint32 noOfFrags = rf.rwfTabPtr.p->totalfragments;
+ ndbrequire(noOfFrags > 0);
+ ndbrequire((noOfFrags * (rf.rwfTabPtr.p->noOfBackups + 1)) <= cnoFreeReplicaRec);
+ allocFragments(noOfFrags, rf.rwfTabPtr);
+
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
+ signal->theData[1] = rf.rwfTabPtr.i;
+ signal->theData[2] = 0;
+ signal->theData[3] = rf.pageIndex;
+ signal->theData[4] = rf.wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ return;
+}//Dbdih::readPagesIntoTableLab()
+
+void Dbdih::readPagesIntoFragLab(Signal* signal, RWFragment* rf)
+{
+ ndbrequire(rf->pageIndex < 8);
+ rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
+ ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
+ FragmentstorePtr fragPtr;
+ getFragstore(rf->rwfTabPtr.p, rf->fragId, fragPtr);
+ readFragment(rf, fragPtr);
+ readReplicas(rf, fragPtr);
+ rf->fragId++;
+ if (rf->fragId == rf->rwfTabPtr.p->totalfragments) {
+ jam();
+ switch (rf->rwfTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE1_READ_PAGES:
+ jam();
+ releaseTabPages(rf->rwfTabPtr.i);
+ rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZREAD_TABLE_FROM_PAGES;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_COPY_TAB_REQ:
+ jam();
+ rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ if(getNodeState().getSystemRestartInProgress()){
+ jam();
+ copyTabReq_complete(signal, rf->rwfTabPtr);
+ return;
+ }
+ rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ rf->rwfTabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ } else {
+ jam();
+ signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG;
+ signal->theData[1] = rf->rwfTabPtr.i;
+ signal->theData[2] = rf->fragId;
+ signal->theData[3] = rf->pageIndex;
+ signal->theData[4] = rf->wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ }//if
+ return;
+}//Dbdih::readPagesIntoFragLab()
+
+/*****************************************************************************/
+/***** WRITING FROM TABLE DATA STRUCTURES INTO A SET OF PAGES ******/
+// execCONTINUEB(ZPACK_TABLE_INTO_PAGES)
+/*****************************************************************************/
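+/* ------------------------------------------------------------------------- */
+/* The table-level words (fragment count, number of backups, hash parameters */
+/* and the stored-table flag) are written first, starting at word index 35.  */
+/* The per-fragment and replica data is then appended fragment by fragment   */
+/* through repeated CONTINUEB(ZPACK_FRAG_INTO_PAGES) executions.             */
+/* ------------------------------------------------------------------------- */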
+void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId)
+{
+ RWFragment wf;
+ TabRecordPtr tabPtr;
+ allocpage(wf.rwfPageptr);
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ tabPtr.p->pageRef[0] = wf.rwfPageptr.i;
+ tabPtr.p->noPages = 1;
+ wf.wordIndex = 35;
+ wf.pageIndex = 0;
+ writePageWord(&wf, tabPtr.p->totalfragments);
+ writePageWord(&wf, tabPtr.p->noOfBackups);
+ writePageWord(&wf, tabPtr.p->hashpointer);
+ writePageWord(&wf, tabPtr.p->kvalue);
+ writePageWord(&wf, tabPtr.p->mask);
+ writePageWord(&wf, TabRecord::HASH);
+ writePageWord(&wf, tabPtr.p->storedTable);
+
+ signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = 0;
+ signal->theData[3] = wf.pageIndex;
+ signal->theData[4] = wf.wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+}//Dbdih::packTableIntoPagesLab()
+
+/*****************************************************************************/
+// execCONTINUEB(ZPACK_FRAG_INTO_PAGES)
+/*****************************************************************************/
+void Dbdih::packFragIntoPagesLab(Signal* signal, RWFragment* wf)
+{
+ ndbrequire(wf->pageIndex < 8);
+ wf->rwfPageptr.i = wf->rwfTabPtr.p->pageRef[wf->pageIndex];
+ ptrCheckGuard(wf->rwfPageptr, cpageFileSize, pageRecord);
+ FragmentstorePtr fragPtr;
+ getFragstore(wf->rwfTabPtr.p, wf->fragId, fragPtr);
+ writeFragment(wf, fragPtr);
+ writeReplicas(wf, fragPtr.p->storedReplicas);
+ writeReplicas(wf, fragPtr.p->oldStoredReplicas);
+ wf->fragId++;
+ if (wf->fragId == wf->rwfTabPtr.p->totalfragments) {
+ jam();
+ PageRecordPtr pagePtr;
+ pagePtr.i = wf->rwfTabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ pagePtr.p->word[33] = wf->rwfTabPtr.p->noPages;
+ pagePtr.p->word[34] = ((wf->rwfTabPtr.p->noPages - 1) * 2048) + wf->wordIndex;
+ switch (wf->rwfTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE2_READ_TABLE:
+ /* -------------------------------------------------------------------*/
+ // We are performing a system restart and we are now ready to copy the
+ // table from this node (the master) to all other nodes.
+ /* -------------------------------------------------------------------*/
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZSR_PHASE2_READ_TABLE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_COPY_NODE_STATE:
+ jam();
+ tableCopyNodeLab(signal, wf->rwfTabPtr);
+ return;
+ break;
+ case TabRecord::CS_LCP_READ_TABLE:
+ jam();
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_REMOVE_NODE:
+ case TabRecord::CS_INVALIDATE_NODE_LCP:
+ jam();
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_ADD_TABLE_MASTER:
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZADD_TABLE_MASTER_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::CS_ADD_TABLE_SLAVE:
+ jam();
+ wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ signal->theData[0] = DihContinueB::ZADD_TABLE_SLAVE_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ } else {
+ jam();
+ signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
+ signal->theData[1] = wf->rwfTabPtr.i;
+ signal->theData[2] = wf->fragId;
+ signal->theData[3] = wf->pageIndex;
+ signal->theData[4] = wf->wordIndex;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+ }//if
+ return;
+}//Dbdih::packFragIntoPagesLab()
+
+/*****************************************************************************/
+/* ********** START FRAGMENT MODULE *************/
+/*****************************************************************************/
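+/* ------------------------------------------------------------------------- */
+/* Scan the tables from tableId onwards, skipping tables that are not active */
+/* or not stored on disk, with a real-time break after 100 skipped tables.   */
+/* For each fragment of a found table the restorable stored replicas are     */
+/* collected and START_FRAGREQ is sent, after which the scan continues with  */
+/* the next fragment via CONTINUEB(ZSTART_FRAGMENT).                         */
+/* ------------------------------------------------------------------------- */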
+void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
+{
+ Uint32 TloopCount = 0;
+ TabRecordPtr tabPtr;
+ while (true) {
+ if (TloopCount > 100) {
+ jam();
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = tableId;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
+ if (tableId >= ctabFileSize) {
+ jam();
+ signal->theData[0] = DihContinueB::ZCOMPLETE_RESTART;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ }//if
+
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
+ jam();
+ TloopCount++;
+ tableId++;
+ fragId = 0;
+ continue;
+ }
+
+ if(tabPtr.p->storedTable == 0){
+ jam();
+ TloopCount++;
+ tableId++;
+ fragId = 0;
+ continue;
+ }
+
+ jam();
+ break;
+ }//while
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ /* ----------------------------------------------------------------------- */
+ /* WE NEED TO RESET THE REPLICA DATA STRUCTURES. THIS MEANS THAT WE */
+ /* MUST REMOVE REPLICAS THAT WERE NOT STARTED AT THE GCI TO RESTORE. WE */
+ /* NEED TO PUT ALL STORED REPLICAS ON THE LIST OF OLD STORED REPLICAS AND */
+ /* RESET THE NUMBER OF REPLICAS TO CREATE. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfCreateReplicas = 0;
+ /* ----------------------------------------------------------------------- */
+ /* WE WILL NEVER START MORE THAN FOUR FRAGMENT REPLICAS WHATEVER THE */
+ /* DESIRED REPLICATION IS. */
+ /* ----------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->noOfBackups < 4);
+ /* ----------------------------------------------------------------------- */
+ /* SEARCH FOR STORED REPLICAS THAT CAN BE USED TO RESTART THE SYSTEM. */
+ /* ----------------------------------------------------------------------- */
+ searchStoredReplicas(fragPtr);
+ if (cnoOfCreateReplicas == 0) {
+ /* --------------------------------------------------------------------- */
+ /* THERE WERE NO STORED REPLICAS AVAILABLE THAT COULD SERVE AS REPLICAS*/
+ /* TO RESTART THE SYSTEM FROM. IN A LATER RELEASE WE WILL ADD */
+ /* FUNCTIONALITY TO CHECK IF THERE ARE ANY STANDBY NODES THAT COULD DO */
+ /* THIS TASK INSTEAD. IN THIS IMPLEMENTATION WE SIMPLY CRASH THE SYSTEM.*/
+ /* THIS WILL DECREASE THE GCI TO RESTORE, WHICH HOPEFULLY WILL MAKE IT */
+ /* POSSIBLE TO RESTORE THE SYSTEM. */
+ /* --------------------------------------------------------------------- */
+ char buf[100];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Unable to find restorable replica for "
+ "table: %d fragment: %d gci: %d",
+ tableId, fragId, SYSFILE->newestRestorableGCI);
+ progError(__LINE__,
+ ERR_SYSTEM_ERROR,
+ buf);
+ ndbrequire(false);
+ return;
+ }//if
+
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE CHANGED THE NODE TO BE PRIMARY REPLICA AND THE NODES TO BE */
+ /* BACKUP NODES. WE MUST UPDATE THIS NODE'S DATA STRUCTURE SINCE WE */
+ /* WILL NOT COPY THE TABLE DATA TO OURSELVES. */
+ /* ----------------------------------------------------------------------- */
+ updateNodeInfo(fragPtr);
+ /* ----------------------------------------------------------------------- */
+ /* NOW WE HAVE COLLECTED ALL THE REPLICAS WE COULD GET. WE WILL NOW */
+ /* RESTART THE FRAGMENT REPLICAS WE HAVE FOUND, IRRESPECTIVE OF WHETHER*/
+ /* THERE ARE ENOUGH ACCORDING TO THE DESIRED REPLICATION. */
+ /* ----------------------------------------------------------------------- */
+ /* WE START BY SENDING ADD_FRAGREQ FOR THOSE REPLICAS THAT NEED IT. */
+ /* ----------------------------------------------------------------------- */
+ CreateReplicaRecordPtr createReplicaPtr;
+ for (createReplicaPtr.i = 0;
+ createReplicaPtr.i < cnoOfCreateReplicas;
+ createReplicaPtr.i++) {
+ jam();
+ ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
+ createReplicaPtr.p->hotSpareUse = false;
+ }//for
+
+ sendStartFragreq(signal, tabPtr, fragId);
+
+ /**
+ * Don't wait for START_FRAGCONF
+ */
+ fragId++;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ tabPtr.i++;
+ fragId = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragId;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+
+ return;
+}//Dbdih::startFragment()
+
+
+/*****************************************************************************/
+/* ********** COMPLETE RESTART MODULE *************/
+/*****************************************************************************/
+void Dbdih::completeRestartLab(Signal* signal)
+{
+ sendLoopMacro(START_RECREQ, sendSTART_RECREQ);
+}//Dbdih::completeRestartLab()
+
+/* ------------------------------------------------------------------------- */
+// SYSTEM RESTART:
+/* A NODE HAS COMPLETED RESTORING ALL DATABASE FRAGMENTS. */
+// NODE RESTART:
+// THE STARTING NODE HAS PREPARED ITS LOG FILES TO ENABLE EXECUTION
+// OF TRANSACTIONS.
+// Precondition:
+// This signal must be received by the master node.
+/* ------------------------------------------------------------------------- */
+void Dbdih::execSTART_RECCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ ndbrequire(isMaster());
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // Since our node is already up and running this must be a node restart.
+ // This means that we should be the master node,
+ // otherwise we have a problem.
+ /* --------------------------------------------------------------------- */
+ jam();
+ ndbrequire(senderNodeId == c_nodeStartMaster.startNode);
+ nodeRestartStartRecConfLab(signal);
+ return;
+ } else {
+ /* --------------------------------------------------------------------- */
+ // This was the system restart case. We set the state indicating that the
+ // node has completed restoration of all fragments.
+ /* --------------------------------------------------------------------- */
+ receiveLoopMacro(START_RECREQ, senderNodeId);
+
+ signal->theData[0] = reference();
+ sendSignal(cntrlblockref, GSN_NDB_STARTCONF, signal, 1, JBB);
+ return;
+ }//if
+}//Dbdih::execSTART_RECCONF()
+
+void Dbdih::copyNodeLab(Signal* signal, Uint32 tableId)
+{
+ /* ----------------------------------------------------------------------- */
+ // This code is executed by the master to assist a node restart in receiving
+ // the data in the master.
+ /* ----------------------------------------------------------------------- */
+ Uint32 TloopCount = 0;
+
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // Obviously the node crashed in the middle of its node restart. We will
+ // stop this process simply by returning after resetting the wait indicator.
+ /* ---------------------------------------------------------------------- */
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ while (tabPtr.i < ctabFileSize) {
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ /* -------------------------------------------------------------------- */
+ // The table is defined. We will start by packing the table into pages.
+ // The tabCopyStatus tells the CONTINUEB(ZPACK_TABLE_INTO_PAGES) handler
+ // who called it. After packing the table into page(s) it will be sent to
+ // the starting node by COPY_TABREQ signals. After returning from the
+ // starting node we will return to this subroutine and continue
+ // with the next table.
+ /* -------------------------------------------------------------------- */
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_NODE_STATE;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ if (TloopCount > 100) {
+ /* ------------------------------------------------------------------ */
+ // Introduce a real-time break after looping through 100 tables that were not copied
+ /* ----------------------------------------------------------------- */
+ jam();
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ TloopCount++;
+ tabPtr.i++;
+ }//if
+ }//if
+ }//while
+ dihCopyCompletedLab(signal);
+ return;
+}//Dbdih::copyNodeLab()
+
+void Dbdih::tableCopyNodeLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ /* ----------------------------------------------------------------------- */
+ /* COPY PAGES READ TO STARTING NODE. */
+ /* ----------------------------------------------------------------------- */
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ releaseTabPages(tabPtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ NodeRecordPtr copyNodePtr;
+ PageRecordPtr pagePtr;
+ copyNodePtr.i = c_nodeStartMaster.startNode;
+ ptrCheckGuard(copyNodePtr, MAX_NDB_NODES, nodeRecord);
+
+ copyNodePtr.p->activeTabptr = tabPtr.i;
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+
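+ /* ----------------------------------------------------------------------- */
+ /* Kick off ZCOPY_TABLE_NODE for the starting node: theData[3] and */
+ /* theData[4] are the page and word index (both starting at 0) and */
+ /* theData[5] is the total number of words to copy, taken from word 34 */
+ /* of page 0. */
+ /* ----------------------------------------------------------------------- */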
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = copyNodePtr.i;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ signal->theData[5] = pagePtr.p->word[34];
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+}//Dbdih::tableCopyNodeLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZCOPY_TABLE)
+// This routine is used to copy the table descriptions from the master to
+// other nodes. It is used in the system restart to copy from master to all
+// starting nodes.
+/* ------------------------------------------------------------------------- */
+void Dbdih::copyTableLab(Signal* signal, Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrAss(tabPtr, tabRecord);
+
+ ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE2_READ_TABLE;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dbdih::copyTableLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZSR_PHASE2_READ_TABLE)
+/* ------------------------------------------------------------------------- */
+void Dbdih::srPhase2ReadTableLab(Signal* signal, TabRecordPtr tabPtr)
+{
+ /* ----------------------------------------------------------------------- */
+ // We set the sendCOPY_TABREQState to ZACTIVE for all nodes since it is a long
+ // process to send off all table descriptions. Thus we ensure that we do
+ // not encounter race conditions where one node is completed before the
+ // sending process is completed. Otherwise we could start the system
+ // before all table descriptions have actually been copied, which
+ // could lead to strange errors.
+ /* ----------------------------------------------------------------------- */
+
+ //sendLoopMacro(COPY_TABREQ, nullRoutine);
+
+ breakCopyTableLab(signal, tabPtr, cfirstAliveNode);
+ return;
+}//Dbdih::srPhase2ReadTableLab()
+
+/* ------------------------------------------------------------------------- */
+/* COPY PAGES READ TO ALL NODES. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::breakCopyTableLab(Signal* signal, TabRecordPtr tabPtr, Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ while (nodePtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.i == getOwnNodeId()){
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* NOT NECESSARY TO COPY TO MY OWN NODE. I ALREADY HAVE THE PAGES. */
+ /* I DO HOWEVER NEED TO STORE THE TABLE DESCRIPTION ONTO DISK. */
+ /* ------------------------------------------------------------------- */
+ /* IF WE ARE MASTER WE ONLY NEED TO SAVE THE TABLE ON DISK. WE ALREADY */
+ /* HAVE THE TABLE DESCRIPTION IN THE DATA STRUCTURES. */
+ // AFTER COMPLETING THE WRITE TO DISK THE MASTER WILL ALSO SEND
+ // COPY_TABCONF AS ALL THE OTHER NODES.
+ /* ------------------------------------------------------------------- */
+ c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ;
+ signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ nodePtr.i = nodePtr.p->nextNode;
+ } else {
+ PageRecordPtr pagePtr;
+ /* -------------------------------------------------------------------- */
+ // RATHER THAN SENDING ALL COPY_TABREQ IN PARALLEL WE WILL SERIALISE THIS
+ // ACTIVITY AND WILL THUS CALL breakCopyTableLab AGAIN WHEN THE SENDING
+ // OF COPY_TABREQ'S HAS BEEN COMPLETED.
+ /* -------------------------------------------------------------------- */
+ jam();
+ tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE3_COPY_TABLE;
+ pagePtr.i = tabPtr.p->pageRef[0];
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = nodePtr.i;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ signal->theData[5] = pagePtr.p->word[34];
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+ return;
+ }//if
+ }//while
+ /* ----------------------------------------------------------------------- */
+ /* WE HAVE NOW SENT THE TABLE PAGES TO ALL NODES. EXIT AND WAIT FOR ALL */
+ /* REPLIES. */
+ /* ----------------------------------------------------------------------- */
+ return;
+}//Dbdih::breakCopyTableLab()
+
+/* ------------------------------------------------------------------------- */
+// execCONTINUEB(ZCOPY_TABLE_NODE)
+/* ------------------------------------------------------------------------- */
+void Dbdih::copyTableNode(Signal* signal,
+ CopyTableNode* ctn, NodeRecordPtr nodePtr)
+{
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a node restart and are copying a
+ // table description to a starting node. We will check that no nodes have
+ // crashed in this process.
+ /* --------------------------------------------------------------------- */
+ if (!c_nodeStartMaster.activeState) {
+ jam();
+ /** ------------------------------------------------------------------
+ * The starting node crashed. We will release table pages and stop this
+ * copy process and allow new node restarts to start.
+ * ------------------------------------------------------------------ */
+ releaseTabPages(ctn->ctnTabPtr.i);
+ c_nodeStartMaster.wait = ZFALSE;
+ return;
+ }//if
+ }//if
+ ndbrequire(ctn->pageIndex < 8);
+ ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
+ ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
+ /**
+ * If first page & firstWord reqinfo = 1 (first signal)
+ */
+ Uint32 reqinfo = (ctn->pageIndex == 0) && (ctn->wordIndex == 0);
+ if(reqinfo == 1){
+ c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i);
+ }
+
+ for (Uint32 i = 0; i < 16; i++) {
+ jam();
+ sendCopyTable(signal, ctn, calcDihBlockRef(nodePtr.i), reqinfo);
+ reqinfo = 0;
+ if (ctn->noOfWords <= 16) {
+ jam();
+ switch (ctn->ctnTabPtr.p->tabCopyStatus) {
+ case TabRecord::CS_SR_PHASE3_COPY_TABLE:
+ /* ------------------------------------------------------------------ */
+ // We have copied the table description to this node.
+ // We will now proceed
+ // with sending the table description to the next node in the node list.
+ /* ------------------------------------------------------------------ */
+ jam();
+ ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ breakCopyTableLab(signal, ctn->ctnTabPtr, nodePtr.p->nextNode);
+ return;
+ break;
+ case TabRecord::CS_COPY_NODE_STATE:
+ jam();
+ ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ ctn->wordIndex += 16;
+ if (ctn->wordIndex == 2048) {
+ jam();
+ ctn->wordIndex = 0;
+ ctn->pageIndex++;
+ ndbrequire(ctn->pageIndex < 8);
+ ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex];
+ ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord);
+ }//if
+ ctn->noOfWords -= 16;
+ }//if
+ }//for
+ signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE;
+ signal->theData[1] = ctn->ctnTabPtr.i;
+ signal->theData[2] = nodePtr.i;
+ signal->theData[3] = ctn->pageIndex;
+ signal->theData[4] = ctn->wordIndex;
+ signal->theData[5] = ctn->noOfWords;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB);
+}//Dbdih::copyTableNode()
+
+void Dbdih::sendCopyTable(Signal* signal, CopyTableNode* ctn,
+ BlockReference ref, Uint32 reqinfo)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = reqinfo;
+ signal->theData[2] = ctn->ctnTabPtr.i;
+ signal->theData[3] = ctn->ctnTabPtr.p->schemaVersion;
+ signal->theData[4] = ctn->noOfWords;
+ ndbrequire(ctn->wordIndex + 15 < 2048);
+ MEMCOPY_NO_WORDS(&signal->theData[5], &ctn->ctnPageptr.p->word[ctn->wordIndex], 16);
+ sendSignal(ref, GSN_COPY_TABREQ, signal, 21, JBB);
+}//Dbdih::sendCopyTable()
+
+void Dbdih::execCOPY_TABCONF(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+ jamEntry();
+ nodePtr.i = signal->theData[0];
+ Uint32 tableId = signal->theData[1];
+ if (getNodeState().startLevel >= NodeState::SL_STARTED){
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a node restart. Continue by copying
+ // the next table to the starting node.
+ /* --------------------------------------------------------------------- */
+ jam();
+ NodeRecordPtr nodePtr;
+ nodePtr.i = signal->theData[0];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ c_COPY_TABREQ_Counter.clearWaitingFor(nodePtr.i);
+
+ releaseTabPages(tableId);
+ signal->theData[0] = DihContinueB::ZCOPY_NODE;
+ signal->theData[1] = tableId + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ /* --------------------------------------------------------------------- */
+ // We are in the process of performing a system restart. Check if all nodes
+ // have saved the new table description to file and then continue with the
+ // next table.
+ /* --------------------------------------------------------------------- */
+ receiveLoopMacro(COPY_TABREQ, nodePtr.i);
+ /* --------------------------------------------------------------------- */
+ /* WE HAVE NOW COPIED TO ALL NODES. WE HAVE NOW COMPLETED RESTORING */
+ /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
+ /* WE NEED TO RELEASE THE PAGES IN THE TABLE IN THIS NODE HERE. */
+ /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
+ /* --------------------------------------------------------------------- */
+ releaseTabPages(tableId);
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ConnectRecordPtr connectPtr;
+ connectPtr.i = tabPtr.p->connectrec;
+ ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
+
+ sendAddFragreq(signal, connectPtr, tabPtr, 0);
+ return;
+ }//if
+}//Dbdih::execCOPY_TABCONF()
+
+/*
+ 3.13 L O C A L C H E C K P O I N T (M A S T E R)
+ ****************************************************
+ */
+/*****************************************************************************/
+/* ********** LOCAL-CHECK-POINT-HANDLING MODULE *************/
+/*****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* CHECK WHETHER IT IS TIME TO START A LOCAL CHECKPOINT. */
+/* WE WILL EITHER START AFTER 1 MILLION WORDS HAVE ARRIVED OR WE WILL */
+/* EXECUTE AFTER ABOUT 16 MINUTES HAVE PASSED BY. */
+/* ------------------------------------------------------------------------- */
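+/* ------------------------------------------------------------------------- */
+/* checkTcCounterLab is re-executed via delayed CONTINUEB(ZCHECK_TC_COUNTER) */
+/* signals and adds 32 to the LCP timer each time it runs. The actual        */
+/* decision to start an LCP is taken in execTCGETOPSIZECONF once all TC      */
+/* operation size reports have been collected.                               */
+/* ------------------------------------------------------------------------- */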
+void Dbdih::checkTcCounterLab(Signal* signal)
+{
+ CRASH_INSERTION(7009);
+ if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
+ ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
+ ndbout << "lcpStatusUpdatedPlace = " <<
+ c_lcpState.lcpStatusUpdatedPlace << endl;
+ ndbrequire(false);
+ return;
+ }//if
+ c_lcpState.ctimer += 32;
+ if ((c_nodeStartMaster.blockLcp == true) ||
+ ((c_lcpState.lcpStartGcp + 1) > currentgcp)) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // No reason to start juggling the states and checking for start of LCP if
+ // we are blocked to start an LCP anyway.
+ // We also block LCP start if we have not completed at least one global checkpoint
+ // before starting another local checkpoint.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = __LINE__;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
+ return;
+ }//if
+ c_lcpState.setLcpStatus(LCP_TCGET, __LINE__);
+
+ c_lcpState.ctcCounter = c_lcpState.ctimer;
+ sendLoopMacro(TCGETOPSIZEREQ, sendTCGETOPSIZEREQ);
+}//Dbdih::checkTcCounterLab()
+
+void Dbdih::checkLcpStart(Signal* signal, Uint32 lineNo)
+{
+ /* ----------------------------------------------------------------------- */
+ // Verify that we are not attempting to start another instance of the LCP
+ // when it is not alright to do so.
+ /* ----------------------------------------------------------------------- */
+ ndbrequire(c_lcpState.lcpStart == ZIDLE);
+ c_lcpState.lcpStart = ZACTIVE;
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = lineNo;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2);
+}//Dbdih::checkLcpStart()
+
+/* ------------------------------------------------------------------------- */
+/*TCGETOPSIZECONF HOW MUCH OPERATION SIZE HAVE BEEN EXECUTED BY TC */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execTCGETOPSIZECONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ c_lcpState.ctcCounter += signal->theData[1];
+
+ receiveLoopMacro(TCGETOPSIZEREQ, senderNodeId);
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TCGET);
+ ndbrequire(c_lcpState.lcpStart == ZACTIVE);
+ /* ----------------------------------------------------------------------- */
+ // The checks above guard against receiving this signal while we are
+ // not actively starting another LCP, which would not be ok.
+ /* ---------------------------------------------------------------------- */
+ /* ALL TC'S HAVE RESPONDED NOW. NOW WE WILL CHECK IF ENOUGH OPERATIONS */
+ /* HAVE EXECUTED TO ENABLE US TO START A NEW LOCAL CHECKPOINT. */
+ /* WHILE COPYING DICTIONARY AND DISTRIBUTION INFO TO A STARTING NODE */
+ /* WE WILL ALSO NOT ALLOW THE LOCAL CHECKPOINT TO PROCEED. */
+ /*----------------------------------------------------------------------- */
+ if (c_lcpState.immediateLcpStart == false) {
+ if ((c_lcpState.ctcCounter <
+ ((Uint32)1 << c_lcpState.clcpDelay)) ||
+ (c_nodeStartMaster.blockLcp == true)) {
+ jam();
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+
+ signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER;
+ signal->theData[1] = __LINE__;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2);
+ return;
+ }//if
+ }//if
+ c_lcpState.lcpStart = ZIDLE;
+ c_lcpState.immediateLcpStart = false;
+ /* -----------------------------------------------------------------------
+ * Now that the initial LCP has started,
+ * we can reset the delay to its original value
+ * --------------------------------------------------------------------- */
+ CRASH_INSERTION(7010);
+ /* ----------------------------------------------------------------------- */
+ /* IF MORE THAN 1 MILLION WORDS PASSED THROUGH THE TC'S THEN WE WILL */
+ /* START A NEW LOCAL CHECKPOINT. CLEAR CTIMER. START CHECKPOINT */
+ /* ACTIVITY BY CALCULATING THE KEEP GLOBAL CHECKPOINT. */
+ // Also remember the current global checkpoint to ensure that we run at least
+ // one global checkpoints between each local checkpoint that we start up.
+ /* ----------------------------------------------------------------------- */
+ c_lcpState.ctimer = 0;
+ c_lcpState.keepGci = coldgcp;
+ c_lcpState.lcpStartGcp = currentgcp;
+ /* ----------------------------------------------------------------------- */
+ /* UPDATE THE NEW LATEST LOCAL CHECKPOINT ID. */
+ /* ----------------------------------------------------------------------- */
+ cnoOfActiveTables = 0;
+ c_lcpState.setLcpStatus(LCP_CALCULATE_KEEP_GCI, __LINE__);
+ c_lcpState.oldestRestorableGci = SYSFILE->oldestRestorableGCI;
+ ndbrequire(((int)c_lcpState.oldestRestorableGci) > 0);
+
+ if (ERROR_INSERTED(7011)) {
+ signal->theData[0] = NDB_LE_LCPStoppedInCalcKeepGci;
+ signal->theData[1] = 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ return;
+ }//if
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = 0; /* TABLE ID = 0 */
+ signal->theData[2] = 0; /* FRAGMENT ID = 0 */
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::execTCGETOPSIZECONF()
+
+/* ------------------------------------------------------------------------- */
+/* WE NEED TO CALCULATE THE OLDEST GLOBAL CHECKPOINT THAT WILL BE */
+/* COMPLETELY RESTORABLE AFTER EXECUTING THIS LOCAL CHECKPOINT. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId)
+{
+ TabRecordPtr tabPtr;
+ Uint32 TloopCount = 1;
+ tabPtr.i = tableId;
+ do {
+ if (tabPtr.i >= ctabFileSize) {
+ if (cnoOfActiveTables > 0) {
+ jam();
+ signal->theData[0] = DihContinueB::ZSTORE_NEW_LCP_ID;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------ */
+ /* THERE ARE NO TABLES TO CHECKPOINT. WE STOP THE CHECKPOINT ALREADY */
+ /* HERE TO AVOID STRANGE PROBLEMS LATER. */
+ /* ------------------------------------------------------------------ */
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ checkLcpStart(signal, __LINE__);
+ return;
+ }//if
+ }//if
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE ||
+ tabPtr.p->storedTable == 0) {
+ if (TloopCount > 100) {
+ jam();
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = tabPtr.i + 1;
+ signal->theData[2] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ TloopCount++;
+ tabPtr.i++;
+ }//if
+ } else {
+ jam();
+ TloopCount = 0;
+ }//if
+ } while (TloopCount != 0);
+ cnoOfActiveTables++;
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ checkKeepGci(fragPtr.p->storedReplicas);
+ fragId++;
+ if (fragId >= tabPtr.p->totalfragments) {
+ jam();
+ tabPtr.i++;
+ fragId = 0;
+ }//if
+ signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragId;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+}//Dbdih::calculateKeepGciLab()
+
+/* ------------------------------------------------------------------------- */
+/* WE NEED TO STORE ON DISK THE FACT THAT WE ARE STARTING THIS LOCAL */
+/* CHECKPOINT ROUND. THIS WILL INVALIDATE ALL THE LOCAL CHECKPOINTS */
+/* THAT WILL EVENTUALLY BE OVERWRITTEN AS PART OF THIS LOCAL CHECKPOINT*/
+/* ------------------------------------------------------------------------- */
+void Dbdih::storeNewLcpIdLab(Signal* signal)
+{
+ /***************************************************************************/
+ // Report the event that a local checkpoint has started.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_LocalCheckpointStarted; //Event type
+ signal->theData[1] = SYSFILE->latestLCP_ID + 1;
+ signal->theData[2] = c_lcpState.keepGci;
+ signal->theData[3] = c_lcpState.oldestRestorableGci;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ signal->setTrace(TestOrd::TraceLocalCheckpoint);
+
+ CRASH_INSERTION(7013);
+ SYSFILE->keepGCI = c_lcpState.keepGci;
+ //Uint32 lcpId = SYSFILE->latestLCP_ID;
+ SYSFILE->latestLCP_ID++;
+ SYSFILE->oldestRestorableGCI = c_lcpState.oldestRestorableGci;
+
+ const Uint32 oldestRestorableGCI = SYSFILE->oldestRestorableGCI;
+ //const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
+ //ndbrequire(newestRestorableGCI >= oldestRestorableGCI);
+
+ Int32 val = oldestRestorableGCI;
+ ndbrequire(val > 0);
+
+ /* ----------------------------------------------------------------------- */
+ /* SET BIT INDICATING THAT LOCAL CHECKPOINT IS ONGOING. THIS IS CLEARED */
+ /* AT THE END OF A LOCAL CHECKPOINT. */
+ /* ----------------------------------------------------------------------- */
+ SYSFILE->setLCPOngoing(SYSFILE->systemRestartBits);
+ /* ---------------------------------------------------------------------- */
+ /* CHECK IF ANY NODE MUST BE TAKEN OUT OF SERVICE AND REFILLED WITH */
+ /* NEW FRESH DATA FROM AN ACTIVE NODE. */
+ /* ---------------------------------------------------------------------- */
+ setLcpActiveStatusStart(signal);
+ c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__);
+ //#ifdef VM_TRACE
+ // infoEvent("LocalCheckpoint %d started", SYSFILE->latestLCP_ID);
+ // signal->theData[0] = 7012;
+ // execDUMP_STATE_ORD(signal);
+ //#endif
+
+ copyGciLab(signal, CopyGCIReq::LOCAL_CHECKPOINT);
+}//Dbdih::storeNewLcpIdLab()
+
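+/* ------------------------------------------------------------------------- */
+/* The start of the LCP round is protected by the start-LCP mutex. Once the  */
+/* mutex is locked, START_LCP_REQ carrying the lcp id and the participating  */
+/* LQH and DIH bitmasks is sent to all participating nodes; when all         */
+/* START_LCP_CONF have been received the mutex is unlocked and released and  */
+/* TC_CLOPSIZEREQ is sent to all TCs.                                        */
+/* ------------------------------------------------------------------------- */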
+void Dbdih::startLcpRoundLab(Signal* signal) {
+ jam();
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ Callback c = { safe_cast(&Dbdih::startLcpMutex_locked), 0 };
+ ndbrequire(mutex.lock(c));
+}
+
+void
+Dbdih::startLcpMutex_locked(Signal* signal, Uint32 senderData, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ StartLcpReq* req = (StartLcpReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->lcpId = SYSFILE->latestLCP_ID;
+ req->participatingLQH = c_lcpState.m_participatingLQH;
+ req->participatingDIH = c_lcpState.m_participatingDIH;
+ sendLoopMacro(START_LCP_REQ, sendSTART_LCP_REQ);
+}
+void
+Dbdih::sendSTART_LCP_REQ(Signal* signal, Uint32 nodeId){
+ BlockReference ref = calcDihBlockRef(nodeId);
+ sendSignal(ref, GSN_START_LCP_REQ, signal, StartLcpReq::SignalLength, JBB);
+}
+
+void
+Dbdih::execSTART_LCP_CONF(Signal* signal){
+ StartLcpConf * conf = (StartLcpConf*)signal->getDataPtr();
+
+ Uint32 nodeId = refToNode(conf->senderRef);
+ receiveLoopMacro(START_LCP_REQ, nodeId);
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ Callback c = { safe_cast(&Dbdih::startLcpMutex_unlocked), 0 };
+ mutex.unlock(c);
+}
+
+void
+Dbdih::startLcpMutex_unlocked(Signal* signal, Uint32 data, Uint32 retVal){
+ jamEntry();
+ ndbrequire(retVal == 0);
+
+ Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle);
+ mutex.release();
+
+ CRASH_INSERTION(7014);
+ c_lcpState.setLcpStatus(LCP_TC_CLOPSIZE, __LINE__);
+ sendLoopMacro(TC_CLOPSIZEREQ, sendTC_CLOPSIZEREQ);
+}
+
+void Dbdih::execTC_CLOPSIZECONF(Signal* signal) {
+ jamEntry();
+ Uint32 senderNodeId = signal->theData[0];
+ receiveLoopMacro(TC_CLOPSIZEREQ, senderNodeId);
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TC_CLOPSIZE);
+ /* ----------------------------------------------------------------------- */
+ /* ALL TC'S HAVE CLEARED THEIR OPERATION SIZE COUNTERS. NOW PROCEED BY */
+ /* STARTING THE LOCAL CHECKPOINT IN EACH LQH. */
+ /* ----------------------------------------------------------------------- */
+ c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_participatingLQH;
+
+ CRASH_INSERTION(7015);
+ c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
+ startLcpRoundLoopLab(signal, 0, 0);
+}//Dbdih::execTC_CLOPSIZECONF()
+
+void Dbdih::startLcpRoundLoopLab(Signal* signal,
+ Uint32 startTableId, Uint32 startFragId)
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ ndbrequire(nodePtr.p->noOfStartedChkpt == 0);
+ ndbrequire(nodePtr.p->noOfQueuedChkpt == 0);
+ }//if
+ }//for
+ c_lcpState.currentFragment.tableId = startTableId;
+ c_lcpState.currentFragment.fragmentId = startFragId;
+ startNextChkpt(signal);
+}//Dbdih::startLcpRoundLoopLab()
+
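+/* ------------------------------------------------------------------------- */
+/* Each participating LQH node may have at most two fragment checkpoints     */
+/* executing (startedChkpt) and at most two more queued (queuedChkpt).       */
+/* The scan position is saved in c_lcpState.currentFragment at the first     */
+/* node found full, and the scan stops when all participating nodes are      */
+/* full; it is resumed from checkStartMoreLcp as fragment checkpoints        */
+/* complete.                                                                 */
+/* ------------------------------------------------------------------------- */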
+void Dbdih::startNextChkpt(Signal* signal)
+{
+ Uint32 lcpId = SYSFILE->latestLCP_ID;
+
+ NdbNodeBitmask busyNodes;
+ busyNodes.clear();
+ const Uint32 lcpNodes = c_lcpState.m_participatingLQH.count();
+
+ bool save = true;
+ LcpState::CurrentFragment curr = c_lcpState.currentFragment;
+
+ while (curr.tableId < ctabFileSize) {
+ TabRecordPtr tabPtr;
+ tabPtr.i = curr.tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if ((tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) ||
+ (tabPtr.p->tabLcpStatus != TabRecord::TLS_ACTIVE)) {
+ curr.tableId++;
+ curr.fragmentId = 0;
+ continue;
+ }//if
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, curr.fragmentId, fragPtr);
+
+ ReplicaRecordPtr replicaPtr;
+ for(replicaPtr.i = fragPtr.p->storedReplicas;
+ replicaPtr.i != RNIL ;
+ replicaPtr.i = replicaPtr.p->nextReplica){
+
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ if (replicaPtr.p->lcpOngoingFlag &&
+ replicaPtr.p->lcpIdStarted < lcpId) {
+ jam();
+ //-------------------------------------------------------------------
+ // We have found a replica on a node that performs local checkpoint
+ // that is alive and that have not yet been started.
+ //-------------------------------------------------------------------
+
+ if (nodePtr.p->noOfStartedChkpt < 2) {
+ jam();
+ /**
+ * Send LCP_FRAG_ORD to LQH
+ */
+
+ /**
+ * Mark the replica as started by setting lcpIdStarted to the current lcpId
+ */
+ replicaPtr.p->lcpIdStarted = lcpId;
+
+ Uint32 i = nodePtr.p->noOfStartedChkpt;
+ nodePtr.p->startedChkpt[i].tableId = tabPtr.i;
+ nodePtr.p->startedChkpt[i].fragId = curr.fragmentId;
+ nodePtr.p->startedChkpt[i].replicaPtr = replicaPtr.i;
+ nodePtr.p->noOfStartedChkpt = i + 1;
+
+ sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
+ } else if (nodePtr.p->noOfQueuedChkpt < 2) {
+ jam();
+ /**
+ * Put LCP_FRAG_ORD "in queue"
+ */
+
+ /**
+ * Mark the replica as started by setting lcpIdStarted to the current lcpId
+ */
+ replicaPtr.p->lcpIdStarted = lcpId;
+
+ Uint32 i = nodePtr.p->noOfQueuedChkpt;
+ nodePtr.p->queuedChkpt[i].tableId = tabPtr.i;
+ nodePtr.p->queuedChkpt[i].fragId = curr.fragmentId;
+ nodePtr.p->queuedChkpt[i].replicaPtr = replicaPtr.i;
+ nodePtr.p->noOfQueuedChkpt = i + 1;
+ } else {
+ jam();
+
+ if(save){
+ /**
+ * Stop increasing value on first that was "full"
+ */
+ c_lcpState.currentFragment = curr;
+ save = false;
+ }
+
+ busyNodes.set(nodePtr.i);
+ if(busyNodes.count() == lcpNodes){
+ /**
+ * It was neither possible to start the local checkpoint on this
+ * fragment nor to queue it up. In this case we stop starting
+ * local checkpoints until the nodes with a backlog have
+ * performed more checkpoints. We return and do not continue
+ * the process of starting any more checkpoints.
+ */
+ return;
+ }//if
+ }//if
+ }
+ }//for
+ curr.fragmentId++;
+ if (curr.fragmentId >= tabPtr.p->totalfragments) {
+ jam();
+ curr.fragmentId = 0;
+ curr.tableId++;
+ }//if
+ }//while
+
+ sendLastLCP_FRAG_ORD(signal);
+}//Dbdih::startNextChkpt()
+
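+/* ------------------------------------------------------------------------- */
+/* Send the final LCP_FRAG_ORD (lastFragmentFlag = true) to every            */
+/* participating LQH that has no fragment checkpoints started or queued,     */
+/* telling it that no more fragments will be ordered in this LCP.            */
+/* ------------------------------------------------------------------------- */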
+void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
+{
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ lcpFragOrd->tableId = RNIL;
+ lcpFragOrd->fragmentId = 0;
+ lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
+ lcpFragOrd->lcpNo = 0;
+ lcpFragOrd->keepGci = c_lcpState.keepGci;
+ lcpFragOrd->lastFragmentFlag = true;
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+
+ if(nodePtr.p->noOfQueuedChkpt == 0 &&
+ nodePtr.p->noOfStartedChkpt == 0 &&
+ c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodePtr.i)){
+ jam();
+
+ CRASH_INSERTION(7028);
+
+ /**
+ * Nothing queued or started <=> Complete on that node
+ *
+ */
+ c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodePtr.i);
+ if(ERROR_INSERTED(7075)){
+ continue;
+ }
+ BlockReference ref = calcLqhBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
+ }
+ }
+ if(ERROR_INSERTED(7075)){
+ if(c_lcpState.m_LAST_LCP_FRAG_ORD.done())
+ CRASH_INSERTION(7075);
+ }
+}//Dbdih::sendLastLCP_FRAG_ORD()
+
+/* ------------------------------------------------------------------------- */
+/* A FRAGMENT REPLICA HAS COMPLETED EXECUTING ITS LOCAL CHECKPOINT. */
+/* CHECK IF ALL REPLICAS IN THE TABLE HAVE COMPLETED. IF SO STORE THE */
+/* THE TABLE DISTRIBUTION ON DISK. ALSO SEND LCP_REPORT TO ALL OTHER */
+/* NODES SO THAT THEY CAN STORE THE TABLE ONTO DISK AS WELL. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::execLCP_FRAG_REP(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+
+#if 0
+ printLCP_FRAG_REP(stdout,
+ signal->getDataPtr(),
+ signal->length(), number());
+#endif
+
+ LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
+ Uint32 nodeId = lcpReport->nodeId;
+ Uint32 tableId = lcpReport->tableId;
+ Uint32 fragId = lcpReport->fragId;
+
+ jamEntry();
+
+ CRASH_INSERTION2(7025, isMaster());
+ CRASH_INSERTION2(7016, !isMaster());
+
+ bool fromTimeQueue = (signal->senderBlockRef() == reference());
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if(tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ // If the table is currently being copied to disk we also
+ // stop here already to avoid strange half-way updates
+ // of the table data structures.
+ /*-----------------------------------------------------------------------*/
+ /*
+ We need to send this signal without a delay since we have discovered
+ that we have run out of space in the short time queue. This problem
+ is very unlikely to happen, but it has happened and it results in a node
+ crash. This should be considered a "quick fix" and not a permanent
+ solution. A cleaner/better way would be to check whether the time queue
+ is full before sending this signal.
+ */
+ sendSignal(reference(), GSN_LCP_FRAG_REP, signal, signal->length(), JBB);
+ /* Kept here for reference
+ sendSignalWithDelay(reference(), GSN_LCP_FRAG_REP,
+ signal, 20, signal->length());
+ */
+
+ if(!fromTimeQueue){
+ c_lcpState.noOfLcpFragRepOutstanding++;
+ }
+
+ return;
+ }//if
+
+ if(fromTimeQueue){
+ jam();
+
+ ndbrequire(c_lcpState.noOfLcpFragRepOutstanding > 0);
+ c_lcpState.noOfLcpFragRepOutstanding--;
+ }
+
+ bool tableDone = reportLcpCompletion(lcpReport);
+
+ if(tableDone){
+ jam();
+
+ if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
+ jam();
+ ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
+ tableId,
+ fragId);
+ } else {
+ jam();
+ /**
+ * Write table description to file
+ */
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE;
+ tabPtr.p->tabCopyStatus = TabRecord::CS_LCP_READ_TABLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_LOCAL_CHECKPOINT;
+ signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+
+ checkLcpAllTablesDoneInLqh();
+ }
+ }
+
+#ifdef VM_TRACE
+ /* --------------------------------------------------------------------- */
+ // REPORT that the local checkpoint has completed this fragment.
+ /* --------------------------------------------------------------------- */
+ signal->theData[0] = NDB_LE_LCPFragmentCompleted;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tableId;
+ signal->theData[3] = fragId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+#endif
+
+ bool ok = false;
+ switch(c_lcpMasterTakeOverState.state){
+ case LMTOS_IDLE:
+ ok = true;
+ jam();
+ /**
+ * Fall through
+ */
+ break;
+ case LMTOS_WAIT_EMPTY_LCP: // LCP Take over waiting for EMPTY_LCPCONF
+ jam();
+ return;
+ case LMTOS_WAIT_LCP_FRAG_REP:
+ jam();
+ checkEmptyLcpComplete(signal);
+ return;
+ case LMTOS_INITIAL:
+ case LMTOS_ALL_IDLE:
+ case LMTOS_ALL_ACTIVE:
+ case LMTOS_LCP_CONCLUDING:
+ case LMTOS_COPY_ONGOING:
+ ndbrequire(false);
+ }
+ ndbrequire(ok);
+
+ /* ----------------------------------------------------------------------- */
+ // Check if there are more LCP's to start up.
+ /* ----------------------------------------------------------------------- */
+ if(isMaster()){
+ jam();
+
+ /**
+ * Remove from "running" array
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ const Uint32 outstanding = nodePtr.p->noOfStartedChkpt;
+ ndbrequire(outstanding > 0);
+ if(nodePtr.p->startedChkpt[0].tableId != tableId ||
+ nodePtr.p->startedChkpt[0].fragId != fragId){
+ jam();
+ ndbrequire(outstanding > 1);
+ ndbrequire(nodePtr.p->startedChkpt[1].tableId == tableId);
+ ndbrequire(nodePtr.p->startedChkpt[1].fragId == fragId);
+ } else {
+ jam();
+ nodePtr.p->startedChkpt[0] = nodePtr.p->startedChkpt[1];
+ }
+ nodePtr.p->noOfStartedChkpt--;
+ checkStartMoreLcp(signal, nodeId);
+ }
+}
+
+bool
+Dbdih::checkLcpAllTablesDoneInLqh(){
+ TabRecordPtr tabPtr;
+
+ /**
+ * Check if finished with all tables
+ */
+ for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
+ jam();
+ ptrAss(tabPtr, tabRecord);
+ if ((tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) &&
+ (tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE)) {
+ jam();
+ /**
+ * Nope, not finished with all tables
+ */
+ return false;
+ }//if
+ }//for
+
+ CRASH_INSERTION2(7026, isMaster());
+ CRASH_INSERTION2(7017, !isMaster());
+
+ c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
+ return true;
+}
+
+void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
+ Fragmentstore* fragPtrP, Uint32 nodeId)
+{
+ replicaPtr.i = fragPtrP->storedReplicas;
+ while(replicaPtr.i != RNIL){
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode == nodeId) {
+ jam();
+ return;
+ } else {
+ jam();
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }//if
+ };
+
+#ifdef VM_TRACE
+ ndbout_c("Fragment Replica(node=%d) not found", nodeId);
+ replicaPtr.i = fragPtrP->oldStoredReplicas;
+ while(replicaPtr.i != RNIL){
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ if (replicaPtr.p->procNode == nodeId) {
+ jam();
+ break;
+ } else {
+ jam();
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ }//if
+ };
+ if(replicaPtr.i != RNIL){
+ ndbout_c("...But was found in oldStoredReplicas");
+ } else {
+ ndbout_c("...And wasn't found in oldStoredReplicas");
+ }
+#endif
+ ndbrequire(false);
+}//Dbdih::findReplica()
+
+/**
+ * Return true if all fragment replicas of the table have been checkpointed
+ * to disk (in all LQHs),
+ * false otherwise
+ */
+bool
+Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
+{
+ Uint32 lcpNo = lcpReport->lcpNo;
+ Uint32 lcpId = lcpReport->lcpId;
+ Uint32 maxGciStarted = lcpReport->maxGciStarted;
+ Uint32 maxGciCompleted = lcpReport->maxGciCompleted;
+ Uint32 tableId = lcpReport->tableId;
+ Uint32 fragId = lcpReport->fragId;
+ Uint32 nodeId = lcpReport->nodeId;
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+
+ ReplicaRecordPtr replicaPtr;
+ findReplica(replicaPtr, fragPtr.p, nodeId);
+
+ ndbrequire(replicaPtr.p->lcpOngoingFlag == true);
+ if(lcpNo != replicaPtr.p->nextLcp){
+ ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
+ lcpNo, replicaPtr.p->nextLcp);
+ ndbrequire(false);
+ }
+ ndbrequire(lcpNo == replicaPtr.p->nextLcp);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ ndbrequire(replicaPtr.p->lcpId[lcpNo] != lcpId);
+
+ replicaPtr.p->lcpIdStarted = lcpId;
+ replicaPtr.p->lcpOngoingFlag = false;
+
+ removeOldCrashedReplicas(replicaPtr);
+ replicaPtr.p->lcpId[lcpNo] = lcpId;
+ replicaPtr.p->lcpStatus[lcpNo] = ZVALID;
+ replicaPtr.p->maxGciStarted[lcpNo] = maxGciStarted;
+ gth(maxGciStarted + 1, 0);
+ replicaPtr.p->maxGciCompleted[lcpNo] = maxGciCompleted;
+ replicaPtr.p->nextLcp = nextLcpNo(replicaPtr.p->nextLcp);
+
+ ndbrequire(fragPtr.p->noLcpReplicas > 0);
+ fragPtr.p->noLcpReplicas --;
+
+ if(fragPtr.p->noLcpReplicas > 0){
+ jam();
+ return false;
+ }
+
+ for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
+ jam();
+ getFragstore(tabPtr.p, fid, fragPtr);
+ if (fragPtr.p->noLcpReplicas > 0){
+ jam();
+ /* ----------------------------------------------------------------- */
+ // Not all fragments in table have been checkpointed.
+ /* ----------------------------------------------------------------- */
+ if(0)
+ ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
+ return false;
+ }//if
+ }//for
+ return true;
+}//Dbdih::reportLcpCompletion()
+
+void Dbdih::checkStartMoreLcp(Signal* signal, Uint32 nodeId)
+{
+ ndbrequire(isMaster());
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ ndbrequire(nodePtr.p->noOfStartedChkpt < 2);
+
+ if (nodePtr.p->noOfQueuedChkpt > 0) {
+ jam();
+ nodePtr.p->noOfQueuedChkpt--;
+ Uint32 i = nodePtr.p->noOfStartedChkpt;
+ nodePtr.p->startedChkpt[i] = nodePtr.p->queuedChkpt[0];
+ nodePtr.p->queuedChkpt[0] = nodePtr.p->queuedChkpt[1];
+ //-------------------------------------------------------------------
+ // We can send a LCP_FRAGORD to the node ordering it to perform a
+ // local checkpoint on this fragment replica.
+ //-------------------------------------------------------------------
+ nodePtr.p->noOfStartedChkpt = i + 1;
+
+ sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]);
+ }
+
+ /* ----------------------------------------------------------------------- */
+ // When there are no more outstanding LCP reports and none queued
+ // in at least one node, then we are ready to make sure all nodes have at
+ // least two outstanding LCP requests per node and at least two queued for
+ // sending.
+ /* ----------------------------------------------------------------------- */
+ startNextChkpt(signal);
+}//Dbdih::checkStartMoreLcp()
+
+void
+Dbdih::sendLCP_FRAG_ORD(Signal* signal,
+ NodeRecord::FragmentCheckpointInfo info){
+
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = info.replicaPtr;
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+
+ BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
+
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ lcpFragOrd->tableId = info.tableId;
+ lcpFragOrd->fragmentId = info.fragId;
+ lcpFragOrd->lcpId = SYSFILE->latestLCP_ID;
+ lcpFragOrd->lcpNo = replicaPtr.p->nextLcp;
+ lcpFragOrd->keepGci = c_lcpState.keepGci;
+ lcpFragOrd->lastFragmentFlag = false;
+ sendSignal(ref, GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB);
+}
+
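+/* ------------------------------------------------------------------------- */
+/* LCP_TAB_COMPLETED means that all fragment replicas have reported          */
+/* LCP_FRAG_REP (set in checkLcpAllTablesDoneInLqh). Once every active table */
+/* also has its description saved to file (tabLcpStatus == TLS_COMPLETED)    */
+/* the state advances to LCP_TAB_SAVED and LCP_COMPLETE_REP is sent to the   */
+/* master DIH.                                                               */
+/* ------------------------------------------------------------------------- */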
+void Dbdih::checkLcpCompletedLab(Signal* signal)
+{
+ if(c_lcpState.lcpStatus < LCP_TAB_COMPLETED){
+ jam();
+ return;
+ }
+
+ TabRecordPtr tabPtr;
+ for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) {
+ jam();
+ ptrAss(tabPtr, tabRecord);
+ if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) {
+ if (tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//for
+
+ CRASH_INSERTION2(7027, isMaster());
+ CRASH_INSERTION2(7018, !isMaster());
+
+ if(c_lcpState.lcpStatus == LCP_TAB_COMPLETED){
+ /**
+ * We're done
+ */
+ c_lcpState.setLcpStatus(LCP_TAB_SAVED, __LINE__);
+ sendLCP_COMPLETE_REP(signal);
+ return;
+ }
+
+ ndbrequire(c_lcpState.lcpStatus == LCP_TAB_SAVED);
+ allNodesLcpCompletedLab(signal);
+ return;
+}//Dbdih::checkLcpCompletedLab()
+
+void
+Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
+ jam();
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = DBDIH;
+
+ sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+}
+
+/*-------------------------------------------------------------------------- */
+/* COMP_LCP_ROUND A LQH HAS COMPLETED A LOCAL CHECKPOINT */
+/*------------------------------------------------------------------------- */
+void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+
+#if 0
+ ndbout_c("LCP_COMPLETE_REP");
+ printLCP_COMPLETE_REP(stdout,
+ signal->getDataPtr(),
+ signal->length(), number());
+#endif
+
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtr();
+ Uint32 lcpId = rep->lcpId;
+ Uint32 nodeId = rep->nodeId;
+ Uint32 blockNo = rep->blockNo;
+
+ if(c_lcpMasterTakeOverState.state > LMTOS_WAIT_LCP_FRAG_REP){
+ jam();
+ /**
+ * Don't allow LCP_COMPLETE_REP to arrive during
+ * LCP master take over
+ */
+ ndbrequire(isMaster());
+ ndbrequire(blockNo == DBDIH);
+ sendSignalWithDelay(reference(), GSN_LCP_COMPLETE_REP, signal, 100,
+ signal->length());
+ return;
+ }
+
+ ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE);
+
+ switch(blockNo){
+ case DBLQH:
+ jam();
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.clearWaitingFor(nodeId);
+ ndbrequire(!c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId));
+ break;
+ case DBDIH:
+ jam();
+ ndbrequire(isMaster());
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor(nodeId);
+ break;
+ case 0:
+ jam();
+ ndbrequire(!isMaster());
+ ndbrequire(c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false);
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = true;
+ break;
+ default:
+ ndbrequire(false);
+ }
+ ndbrequire(lcpId == SYSFILE->latestLCP_ID);
+
+ allNodesLcpCompletedLab(signal);
+ return;
+}
+
+void Dbdih::allNodesLcpCompletedLab(Signal* signal)
+{
+ jam();
+
+ if (c_lcpState.lcpStatus != LCP_TAB_SAVED) {
+ jam();
+ /**
+ * We have not sent LCP_COMPLETE_REP to master DIH yet
+ */
+ return;
+ }//if
+
+ if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.done()){
+ jam();
+ return;
+ }
+
+ if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.done()){
+ jam();
+ return;
+ }
+
+ if (!isMaster() &&
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false){
+ jam();
+ /**
+ * Wait until master DIH has signaled lcp is complete
+ */
+ return;
+ }
+
+ if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Exiting from allNodesLcpCompletedLab");
+#endif
+ return;
+ }
+
+
+ /*------------------------------------------------------------------------ */
+ /* WE HAVE NOW COMPLETED A LOCAL CHECKPOINT. WE ARE NOW READY TO WAIT */
+ /* FOR THE NEXT LOCAL CHECKPOINT. SEND WITHOUT TIME-OUT SINCE IT MIGHT */
+ /* BE TIME TO START THE NEXT LOCAL CHECKPOINT IMMEDIATELY. */
+ /* CLEAR BIT 3 OF SYSTEM RESTART BITS TO INDICATE THAT THERE IS NO */
+ /* LOCAL CHECKPOINT ONGOING. THIS WILL BE WRITTEN AT SOME LATER TIME */
+ /* DURING A GLOBAL CHECKPOINT. IT IS NOT NECESSARY TO WRITE IT */
+ /* IMMEDIATELY. WE WILL ALSO CLEAR BIT 2 OF SYSTEM RESTART BITS IF ALL */
+ /* CURRENTLY ACTIVE NODES COMPLETED THE LOCAL CHECKPOINT. */
+ /*------------------------------------------------------------------------ */
+ CRASH_INSERTION(7019);
+ signal->setTrace(0);
+
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ setLcpActiveStatusEnd();
+ Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits);
+
+ if(!isMaster()){
+ jam();
+ /**
+ * We're not master, be content
+ */
+ return;
+ }
+
+ // Send LCP_COMPLETE_REP to all other nodes
+ // allowing them to set their lcpStatus to LCP_STATUS_IDLE
+ LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = SYSFILE->latestLCP_ID;
+ rep->blockNo = 0; // 0 = Sent from master
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.i != cownNodeId){
+ BlockReference ref = calcDihBlockRef(nodePtr.i);
+ sendSignal(ref, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+
+
+ jam();
+ /***************************************************************************/
+ // Report the event that a local checkpoint has completed.
+ /***************************************************************************/
+ signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
+ signal->theData[1] = SYSFILE->latestLCP_ID;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ /**
+ * Start checking for next LCP
+ */
+ checkLcpStart(signal, __LINE__);
+
+ if (cwaitLcpSr == true) {
+ jam();
+ cwaitLcpSr = false;
+ ndbsttorry10Lab(signal, __LINE__);
+ return;
+ }//if
+
+ if (c_nodeStartMaster.blockLcp == true) {
+ jam();
+ lcpBlockedLab(signal);
+ return;
+ }//if
+ return;
+}//Dbdih::allNodesLcpCompletedLab()
+
+/******************************************************************************/
+/* ********** TABLE UPDATE MODULE *************/
+/* ****************************************************************************/
+/* ------------------------------------------------------------------------- */
+/* THIS MODULE IS USED TO UPDATE THE TABLE DESCRIPTION. IT STARTS BY */
+/* CREATING THE FIRST TABLE FILE, THEN UPDATES THIS FILE AND CLOSES IT.*/
+/* AFTER THAT THE SAME HAPPENS WITH THE SECOND FILE. AFTER THAT THE */
+/* TABLE DISTRIBUTION HAS BEEN UPDATED. */
+/* */
+/* THE REASON FOR CREATING THE FILE AND NOT OPENING IT IS TO ENSURE */
+/* THAT WE DO NOT GET A MIX OF OLD AND NEW INFORMATION IN THE FILE IN */
+/* ERROR SITUATIONS. */
+/* ------------------------------------------------------------------------- */
+void Dbdih::tableUpdateLab(Signal* signal, TabRecordPtr tabPtr) {
+ FileRecordPtr filePtr;
+ filePtr.i = tabPtr.p->tabFile[0];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
+ return;
+}//Dbdih::tableUpdateLab()
+
+void Dbdih::tableCreateLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ writeTabfile(signal, tabPtr.p, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_WRITE;
+ return;
+}//Dbdih::tableCreateLab()
+
+void Dbdih::tableWriteLab(Signal* signal, FileRecordPtr filePtr)
+{
+ closeFile(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CLOSE;
+ return;
+}//Dbdih::tableWriteLab()
+
+void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = filePtr.p->tabRef;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ if (filePtr.i == tabPtr.p->tabFile[0]) {
+ jam();
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ createFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::TABLE_CREATE;
+ return;
+ }//if
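+ /* Both copies of the table description file are now written and closed. */
+ /* Continue according to why the table description was being updated. */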
+ switch (tabPtr.p->tabUpdateState) {
+ case TabRecord::US_LOCAL_CHECKPOINT:
+ jam();
+ releaseTabPages(tabPtr.i);
+ signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ return;
+ break;
+ case TabRecord::US_REMOVE_NODE:
+ jam();
+ releaseTabPages(tabPtr.i);
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ updateNodeInfo(fragPtr);
+ }//for
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ if (tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE) {
+ jam();
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = tabPtr.p->tabRemoveNode;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ break;
+ case TabRecord::US_INVALIDATE_NODE_LCP:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+
+ signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP;
+ signal->theData[1] = tabPtr.p->tabRemoveNode;
+ signal->theData[2] = tabPtr.i + 1;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ case TabRecord::US_COPY_TAB_REQ:
+ jam();
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ copyTabReq_complete(signal, tabPtr);
+ return;
+ break;
+ case TabRecord::US_ADD_TABLE_MASTER:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_MASTER;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ case TabRecord::US_ADD_TABLE_SLAVE:
+ jam();
+ releaseTabPages(tabPtr.i);
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_SLAVE;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+}//Dbdih::tableCloseLab()
+
+/**
+ * GCP stop detected,
+ * send SYSTEM_ERROR to all other alive nodes
+ */
+void Dbdih::crashSystemAtGcpStop(Signal* signal){
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ const BlockReference ref =
+ numberToRef(refToBlock(cntrlblockref), nodePtr.i);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(ref, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }//if
+ }//for
+ return;
+}//Dbdih::crashSystemAtGcpStop()
+
+/*************************************************************************/
+/* */
+/* MODULE: ALLOCPAGE */
+/* DESCRIPTION: THE SUBROUTINE IS CALLED WITH POINTER TO PAGE */
+/* RECORD. A PAGE RECORD IS TAKEN FROM */
+/* THE FREE PAGE LIST */
+/*************************************************************************/
+void Dbdih::allocpage(PageRecordPtr& pagePtr)
+{
+ ndbrequire(cfirstfreepage != RNIL);
+ pagePtr.i = cfirstfreepage;
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ cfirstfreepage = pagePtr.p->nextfreepage;
+ pagePtr.p->nextfreepage = RNIL;
+}//Dbdih::allocpage()
+
+/*************************************************************************/
+/* */
+/* MODULE: ALLOC_STORED_REPLICA */
+/* DESCRIPTION: THE SUBROUTINE IS CALLED TO GET A REPLICA RECORD, */
+/* TO INITIALISE IT AND TO LINK IT INTO THE FRAGMENT */
+/* STORE RECORD. USED FOR STORED REPLICAS. */
+/*************************************************************************/
+void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr& newReplicaPtr,
+ Uint32 nodeId)
+{
+ Uint32 i;
+ ReplicaRecordPtr arrReplicaPtr;
+ ReplicaRecordPtr arrPrevReplicaPtr;
+
+ seizeReplicaRec(newReplicaPtr);
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ newReplicaPtr.p->maxGciCompleted[i] = 0;
+ newReplicaPtr.p->maxGciStarted[i] = 0;
+ newReplicaPtr.p->lcpId[i] = 0;
+ newReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//for
+ newReplicaPtr.p->noCrashedReplicas = 0;
+ newReplicaPtr.p->initialGci = currentgcp;
+ for (i = 0; i < 8; i++) {
+ newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1;
+ newReplicaPtr.p->createGci[i] = 0;
+ }//for
+ newReplicaPtr.p->createGci[0] = currentgcp;
+ ndbrequire(currentgcp != 0xF1F1F1F1);
+ newReplicaPtr.p->nextLcp = 0;
+ newReplicaPtr.p->procNode = nodeId;
+ newReplicaPtr.p->lcpOngoingFlag = false;
+ newReplicaPtr.p->lcpIdStarted = 0;
+
+ arrPrevReplicaPtr.i = RNIL;
+ arrReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (arrReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(arrReplicaPtr, creplicaFileSize, replicaRecord);
+ arrPrevReplicaPtr = arrReplicaPtr;
+ arrReplicaPtr.i = arrReplicaPtr.p->nextReplica;
+ }//while
+ if (arrPrevReplicaPtr.i == RNIL) {
+ jam();
+ fragPtr.p->storedReplicas = newReplicaPtr.i;
+ } else {
+ jam();
+ arrPrevReplicaPtr.p->nextReplica = newReplicaPtr.i;
+ }//if
+ fragPtr.p->noStoredReplicas++;
+}//Dbdih::allocStoredReplica()
+
+/*************************************************************************/
+/* CALCULATE HOW MANY HOT SPARES ARE TO BE ASSIGNED IN THIS SYSTEM */
+/*************************************************************************/
+void Dbdih::calculateHotSpare()
+{
+ Uint32 tchsTmp;
+ Uint32 tchsNoNodes;
+
+ switch (cnoReplicas) {
+ case 1:
+ jam();
+ cnoHotSpare = 0;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ jam();
+ if (csystemnodes > cnoReplicas) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WITH MORE NODES THAN REPLICAS WE WILL ALWAYS USE AT LEAST ONE HOT */
+ /* SPARE IF THAT HAS BEEN REQUESTED BY THE CONFIGURATION FILE. THE */
+ /* NUMBER OF NODES TO BE USED FOR NORMAL OPERATION IS ALWAYS */
+ /* A MULTIPLE OF THE NUMBER OF REPLICAS SINCE WE WILL ORGANISE NODES */
+ /* INTO NODE GROUPS. THE REMAINING NODES WILL BE HOT SPARE NODES. */
+ /* --------------------------------------------------------------------- */
+ if ((csystemnodes - cnoReplicas) >= cminHotSpareNodes) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We set the minimum number of hot spares according to the user's request
+ // through the configuration file.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes - cminHotSpareNodes;
+ cnoHotSpare = cminHotSpareNodes;
+ } else if (cminHotSpareNodes > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // The user requested at least one hot spare node, and we will honor
+ // that request.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes - 1;
+ cnoHotSpare = 1;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // The user did not request any hot spare nodes so in this case we will
+ // only use hot spare nodes if the number of nodes is such that we cannot
+ // use all nodes as normal nodes.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes;
+ cnoHotSpare = 0;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------- */
+ // We only have enough to support the replicas. We will not have any hot
+ // spares.
+ /* --------------------------------------------------------------------- */
+ tchsNoNodes = csystemnodes;
+ cnoHotSpare = 0;
+ }//if
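+ // The remainder tchsNoNodes modulo cnoReplicas cannot form a complete
+ // node group, so those nodes also become hot spares.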
+ tchsTmp = tchsNoNodes - (cnoReplicas * (tchsNoNodes / cnoReplicas));
+ cnoHotSpare = cnoHotSpare + tchsTmp;
+ break;
+ default:
+ jam();
+ progError(0, 0);
+ break;
+ }//switch
+}//Dbdih::calculateHotSpare()
+
+/*************************************************************************/
+/* CHECK IF THE NODE CRASH IS TO ESCALATE INTO A SYSTEM CRASH. WE COULD */
+/* DO THIS BECAUSE ALL REPLICAS OF SOME FRAGMENT ARE LOST. WE COULD ALSO */
+/* DO IT AFTER MANY NODE FAILURES THAT MAKE IT VERY DIFFICULT TO RESTORE */
+/* THE DATABASE AFTER A SYSTEM CRASH. IT MIGHT EVEN BE IMPOSSIBLE AND THIS */
+/* MUST BE AVOIDED EVEN MORE THAN AVOIDING SYSTEM CRASHES. */
+/*************************************************************************/
+void Dbdih::checkEscalation()
+{
+ Uint32 TnodeGroup[MAX_NDB_NODES];
+ NodeRecordPtr nodePtr;
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ TnodeGroup[i] = ZFALSE;
+ }//for
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE &&
+ nodePtr.p->activeStatus == Sysfile::NS_Active){
+ ndbrequire(nodePtr.p->nodeGroup < MAX_NDB_NODES);
+ TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE;
+ }
+ }
+ for (i = 0; i < cnoOfNodeGroups; i++) {
+ jam();
+ if (TnodeGroup[i] == ZFALSE) {
+ jam();
+ progError(__LINE__, ERR_SYSTEM_ERROR, "Lost node group");
+ }//if
+ }//for
+}//Dbdih::checkEscalation()
+
+/*************************************************************************/
+/* */
+/* MODULE: CHECK_KEEP_GCI */
+/* DESCRIPTION: CHECK FOR MINIMUM GCI RESTORABLE WITH NEW LOCAL */
+/* CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::checkKeepGci(Uint32 replicaStartIndex)
+{
+ ReplicaRecordPtr ckgReplicaPtr;
+ ckgReplicaPtr.i = replicaStartIndex;
+ while (ckgReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ckgReplicaPtr, creplicaFileSize, replicaRecord);
+ Uint32 keepGci;
+ Uint32 oldestRestorableGci;
+ findMinGci(ckgReplicaPtr, keepGci, oldestRestorableGci);
+ if (keepGci < c_lcpState.keepGci) {
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* WE MUST KEEP LOG RECORDS SO THAT WE CAN USE ALL LOCAL CHECKPOINTS */
+ /* THAT ARE AVAILABLE. THUS WE NEED TO CALCULATE THE MINIMUM OVER ALL */
+ /* FRAGMENTS. */
+ /* ------------------------------------------------------------------- */
+ c_lcpState.keepGci = keepGci;
+ }//if
+ if (oldestRestorableGci > c_lcpState.oldestRestorableGci) {
+ jam();
+ c_lcpState.oldestRestorableGci = oldestRestorableGci;
+ ndbrequire(((int)c_lcpState.oldestRestorableGci) >= 0);
+ }//if
+ ckgReplicaPtr.i = ckgReplicaPtr.p->nextReplica;
+ }//while
+}//Dbdih::checkKeepGci()
+
+void Dbdih::closeFile(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZCLOSE_NO_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dbdih::closeFile()
+
+void Dbdih::closeFileDelete(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZCLOSE_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dbdih::closeFileDelete()
+
+void Dbdih::createFileRw(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = ZCREATE_READ_WRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::createFileRw()
+
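+/**
+ * Take one transaction off the verification queue and answer it with
+ * DIVERIFYCONF carrying the current GCP. When invoked from CONTINUEB the
+ * routine reschedules itself so that the queue keeps draining as long as
+ * commits are not blocked.
+ */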
+void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB)
+{
+ if(cfirstVerifyQueue == RNIL){
+ jam();
+ return;
+ }//if
+ ApiConnectRecordPtr localApiConnectptr;
+ if(getBlockCommit() == false){
+ jam();
+ ndbrequire(cverifyQueueCounter > 0);
+ cverifyQueueCounter--;
+ localApiConnectptr.i = cfirstVerifyQueue;
+ ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord);
+ ndbrequire(localApiConnectptr.p->apiGci <= currentgcp);
+ cfirstVerifyQueue = localApiConnectptr.p->nextApi;
+ if (cfirstVerifyQueue == RNIL) {
+ jam();
+ ndbrequire(cverifyQueueCounter == 0);
+ clastVerifyQueue = RNIL;
+ }//if
+ signal->theData[0] = localApiConnectptr.i;
+ signal->theData[1] = currentgcp;
+ sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 2, JBB);
+ if (aContinueB == true) {
+ jam();
+ //-----------------------------------------------------------------------
+ // This emptying happened as part of a drain process driven by CONTINUEB signals.
+ // This ensures that we will empty the queue eventually. We will also empty
+ // one item every time we insert one item to ensure that the list doesn't
+ // grow when it is not blocked.
+ //-----------------------------------------------------------------------
+ signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ } else {
+ jam();
+ //-----------------------------------------------------------------------
+ // We are blocked so it is no use in continuing the emptying of the
+ // verify buffer. Whenever the block is removed the emptying will
+ // restart.
+ //-----------------------------------------------------------------------
+ }
+ return;
+}//Dbdih::emptyverificbuffer()
+
+/*----------------------------------------------------------------*/
+/* FIND A FREE HOT SPARE IF AVAILABLE AND ALIVE. */
+/*----------------------------------------------------------------*/
+Uint32 Dbdih::findHotSpare()
+{
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ if (nodePtr.p->activeStatus == Sysfile::NS_HotSpare) {
+ jam();
+ return nodePtr.i;
+ }//if
+ }//if
+ }//for
+ return RNIL;
+}//Dbdih::findHotSpare()
+
+/*************************************************************************/
+/* FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
+/* DATA NODE IN A SYSTEM RESTART. */
+/*************************************************************************/
+bool Dbdih::findLogNodes(CreateReplicaRecord* createReplica,
+ FragmentstorePtr fragPtr,
+ Uint32 startGci,
+ Uint32 stopGci)
+{
+ ConstPtr<ReplicaRecord> flnReplicaPtr;
+ flnReplicaPtr.i = createReplica->replicaRec;
+ ptrCheckGuard(flnReplicaPtr, creplicaFileSize, replicaRecord);
+ /* --------------------------------------------------------------------- */
+ /* WE START BY CHECKING IF THE DATA NODE CAN HANDLE THE LOG ALL BY */
+ /* ITSELF. THIS IS THE DESIRED BEHAVIOUR. IF THIS IS NOT POSSIBLE */
+ /* THEN WE SEARCH FOR THE BEST POSSIBLE NODES AMONG THE NODES THAT */
+ /* ARE PART OF THIS SYSTEM RESTART. */
+ /* THIS CAN ONLY BE HANDLED BY THE LAST CRASHED REPLICA. */
+ /* The condition is that the replica was created before or at the */
+ /* time of the starting gci, in addition it must have been alive */
+ /* at the time of the stopping gci. This is checked by two */
+ /* conditions, the first checks replicaLastGci and the second */
+ /* checks that it is also smaller than the last gci the node was */
+/* involved in. This is necessary to check since createGci is set to */
+/* LastGci + 1 and sometimes startGci = stopGci + 1 and in that case */
+ /* it could happen that replicaLastGci is set to -1 with CreateGci */
+ /* set to LastGci + 1. */
+ /* --------------------------------------------------------------------- */
+ arrGuard(flnReplicaPtr.p->noCrashedReplicas, 8);
+ const Uint32 noCrashed = flnReplicaPtr.p->noCrashedReplicas;
+
+ if (!(ERROR_INSERTED(7073) || ERROR_INSERTED(7074))&&
+ (startGci >= flnReplicaPtr.p->createGci[noCrashed]) &&
+ (stopGci <= flnReplicaPtr.p->replicaLastGci[noCrashed]) &&
+ (stopGci <= SYSFILE->lastCompletedGCI[flnReplicaPtr.p->procNode])) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* WE FOUND ALL THE LOG RECORDS NEEDED IN THE DATA NODE. WE WILL */
+ /* USE THOSE. */
+ /* --------------------------------------------------------------------- */
+ createReplica->noLogNodes = 1;
+ createReplica->logStartGci[0] = startGci;
+ createReplica->logStopGci[0] = stopGci;
+ createReplica->logNodeId[0] = flnReplicaPtr.p->procNode;
+ return true;
+ }//if
+ Uint32 logNode = 0;
+ do {
+ Uint32 fblStopGci;
+ jam();
+ if(!findBestLogNode(createReplica,
+ fragPtr,
+ startGci,
+ stopGci,
+ logNode,
+ fblStopGci)){
+ jam();
+ return false;
+ }
+
+ logNode++;
+ if (fblStopGci >= stopGci) {
+ jam();
+ createReplica->noLogNodes = logNode;
+ return true;
+ }//if
+ startGci = fblStopGci + 1;
+ if (logNode >= 4) { // Why??
+ jam();
+ break;
+ }//if
+ } while (1);
+ /* --------------------------------------------------------------------- */
+ /* IT WAS NOT POSSIBLE TO RESTORE THE REPLICA. THIS CAN EITHER BE */
+ /* BECAUSE OF LACKING NODES OR BECAUSE OF A REALLY SERIOUS PROBLEM.*/
+ /* --------------------------------------------------------------------- */
+ return false;
+}//Dbdih::findLogNodes()
+
+/*************************************************************************/
+/* FIND THE BEST POSSIBLE LOG NODE TO EXECUTE THE LOG AS SPECIFIED */
+/* BY THE INPUT PARAMETERS. WE SCAN THROUGH ALL ALIVE REPLICAS. */
+/* THIS MEANS BOTH STORED AND OLD STORED REPLICAS. */
+/*************************************************************************/
+bool
+Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
+ FragmentstorePtr fragPtr,
+ Uint32 startGci,
+ Uint32 stopGci,
+ Uint32 logNode,
+ Uint32& fblStopGci)
+{
+ ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
+ ConstPtr<ReplicaRecord> fblReplicaPtr;
+
+ /* --------------------------------------------------------------------- */
+ /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
+ /* BETTER. */
+ /* --------------------------------------------------------------------- */
+ fblStopGci = 0;
+ fblReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (fblReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
+ if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
+ jam();
+ Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
+ if (fliStopGci > fblStopGci) {
+ jam();
+ fblStopGci = fliStopGci;
+ fblFoundReplicaPtr = fblReplicaPtr;
+ }//if
+ }//if
+ fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
+ }//while
+ fblReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (fblReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
+ if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
+ jam();
+ Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
+ if (fliStopGci > fblStopGci) {
+ jam();
+ fblStopGci = fliStopGci;
+ fblFoundReplicaPtr = fblReplicaPtr;
+ }//if
+ }//if
+ fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
+ }//while
+ if (fblStopGci != 0) {
+ jam();
+ ndbrequire(logNode < MAX_LOG_EXEC);
+ createReplica->logNodeId[logNode] = fblFoundReplicaPtr.p->procNode;
+ createReplica->logStartGci[logNode] = startGci;
+ if (fblStopGci >= stopGci) {
+ jam();
+ createReplica->logStopGci[logNode] = stopGci;
+ } else {
+ jam();
+ createReplica->logStopGci[logNode] = fblStopGci;
+ }//if
+ }//if
+
+ return fblStopGci != 0;
+}//Dbdih::findBestLogNode()
+
+Uint32 Dbdih::findLogInterval(ConstPtr<ReplicaRecord> replicaPtr,
+ Uint32 startGci)
+{
+ ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
+ Uint32 loopLimit = replicaPtr.p->noCrashedReplicas + 1;
+ for (Uint32 i = 0; i < loopLimit; i++) {
+ jam();
+ if (replicaPtr.p->createGci[i] <= startGci) {
+ if (replicaPtr.p->replicaLastGci[i] >= startGci) {
+ jam();
+ return replicaPtr.p->replicaLastGci[i];
+ }//if
+ }//if
+ }//for
+ return 0;
+}//Dbdih::findLogInterval()
+
+/*************************************************************************/
+/* */
+/* MODULE: FIND THE MINIMUM GCI THAT THIS NODE HAS LOG RECORDS FOR.*/
+/*************************************************************************/
+void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
+ Uint32& keepGci,
+ Uint32& oldestRestorableGci)
+{
+ Uint32 nextLcpNo;
+ Uint32 lcpNo;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ jam();
+ if ((fmgReplicaPtr.p->lcpStatus[i] == ZVALID) &&
+ ((fmgReplicaPtr.p->lcpId[i] + MAX_LCP_STORED) <= (SYSFILE->latestLCP_ID + 1))) {
+ jam();
+ /*--------------------------------------------------------------------*/
+ // We invalidate the checkpoint we are preparing to overwrite.
+ // The LCP id is still the old lcp id,
+ // this is the reason of comparing with lcpId + 1.
+ /*---------------------------------------------------------------------*/
+ fmgReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//if
+ }//for
+ keepGci = (Uint32)-1;
+ oldestRestorableGci = 0;
+ nextLcpNo = fmgReplicaPtr.p->nextLcp;
+ lcpNo = fmgReplicaPtr.p->nextLcp;
+ do {
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
+ jam();
+ keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
+ oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
+ ndbrequire(((int)oldestRestorableGci) >= 0);
+ return;
+ } else {
+ jam();
+ ndbrequire(fmgReplicaPtr.p->lcpStatus[lcpNo] == ZINVALID);
+ if (fmgReplicaPtr.p->createGci[0] == fmgReplicaPtr.p->initialGci) {
+ jam();
+ /*-------------------------------------------------------------------
+ * WE CAN STILL RESTORE THIS REPLICA WITHOUT ANY LOCAL CHECKPOINTS BY
+ * ONLY USING THE LOG. IF THIS IS NOT POSSIBLE THEN WE REPORT THE LAST
+ * VALID LOCAL CHECKPOINT AS THE MINIMUM GCI RECOVERABLE.
+ *-----------------------------------------------------------------*/
+ keepGci = fmgReplicaPtr.p->createGci[0];
+ }//if
+ }//if
+ lcpNo = prevLcpNo(lcpNo);
+ } while (lcpNo != nextLcpNo);
+ return;
+}//Dbdih::findMinGci()
+
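+/**
+ * Search backwards from the most recent LCP for a valid local checkpoint
+ * whose maxGciStarted is below stopGci. If one is found, startGci becomes
+ * maxGciCompleted + 1 of that checkpoint and true is returned. Otherwise
+ * the replica must be restored from the redo log alone, starting at the
+ * GCI where the fragment was created, and false is returned.
+ */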
+bool Dbdih::findStartGci(ConstPtr<ReplicaRecord> replicaPtr,
+ Uint32 stopGci,
+ Uint32& startGci,
+ Uint32& lcpNo)
+{
+ lcpNo = replicaPtr.p->nextLcp;
+ const Uint32 startLcpNo = lcpNo;
+ do {
+ lcpNo = prevLcpNo(lcpNo);
+ ndbrequire(lcpNo < MAX_LCP_STORED);
+ if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
+ if (replicaPtr.p->maxGciStarted[lcpNo] < stopGci) {
+ jam();
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
+ /* RESTARTING THIS FRAGMENT REPLICA. */
+ /* ----------------------------------------------------------------- */
+ startGci = replicaPtr.p->maxGciCompleted[lcpNo] + 1;
+ return true;
+ }
+ }
+ } while (lcpNo != startLcpNo);
+ /* --------------------------------------------------------------------- */
+ /* NO VALID LOCAL CHECKPOINT WAS AVAILABLE. WE WILL ADD THE */
+ /* FRAGMENT. THUS THE NEXT LCP MUST BE SET TO ZERO. */
+ /* WE MUST EXECUTE THE LOG FROM THE INITIAL GLOBAL CHECKPOINT WHEN */
+ /* THE TABLE WAS CREATED. */
+ /* --------------------------------------------------------------------- */
+ startGci = replicaPtr.p->initialGci;
+ ndbrequire(replicaPtr.p->nextLcp == 0);
+ return false;
+}//Dbdih::findStartGci()
+
+/**************************************************************************/
+/* ---------------------------------------------------------------------- */
+/* FIND A TAKE OVER REPLICA WHICH IS TO BE STARTED OR COMMITTED WHEN*/
+/* TAKING OVER A FAILED NODE. */
+/* ---------------------------------------------------------------------- */
+/*************************************************************************/
+void Dbdih::findToReplica(TakeOverRecord* regTakeOver,
+ Uint32 replicaType,
+ FragmentstorePtr fragPtr,
+ ReplicaRecordPtr& ftrReplicaPtr)
+{
+ switch (replicaType) {
+ case CreateFragReq::STORED:
+ case CreateFragReq::COMMIT_STORED:
+ /* ----------------------------------------------------------------------*/
+ /* HERE WE SEARCH FOR STORED REPLICAS. THE REPLICA MUST BE STORED IN THE */
+ /* SECTION FOR OLD STORED REPLICAS SINCE WE HAVE NOT TAKEN OVER YET. */
+ /* ----------------------------------------------------------------------*/
+ ftrReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ while (ftrReplicaPtr.i != RNIL) {
+ ptrCheckGuard(ftrReplicaPtr, creplicaFileSize, replicaRecord);
+ if (ftrReplicaPtr.p->procNode == regTakeOver->toStartingNode) {
+ jam();
+ return;
+ } else {
+ if (ftrReplicaPtr.p->procNode == regTakeOver->toFailedNode) {
+ jam();
+ return;
+ } else {
+ jam();
+ ftrReplicaPtr.i = ftrReplicaPtr.p->nextReplica;
+ }//if
+ }//if
+ }//while
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbdih::findToReplica()
+
+void Dbdih::initCommonData()
+{
+ c_blockCommit = false;
+ c_blockCommitNo = 0;
+ c_createFragmentLock = RNIL;
+ c_endToLock = RNIL;
+ cfailurenr = 1;
+ cfirstAliveNode = RNIL;
+ cfirstDeadNode = RNIL;
+ cfirstVerifyQueue = RNIL;
+ cgckptflag = false;
+ cgcpDelay = 0;
+ cgcpMasterTakeOverState = GMTOS_IDLE;
+ cgcpOrderBlocked = 0;
+ cgcpParticipantState = GCP_PARTICIPANT_READY;
+ cgcpSameCounter = 0;
+ cgcpStartCounter = 0;
+ cgcpStatus = GCP_READY;
+
+ clastVerifyQueue = RNIL;
+ c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__);
+
+ c_lcpState.clcpDelay = 0;
+ c_lcpState.lcpStart = ZIDLE;
+ c_lcpState.lcpStartGcp = 0;
+ c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
+ c_lcpState.currentFragment.tableId = 0;
+ c_lcpState.currentFragment.fragmentId = 0;
+ c_lcpState.noOfLcpFragRepOutstanding = 0;
+ c_lcpState.keepGci = 0;
+ c_lcpState.oldestRestorableGci = 0;
+ c_lcpState.ctcCounter = 0;
+ c_lcpState.ctimer = 0;
+ c_lcpState.immediateLcpStart = false;
+ c_lcpState.m_MASTER_LCPREQ_Received = false;
+
+ cmasterdihref = 0;
+ cmasterNodeId = 0;
+ cmasterState = MASTER_IDLE;
+ cmasterTakeOverNode = 0;
+ cnewgcp = 0;
+ cnoHotSpare = 0;
+ cnoOfActiveTables = 0;
+ cnoOfNodeGroups = 0;
+ cnoReplicas = 0;
+ coldgcp = 0;
+ coldGcpId = 0;
+ coldGcpStatus = cgcpStatus;
+ con_lineNodes = 0;
+ creceivedfrag = 0;
+ crestartGci = 0;
+ crestartInfoFile[0] = RNIL;
+ crestartInfoFile[1] = RNIL;
+ cstartGcpNow = false;
+ cstartPhase = 0;
+ c_startToLock = RNIL;
+ cstarttype = (Uint32)-1;
+ csystemnodes = 0;
+ c_updateToLock = RNIL;
+ currentgcp = 0;
+ cverifyQueueCounter = 0;
+ cwaitLcpSr = false;
+
+ nodeResetStart();
+ c_nodeStartMaster.wait = ZFALSE;
+
+ memset(&sysfileData[0], 0, sizeof(sysfileData));
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
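+ // Read configuration parameters and clamp each one to its supported range.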
+ c_lcpState.clcpDelay = 20;
+ ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &c_lcpState.clcpDelay);
+ c_lcpState.clcpDelay = c_lcpState.clcpDelay > 31 ? 31 : c_lcpState.clcpDelay;
+
+ cminHotSpareNodes = 0;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_MIN_HOT_SPARES, &cminHotSpareNodes);
+ cminHotSpareNodes = cminHotSpareNodes > 2 ? 2 : cminHotSpareNodes;
+
+ cnoReplicas = 1;
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_REPLICAS, &cnoReplicas);
+ cnoReplicas = cnoReplicas > 4 ? 4 : cnoReplicas;
+
+ cgcpDelay = 2000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay);
+ cgcpDelay = cgcpDelay > 60000 ? 60000 : (cgcpDelay < 10 ? 10 : cgcpDelay);
+}//Dbdih::initCommonData()
+
+void Dbdih::initFragstore(FragmentstorePtr fragPtr)
+{
+ fragPtr.p->storedReplicas = RNIL;
+ fragPtr.p->oldStoredReplicas = RNIL;
+
+ fragPtr.p->noStoredReplicas = 0;
+ fragPtr.p->noOldStoredReplicas = 0;
+ fragPtr.p->fragReplicas = 0;
+ fragPtr.p->preferredPrimary = 0;
+
+ for (Uint32 i = 0; i < MAX_REPLICAS; i++)
+ fragPtr.p->activeNodes[i] = 0;
+
+ fragPtr.p->noLcpReplicas = 0;
+ fragPtr.p->distributionKey = 0;
+}//Dbdih::initFragstore()
+
+/*************************************************************************/
+/* */
+/* MODULE: INIT_RESTART_INFO */
+/* DESCRIPTION: INITIALISE THE RESTART INFO VARIABLES AND THE VARIABLES FOR */
+/* GLOBAL CHECKPOINTS. */
+/*************************************************************************/
+void Dbdih::initRestartInfo()
+{
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ SYSFILE->lastCompletedGCI[i] = 0;
+ }//for
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ SYSFILE->lastCompletedGCI[nodePtr.i] = 1;
+ /* FIRST GCP = 1 ALREADY SET BY LQH */
+ nodePtr.i = nodePtr.p->nextNode;
+ } while (nodePtr.i != RNIL);
+ coldgcp = 1;
+ currentgcp = 2;
+ cnewgcp = 2;
+ crestartGci = 1;
+
+ SYSFILE->keepGCI = 1;
+ SYSFILE->oldestRestorableGCI = 1;
+ SYSFILE->newestRestorableGCI = 1;
+ SYSFILE->systemRestartBits = 0;
+ for (i = 0; i < NodeBitmask::Size; i++) {
+ SYSFILE->lcpActive[i] = 0;
+ }//for
+ for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) {
+ SYSFILE->takeOver[i] = 0;
+ }//for
+ Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
+}//Dbdih::initRestartInfo()
+
+/*--------------------------------------------------------------------*/
+/* NODE GROUP BITS ARE INITIALISED BEFORE THIS. */
+/* NODE ACTIVE BITS ARE INITIALISED BEFORE THIS. */
+/*--------------------------------------------------------------------*/
+/*************************************************************************/
+/* */
+/* MODULE: INIT_RESTORABLE_GCI_FILES */
+/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILE RECORDS THAT REFER TO THE */
+/* FILES THAT KEEP THE VARIABLE CRESTART_INFO */
+/*************************************************************************/
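+/**
+ * fileName[3] packs four one-byte fields: file name version (bits 24-31),
+ * file type suffix (bits 16-23), directory (bits 8-15) and file number
+ * (bits 0-7); e.g. 0x01060100 below means version 1, .SYSFILE, D1, P0.
+ */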
+void Dbdih::initRestorableGciFiles()
+{
+ Uint32 tirgTmp;
+ FileRecordPtr filePtr;
+ seizeFile(filePtr);
+ filePtr.p->tabRef = RNIL;
+ filePtr.p->fileType = FileRecord::GCP_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ crestartInfoFile[0] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
+ tirgTmp = 1; /* FILE NAME VERSION 1 */
+ tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
+ tirgTmp = (tirgTmp << 8) + 1; /* D1 DIRECTORY */
+ tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
+ filePtr.p->fileName[3] = tirgTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D1/DBDICT/P0.SYSFILE */
+ /* --------------------------------------------------------------------- */
+ seizeFile(filePtr);
+ filePtr.p->tabRef = RNIL;
+ filePtr.p->fileType = FileRecord::GCP_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ crestartInfoFile[1] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */
+ tirgTmp = 1; /* FILE NAME VERSION 1 */
+ tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */
+ tirgTmp = (tirgTmp << 8) + 2; /* D2 DIRECTORY */
+ tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */
+ filePtr.p->fileName[3] = tirgTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D2/DBDICT/P0.SYSFILE */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::initRestorableGciFiles()
+
+void Dbdih::initTable(TabRecordPtr tabPtr)
+{
+ tabPtr.p->noOfFragChunks = 0;
+ tabPtr.p->method = TabRecord::NOTDEFINED;
+ tabPtr.p->tabStatus = TabRecord::TS_IDLE;
+ tabPtr.p->noOfWords = 0;
+ tabPtr.p->noPages = 0;
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+ tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
+ tabPtr.p->tabUpdateState = TabRecord::US_IDLE;
+ tabPtr.p->noOfBackups = 0;
+ tabPtr.p->kvalue = 0;
+ tabPtr.p->hashpointer = (Uint32)-1;
+ tabPtr.p->mask = 0;
+ tabPtr.p->storedTable = 1;
+ tabPtr.p->tabErrorCode = 0;
+ tabPtr.p->schemaVersion = (Uint32)-1;
+ tabPtr.p->tabRemoveNode = RNIL;
+ tabPtr.p->totalfragments = (Uint32)-1;
+ tabPtr.p->connectrec = RNIL;
+ tabPtr.p->tabFile[0] = RNIL;
+ tabPtr.p->tabFile[1] = RNIL;
+ tabPtr.p->m_dropTab.tabUserRef = 0;
+ tabPtr.p->m_dropTab.tabUserPtr = RNIL;
+ Uint32 i;
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ tabPtr.p->startFid[i] = RNIL;
+ }//for
+ for (i = 0; i < 8; i++) {
+ tabPtr.p->pageRef[i] = RNIL;
+ }//for
+ tabPtr.p->tableType = DictTabInfo::UndefTableType;
+}//Dbdih::initTable()
+
+/*************************************************************************/
+/* */
+/* MODULE: INIT_TABLE_FILES */
+/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILE RECORDS THAT REFER TO THE */
+/* FILES THAT KEEP THE TABLE FRAGMENTATION DESCRIPTION. */
+/*************************************************************************/
+void Dbdih::initTableFile(TabRecordPtr tabPtr)
+{
+ Uint32 titfTmp;
+ FileRecordPtr filePtr;
+ seizeFile(filePtr);
+ filePtr.p->tabRef = tabPtr.i;
+ filePtr.p->fileType = FileRecord::TABLE_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ tabPtr.p->tabFile[0] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
+ titfTmp = 1; /* FILE NAME VERSION 1 */
+ titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
+ titfTmp = (titfTmp << 8) + 1; /* D1 DIRECTORY */
+ titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
+ filePtr.p->fileName[3] = titfTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D1/DBDICT/Stid.FRAGLIST */
+ /* --------------------------------------------------------------------- */
+ seizeFile(filePtr);
+ filePtr.p->tabRef = tabPtr.i;
+ filePtr.p->fileType = FileRecord::TABLE_FILE;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ tabPtr.p->tabFile[1] = filePtr.i;
+ filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */
+ filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */
+ filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */
+ titfTmp = 1; /* FILE NAME VERSION 1 */
+ titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */
+ titfTmp = (titfTmp << 8) + 2; /* D2 DIRECTORY */
+ titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */
+ filePtr.p->fileName[3] = titfTmp;
+ /* --------------------------------------------------------------------- */
+ /* THE NAME BECOMES /D2/DBDICT/Stid.FRAGLIST */
+ /* --------------------------------------------------------------------- */
+}//Dbdih::initTableFile()
+
+void Dbdih::initialiseRecordsLab(Signal* signal,
+ Uint32 stepNo, Uint32 retRef, Uint32 retData)
+{
+ switch (stepNo) {
+ case 0:
+ jam();
+ initCommonData();
+ break;
+ case 1:{
+ ApiConnectRecordPtr apiConnectptr;
+ jam();
+ /******** INITIALIZING API CONNECT RECORDS ********/
+ for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) {
+ refresh_watch_dog();
+ ptrAss(apiConnectptr, apiConnectRecord);
+ apiConnectptr.p->nextApi = RNIL;
+ }//for
+ jam();
+ break;
+ }
+ case 2:{
+ ConnectRecordPtr connectPtr;
+ jam();
+ /****** CONNECT ******/
+ for (connectPtr.i = 0; connectPtr.i < cconnectFileSize; connectPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(connectPtr, connectRecord);
+ connectPtr.p->userpointer = RNIL;
+ connectPtr.p->userblockref = ZNIL;
+ connectPtr.p->connectState = ConnectRecord::FREE;
+ connectPtr.p->table = RNIL;
+ connectPtr.p->nfConnect = connectPtr.i + 1;
+ }//for
+ connectPtr.i = cconnectFileSize - 1;
+ ptrAss(connectPtr, connectRecord);
+ connectPtr.p->nfConnect = RNIL;
+ cfirstconnect = 0;
+ break;
+ }
+ case 3:
+ {
+ FileRecordPtr filePtr;
+ jam();
+ /******** INITIALIZING FILE RECORDS ********/
+ for (filePtr.i = 0; filePtr.i < cfileFileSize; filePtr.i++) {
+ ptrAss(filePtr, fileRecord);
+ filePtr.p->nextFile = filePtr.i + 1;
+ filePtr.p->fileStatus = FileRecord::CLOSED;
+ filePtr.p->reqStatus = FileRecord::IDLE;
+ }//for
+ filePtr.i = cfileFileSize - 1;
+ ptrAss(filePtr, fileRecord);
+ filePtr.p->nextFile = RNIL;
+ cfirstfreeFile = 0;
+ initRestorableGciFiles();
+ break;
+ }
+ case 4:
+ jam();
+ initialiseFragstore();
+ break;
+ case 5:
+ {
+ jam();
+ /******* NODE GROUP RECORD ******/
+ /******* NODE RECORD ******/
+ NodeGroupRecordPtr loopNGPtr;
+ for (loopNGPtr.i = 0; loopNGPtr.i < MAX_NDB_NODES; loopNGPtr.i++) {
+ ptrAss(loopNGPtr, nodeGroupRecord);
+ loopNGPtr.p->nodesInGroup[0] = RNIL;
+ loopNGPtr.p->nodesInGroup[1] = RNIL;
+ loopNGPtr.p->nodesInGroup[2] = RNIL;
+ loopNGPtr.p->nodesInGroup[3] = RNIL;
+ loopNGPtr.p->nextReplicaNode = 0;
+ loopNGPtr.p->nodeCount = 0;
+ loopNGPtr.p->activeTakeOver = false;
+ }//for
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ new (nodePtr.p) NodeRecord();
+ }//for
+ break;
+ }
+ case 6:
+ {
+ PageRecordPtr pagePtr;
+ jam();
+ /******* PAGE RECORD ******/
+ for (pagePtr.i = 0; pagePtr.i < cpageFileSize; pagePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(pagePtr, pageRecord);
+ pagePtr.p->nextfreepage = pagePtr.i + 1;
+ }//for
+ pagePtr.i = cpageFileSize - 1;
+ ptrAss(pagePtr, pageRecord);
+ pagePtr.p->nextfreepage = RNIL;
+ cfirstfreepage = 0;
+ break;
+ }
+ case 7:
+ {
+ ReplicaRecordPtr initReplicaPtr;
+ jam();
+ /******* REPLICA RECORD ******/
+ for (initReplicaPtr.i = 0; initReplicaPtr.i < creplicaFileSize;
+ initReplicaPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(initReplicaPtr, replicaRecord);
+ initReplicaPtr.p->lcpIdStarted = 0;
+ initReplicaPtr.p->lcpOngoingFlag = false;
+ initReplicaPtr.p->nextReplica = initReplicaPtr.i + 1;
+ }//for
+ initReplicaPtr.i = creplicaFileSize - 1;
+ ptrAss(initReplicaPtr, replicaRecord);
+ initReplicaPtr.p->nextReplica = RNIL;
+ cnoFreeReplicaRec = creplicaFileSize;
+ cfirstfreeReplica = 0;
+ break;
+ }
+ case 8:
+ {
+ TabRecordPtr loopTabptr;
+ jam();
+ /********* TAB-DESCRIPTOR ********/
+ for (loopTabptr.i = 0; loopTabptr.i < ctabFileSize; loopTabptr.i++) {
+ ptrAss(loopTabptr, tabRecord);
+ refresh_watch_dog();
+ initTable(loopTabptr);
+ }//for
+ break;
+ }
+ case 9:
+ {
+ TakeOverRecordPtr takeOverPtr;
+ jam();
+ cfirstfreeTakeOver = RNIL;
+ for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) {
+ ptrAss(takeOverPtr, takeOverRecord);
+ initTakeOver(takeOverPtr);
+ releaseTakeOver(takeOverPtr.i);
+ }//for
+
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ return;
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ jam();
+ /* ---------------------------------------------------------------------- */
+ /* SEND REAL-TIME BREAK DURING INIT OF VARIABLES DURING SYSTEM RESTART. */
+ /* ---------------------------------------------------------------------- */
+ signal->theData[0] = DihContinueB::ZINITIALISE_RECORDS;
+ signal->theData[1] = stepNo + 1;
+ signal->theData[2] = retRef;
+ signal->theData[3] = retData;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::initialiseRecordsLab()
+
+/*************************************************************************/
+/* INSERT THE NODE INTO THE LINKED LIST OF NODES INVOLVED IN ALL */
+/* DISTRIBUTED PROTOCOLS (EXCEPT GCP PROTOCOL THAT USES THE DIH */
+/* LINKED LIST INSTEAD). */
+/*************************************************************************/
+void Dbdih::insertAlive(NodeRecordPtr newNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstAliveNode;
+ if (nodePtr.i == RNIL) {
+ jam();
+ cfirstAliveNode = newNodePtr.i;
+ } else {
+ do {
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == RNIL) {
+ jam();
+ nodePtr.p->nextNode = newNodePtr.i;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+ }//if
+ newNodePtr.p->nextNode = RNIL;
+}//Dbdih::insertAlive()
+
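+/**
+ * Insert nodeId as the first backup replica (slot 1) of the fragment,
+ * shifting the existing backup replicas one slot towards the end. Slot 0,
+ * the primary replica, is left untouched.
+ */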
+void Dbdih::insertBackup(FragmentstorePtr fragPtr, Uint32 nodeId)
+{
+ for (Uint32 i = fragPtr.p->fragReplicas; i > 1; i--) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS && i > 0);
+ fragPtr.p->activeNodes[i] = fragPtr.p->activeNodes[i - 1];
+ }//for
+ fragPtr.p->activeNodes[1] = nodeId;
+ fragPtr.p->fragReplicas++;
+}//Dbdih::insertBackup()
+
+void Dbdih::insertDeadNode(NodeRecordPtr newNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstDeadNode;
+ if (nodePtr.i == RNIL) {
+ jam();
+ cfirstDeadNode = newNodePtr.i;
+ } else {
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == RNIL) {
+ jam();
+ nodePtr.p->nextNode = newNodePtr.i;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+ }//if
+ newNodePtr.p->nextNode = RNIL;
+}//Dbdih::insertDeadNode()
+
+void Dbdih::linkOldStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr losReplicaPtr;
+
+ replicatePtr.p->nextReplica = RNIL;
+ fragPtr.p->noOldStoredReplicas++;
+ losReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ if (losReplicaPtr.i == RNIL) {
+ jam();
+ fragPtr.p->oldStoredReplicas = replicatePtr.i;
+ return;
+ }//if
+ ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
+ while (losReplicaPtr.p->nextReplica != RNIL) {
+ jam();
+ losReplicaPtr.i = losReplicaPtr.p->nextReplica;
+ ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord);
+ }//while
+ losReplicaPtr.p->nextReplica = replicatePtr.i;
+}//Dbdih::linkOldStoredReplica()
+
+void Dbdih::linkStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr lsrReplicaPtr;
+
+ fragPtr.p->noStoredReplicas++;
+ replicatePtr.p->nextReplica = RNIL;
+ lsrReplicaPtr.i = fragPtr.p->storedReplicas;
+ if (fragPtr.p->storedReplicas == RNIL) {
+ jam();
+ fragPtr.p->storedReplicas = replicatePtr.i;
+ return;
+ }//if
+ ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
+ while (lsrReplicaPtr.p->nextReplica != RNIL) {
+ jam();
+ lsrReplicaPtr.i = lsrReplicaPtr.p->nextReplica;
+ ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord);
+ }//while
+ lsrReplicaPtr.p->nextReplica = replicatePtr.i;
+}//Dbdih::linkStoredReplica()
+
+/*************************************************************************/
+/* MAKE NODE GROUPS BASED ON THE LIST OF NODES RECEIVED FROM CNTR */
+/*************************************************************************/
+void Dbdih::makeNodeGroups(Uint32 nodeArray[])
+{
+ NodeRecordPtr mngNodeptr;
+ Uint32 tmngNode;
+ Uint32 tmngNodeGroup;
+ Uint32 tmngLimit;
+ Uint32 i;
+
+ /**-----------------------------------------------------------------------
+ * ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. HOT SPARE NODES ARE ASSIGNED
+ * TO NODE GROUP ZNIL
+ *-----------------------------------------------------------------------*/
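+ // Example (assuming no hot spares): with cnoReplicas = 2 and
+ // nodeArray = {1, 2, 3, 4}, nodes 1 and 2 form node group 0 and
+ // nodes 3 and 4 form node group 1.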
+ tmngNodeGroup = 0;
+ tmngLimit = csystemnodes - cnoHotSpare;
+ ndbrequire(tmngLimit < MAX_NDB_NODES);
+ for (i = 0; i < tmngLimit; i++) {
+ NodeGroupRecordPtr NGPtr;
+ jam();
+ tmngNode = nodeArray[i];
+ mngNodeptr.i = tmngNode;
+ ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
+ mngNodeptr.p->nodeGroup = tmngNodeGroup;
+ NGPtr.i = tmngNodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ arrGuard(NGPtr.p->nodeCount, MAX_REPLICAS);
+ NGPtr.p->nodesInGroup[NGPtr.p->nodeCount++] = mngNodeptr.i;
+ if (NGPtr.p->nodeCount == cnoReplicas) {
+ jam();
+ tmngNodeGroup++;
+ }//if
+ }//for
+ cnoOfNodeGroups = tmngNodeGroup;
+ ndbrequire(csystemnodes < MAX_NDB_NODES);
+ for (i = tmngLimit + 1; i < csystemnodes; i++) {
+ jam();
+ tmngNode = nodeArray[i];
+ mngNodeptr.i = tmngNode;
+ ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord);
+ mngNodeptr.p->nodeGroup = ZNIL;
+ }//for
+ for(i = 0; i < MAX_NDB_NODES; i++){
+ jam();
+ Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID);
+ }//for
+ for (mngNodeptr.i = 1; mngNodeptr.i < MAX_NDB_NODES; mngNodeptr.i++) {
+ jam();
+ ptrAss(mngNodeptr, nodeRecord);
+ if (mngNodeptr.p->nodeGroup != ZNIL) {
+ jam();
+ Sysfile::setNodeGroup(mngNodeptr.i, SYSFILE->nodeGroups, mngNodeptr.p->nodeGroup);
+ }//if
+ }//for
+}//Dbdih::makeNodeGroups()
+
+/**
+ * On node failure QMGR asks DIH about node groups. This is
+ * a direct signal (function call in same process). Input is
+ * bitmask of surviving nodes. The routine is not concerned
+ * about node count. Reply is one of:
+ * 1) win - we can survive, and nobody else can
+ * 2) lose - we cannot survive
+ * 3) partition - we can survive but there could be others
+ */
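+/**
+ * Example (illustrative): with two node groups {1,2} and {3,4} and a
+ * surviving mask of {1,3}, no group is entirely missing (missall = 0) and
+ * no group is complete (haveall = 0), so ArbitCheck answers Partitioning.
+ */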
+void Dbdih::execCHECKNODEGROUPSREQ(Signal* signal)
+{
+ jamEntry();
+ CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
+
+ bool direct = (sd->requestType & CheckNodeGroups::Direct);
+ bool ok = false;
+ switch(sd->requestType & ~CheckNodeGroups::Direct){
+ case CheckNodeGroups::ArbitCheck:{
+ ok = true;
+ jam();
+ unsigned missall = 0;
+ unsigned haveall = 0;
+ for (Uint32 i = 0; i < cnoOfNodeGroups; i++) {
+ jam();
+ NodeGroupRecordPtr ngPtr;
+ ngPtr.i = i;
+ ptrAss(ngPtr, nodeGroupRecord);
+ Uint32 count = 0;
+ for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
+ jam();
+ Uint32 nodeId = ngPtr.p->nodesInGroup[j];
+ if (sd->mask.get(nodeId)) {
+ jam();
+ count++;
+ }//if
+ }//for
+ if (count == 0) {
+ jam();
+ missall++;
+ }//if
+ if (count == ngPtr.p->nodeCount) {
+ haveall++;
+ }//if
+ }//for
+
+ if (missall) {
+ jam();
+ sd->output = CheckNodeGroups::Lose;
+ } else if (haveall) {
+ jam();
+ sd->output = CheckNodeGroups::Win;
+ } else {
+ jam();
+ sd->output = CheckNodeGroups::Partitioning;
+ }//if
+ }
+ break;
+ case CheckNodeGroups::GetNodeGroup:
+ ok = true;
+ sd->output = Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups);
+ break;
+ case CheckNodeGroups::GetNodeGroupMembers: {
+ ok = true;
+ Uint32 ownNodeGroup =
+ Sysfile::getNodeGroup(sd->nodeId, SYSFILE->nodeGroups);
+
+ sd->output = ownNodeGroup;
+ sd->mask.clear();
+
+ NodeGroupRecordPtr ngPtr;
+ ngPtr.i = ownNodeGroup;
+ ptrAss(ngPtr, nodeGroupRecord);
+ for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) {
+ jam();
+ sd->mask.set(ngPtr.p->nodesInGroup[j]);
+ }
+#if 0
+ for (int i = 0; i < MAX_NDB_NODES; i++) {
+ if (ownNodeGroup ==
+ Sysfile::getNodeGroup(i, SYSFILE->nodeGroups)) {
+ sd->mask.set(i);
+ }
+ }
+#endif
+ }
+ break;
+ }
+ ndbrequire(ok);
+
+ if (!direct)
+ sendSignal(sd->blockRef, GSN_CHECKNODEGROUPSCONF, signal,
+ CheckNodeGroups::SignalLength, JBB);
+}//Dbdih::execCHECKNODEGROUPSREQ()
+
+void Dbdih::makePrnList(ReadNodesConf * readNodes, Uint32 nodeArray[])
+{
+ cfirstAliveNode = RNIL;
+ ndbrequire(con_lineNodes > 0);
+ ndbrequire(csystemnodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < csystemnodes; i++) {
+ NodeRecordPtr nodePtr;
+ jam();
+ nodePtr.i = nodeArray[i];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ new (nodePtr.p) NodeRecord();
+ if (NodeBitmask::get(readNodes->inactiveNodes, nodePtr.i) == false){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ nodePtr.p->useInTransactions = true;
+ nodePtr.p->copyCompleted = true;
+ nodePtr.p->m_inclDihLcp = true;
+ insertAlive(nodePtr);
+ } else {
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::DEAD;
+ insertDeadNode(nodePtr);
+ }//if
+ }//for
+}//Dbdih::makePrnList()
+
+/*************************************************************************/
+/* A NEW CRASHED REPLICA IS ADDED BY A NODE FAILURE. */
+/*************************************************************************/
+void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr)
+{
+ /*----------------------------------------------------------------------*/
+ /* SET THE REPLICA_LAST_GCI OF THE CRASHED REPLICA TO LAST GCI */
+ /* EXECUTED BY THE FAILED NODE. */
+ /*----------------------------------------------------------------------*/
+ /* WE HAVE A NEW CRASHED REPLICA. INITIATE CREATE GCI TO INDICATE */
+ /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/
+ /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */
+ /*----------------------------------------------------------------------*/
+ arrGuard(ncrReplicaPtr.p->noCrashedReplicas + 1, 8);
+ ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
+ SYSFILE->lastCompletedGCI[nodeId];
+ ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1;
+ ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0;
+ ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] =
+ (Uint32)-1;
+}//Dbdih::newCrashedReplica()
+
+/*************************************************************************/
+/* AT NODE FAILURE DURING START OF A NEW NODE WE NEED TO RESET A */
+/* SET OF VARIABLES CONTROLLING THE START AND INDICATING ONGOING */
+/* START OF A NEW NODE. */
+/*************************************************************************/
+void Dbdih::nodeResetStart()
+{
+ jam();
+ c_nodeStartMaster.startNode = RNIL;
+ c_nodeStartMaster.failNr = cfailurenr;
+ c_nodeStartMaster.activeState = false;
+ c_nodeStartMaster.blockGcp = false;
+ c_nodeStartMaster.blockLcp = false;
+ c_nodeStartMaster.m_outstandingGsn = 0;
+}//Dbdih::nodeResetStart()
+
+void Dbdih::openFileRw(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = FsOpenReq::OM_READWRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::openFileRw()
+
+void Dbdih::openFileRo(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = reference();
+ signal->theData[1] = filePtr.i;
+ signal->theData[2] = filePtr.p->fileName[0];
+ signal->theData[3] = filePtr.p->fileName[1];
+ signal->theData[4] = filePtr.p->fileName[2];
+ signal->theData[5] = filePtr.p->fileName[3];
+ signal->theData[6] = FsOpenReq::OM_READONLY;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dbdih::openFileRo()
+
+/*************************************************************************/
+/* REMOVE A CRASHED REPLICA BY PACKING THE ARRAY OF CREATED GCI AND*/
+/* THE LAST GCI OF THE CRASHED REPLICA. */
+/*************************************************************************/
+void Dbdih::packCrashedReplicas(ReplicaRecordPtr replicaPtr)
+{
+ ndbrequire(replicaPtr.p->noCrashedReplicas > 0);
+ ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
+ for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
+ jam();
+ replicaPtr.p->createGci[i] = replicaPtr.p->createGci[i + 1];
+ replicaPtr.p->replicaLastGci[i] = replicaPtr.p->replicaLastGci[i + 1];
+ }//for
+ replicaPtr.p->noCrashedReplicas--;
+
+#ifdef VM_TRACE
+ for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) {
+ jam();
+ ndbrequire(replicaPtr.p->createGci[i] != 0xF1F1F1F1);
+ ndbrequire(replicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
+ }//for
+#endif
+}//Dbdih::packCrashedReplicas()
+
+void Dbdih::prepareReplicas(FragmentstorePtr fragPtr)
+{
+ ReplicaRecordPtr prReplicaPtr;
+ Uint32 prevReplica = RNIL;
+
+ /* --------------------------------------------------------------------- */
+ /* BEGIN BY LINKING ALL REPLICA RECORDS ONTO THE OLD STORED REPLICA*/
+ /* LIST. */
+ /* AT A SYSTEM RESTART OBVIOUSLY ALL NODES ARE OLD. */
+ /* --------------------------------------------------------------------- */
+ prReplicaPtr.i = fragPtr.p->storedReplicas;
+ while (prReplicaPtr.i != RNIL) {
+ jam();
+ prevReplica = prReplicaPtr.i;
+ ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
+ prReplicaPtr.i = prReplicaPtr.p->nextReplica;
+ }//while
+ /* --------------------------------------------------------------------- */
+ /* LIST OF STORED REPLICAS WILL BE EMPTY NOW. */
+ /* --------------------------------------------------------------------- */
+ if (prevReplica != RNIL) {
+ prReplicaPtr.i = prevReplica;
+ ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord);
+ prReplicaPtr.p->nextReplica = fragPtr.p->oldStoredReplicas;
+ fragPtr.p->oldStoredReplicas = fragPtr.p->storedReplicas;
+ fragPtr.p->storedReplicas = RNIL;
+ fragPtr.p->noOldStoredReplicas += fragPtr.p->noStoredReplicas;
+ fragPtr.p->noStoredReplicas = 0;
+ }//if
+}//Dbdih::prepareReplicas()
+
+void Dbdih::readFragment(RWFragment* rf, FragmentstorePtr fragPtr)
+{
+ Uint32 TreadFid = readPageWord(rf);
+ fragPtr.p->preferredPrimary = readPageWord(rf);
+ fragPtr.p->noStoredReplicas = readPageWord(rf);
+ fragPtr.p->noOldStoredReplicas = readPageWord(rf);
+ Uint32 TdistKey = readPageWord(rf);
+
+ ndbrequire(fragPtr.p->noStoredReplicas > 0);
+ ndbrequire(TreadFid == rf->fragId);
+ ndbrequire(TdistKey < 256);
+ if ((cstarttype == NodeState::ST_NODE_RESTART) ||
+ (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ fragPtr.p->distributionKey = TdistKey;
+ }//if
+}//Dbdih::readFragment()
+
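+/*************************************************************************/
+/* READ THE NEXT WORD FROM THE TABLE DESCRIPTION PAGES. WHEN A 2048 */
+/* WORD PAGE IS EXHAUSTED WE STEP TO THE NEXT PAGE OF THE TABLE AND */
+/* CONTINUE AT WORD 32 (THE FIRST 32 WORDS ARE PRESUMABLY A PAGE */
+/* HEADER). */
+/*************************************************************************/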
+Uint32 Dbdih::readPageWord(RWFragment* rf)
+{
+ if (rf->wordIndex >= 2048) {
+ jam();
+ ndbrequire(rf->wordIndex == 2048);
+ rf->pageIndex++;
+ ndbrequire(rf->pageIndex < 8);
+ rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex];
+ ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord);
+ rf->wordIndex = 32;
+ }//if
+ Uint32 dataWord = rf->rwfPageptr.p->word[rf->wordIndex];
+ rf->wordIndex++;
+ return dataWord;
+}//Dbdih::readPageWord()
+
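+/*************************************************************************/
+/* READ ONE REPLICA RECORD FROM THE TABLE FILE: NODE, INITIAL GCI, */
+/* CRASHED REPLICA HISTORY AND THE STORED LOCAL CHECKPOINTS. LOCAL */
+/* CHECKPOINTS THAT CANNOT BE USED BY THE RESTART ARE INVALIDATED AND */
+/* TOO NEW OR TOO OLD CRASHED REPLICA ENTRIES ARE REMOVED. */
+/*************************************************************************/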
+void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr)
+{
+ Uint32 i;
+ readReplicaPtr.p->procNode = readPageWord(rf);
+ readReplicaPtr.p->initialGci = readPageWord(rf);
+ readReplicaPtr.p->noCrashedReplicas = readPageWord(rf);
+ readReplicaPtr.p->nextLcp = readPageWord(rf);
+
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ readReplicaPtr.p->maxGciCompleted[i] = readPageWord(rf);
+ readReplicaPtr.p->maxGciStarted[i] = readPageWord(rf);
+ readReplicaPtr.p->lcpId[i] = readPageWord(rf);
+ readReplicaPtr.p->lcpStatus[i] = readPageWord(rf);
+ }//for
+ const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas;
+ ndbrequire(noCrashedReplicas < 8);
+ for (i = 0; i < noCrashedReplicas; i++) {
+ readReplicaPtr.p->createGci[i] = readPageWord(rf);
+ readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
+ ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1);
+ ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1);
+ }//for
+ for(i = noCrashedReplicas; i<8; i++){
+ readReplicaPtr.p->createGci[i] = readPageWord(rf);
+ readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf);
+ // They are not initialized...
+ readReplicaPtr.p->createGci[i] = 0;
+ readReplicaPtr.p->replicaLastGci[i] = ~0;
+ }
+ /* ---------------------------------------------------------------------- */
+ /* IF THE LAST COMPLETED LOCAL CHECKPOINT IS VALID AND LARGER THAN */
+ /* THE LAST COMPLETED CHECKPOINT THEN WE WILL INVALIDATE THIS LOCAL */
+ /* CHECKPOINT FOR THIS REPLICA. */
+ /* ---------------------------------------------------------------------- */
+ Uint32 trraLcp = prevLcpNo(readReplicaPtr.p->nextLcp);
+ ndbrequire(trraLcp < MAX_LCP_STORED);
+ if ((readReplicaPtr.p->lcpStatus[trraLcp] == ZVALID) &&
+ (readReplicaPtr.p->lcpId[trraLcp] > SYSFILE->latestLCP_ID)) {
+ jam();
+ readReplicaPtr.p->lcpStatus[trraLcp] = ZINVALID;
+ }//if
+ /* ---------------------------------------------------------------------- */
+ /* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */
+ /* INVALIDATED BY MOVING BACK THE RESTART GCI. */
+ /* ---------------------------------------------------------------------- */
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ jam();
+ if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) &&
+ (readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) {
+ jam();
+ readReplicaPtr.p->lcpStatus[i] = ZINVALID;
+ }//if
+ }//for
+ /* ---------------------------------------------------------------------- */
+ /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
+ /* THAT ARE NO LONGER VALID DUE TO MOVING RESTART GCI BACKWARDS. */
+ /* ---------------------------------------------------------------------- */
+ removeTooNewCrashedReplicas(readReplicaPtr);
+ /* ---------------------------------------------------------------------- */
+ /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */
+ /* THAT ARE NO LONGER VALID SINCE THEY ARE NO LONGER RESTORABLE. */
+ /* ---------------------------------------------------------------------- */
+ removeOldCrashedReplicas(readReplicaPtr);
+ /* --------------------------------------------------------------------- */
+ // We set the last GCI of the replica that was alive before the node
+ // crashed last time. We set it to the last GCI which the node participated in.
+ /* --------------------------------------------------------------------- */
+ ndbrequire(readReplicaPtr.p->noCrashedReplicas < 8);
+ readReplicaPtr.p->replicaLastGci[readReplicaPtr.p->noCrashedReplicas] =
+ SYSFILE->lastCompletedGCI[readReplicaPtr.p->procNode];
+ /* ---------------------------------------------------------------------- */
+ /* FIND PROCESSOR RECORD */
+ /* ---------------------------------------------------------------------- */
+}//Dbdih::readReplica()
+
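+/*************************************************************************/
+/* READ ALL REPLICAS OF A FRAGMENT FROM THE TABLE FILE. REPLICAS ON */
+/* ALIVE NODES ARE LINKED AS STORED REPLICAS AND RECORDED IN THE */
+/* ACTIVE NODES OF THE FRAGMENT, ALL OTHERS ARE LINKED AS OLD STORED */
+/* REPLICAS. */
+/*************************************************************************/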
+void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr)
+{
+ Uint32 i;
+ ReplicaRecordPtr newReplicaPtr;
+ Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas;
+ Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas;
+ /* ----------------------------------------------------------------------- */
+ /* WE CLEAR THE NUMBER OF STORED REPLICAS SINCE IT WILL BE CALCULATED */
+ /* BY THE LINKING SUBROUTINES. */
+ /* ----------------------------------------------------------------------- */
+ fragPtr.p->noStoredReplicas = 0;
+ fragPtr.p->noOldStoredReplicas = 0;
+ Uint32 replicaIndex = 0;
+ ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS);
+ for (i = 0; i < noStoredReplicas; i++) {
+ seizeReplicaRec(newReplicaPtr);
+ readReplica(rf, newReplicaPtr);
+ if (checkNodeAlive(newReplicaPtr.p->procNode)) {
+ jam();
+ ndbrequire(replicaIndex < MAX_REPLICAS);
+ fragPtr.p->activeNodes[replicaIndex] = newReplicaPtr.p->procNode;
+ replicaIndex++;
+ linkStoredReplica(fragPtr, newReplicaPtr);
+ } else {
+ jam();
+ linkOldStoredReplica(fragPtr, newReplicaPtr);
+ }//if
+ }//for
+ fragPtr.p->fragReplicas = noStoredReplicas;
+ for (i = 0; i < noOldStoredReplicas; i++) {
+ jam();
+ seizeReplicaRec(newReplicaPtr);
+ readReplica(rf, newReplicaPtr);
+ linkOldStoredReplica(fragPtr, newReplicaPtr);
+ }//for
+}//Dbdih::readReplicas()
+
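+/*************************************************************************/
+/* REQUEST NDBFS TO READ THE RESTART INFORMATION (ONE PAGE) FROM THE */
+/* OPEN FILE INTO THE CRESTART INFO VARIABLE. */
+/*************************************************************************/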
+void Dbdih::readRestorableGci(Signal* signal, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_CRESTART_INFO;
+ signal->theData[5] = 1;
+ signal->theData[6] = 0;
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dbdih::readRestorableGci()
+
+void Dbdih::readTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_WORD;
+ signal->theData[5] = tab->noPages;
+ for (Uint32 i = 0; i < tab->noPages; i++) {
+ signal->theData[6 + (2 * i)] = tab->pageRef[i];
+ signal->theData[7 + (2 * i)] = i;
+ }//for
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 22, JBA);
+}//Dbdih::readTabfile()
+
+void Dbdih::releasePage(Uint32 pageIndex)
+{
+ PageRecordPtr pagePtr;
+ pagePtr.i = pageIndex;
+ ptrCheckGuard(pagePtr, cpageFileSize, pageRecord);
+ pagePtr.p->nextfreepage = cfirstfreepage;
+ cfirstfreepage = pagePtr.i;
+}//Dbdih::releasePage()
+
+void Dbdih::releaseTabPages(Uint32 tableId)
+{
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+ ndbrequire(tabPtr.p->noPages <= 8);
+ for (Uint32 i = 0; i < tabPtr.p->noPages; i++) {
+ jam();
+ releasePage(tabPtr.p->pageRef[i]);
+ }//for
+ tabPtr.p->noPages = 0;
+}//Dbdih::releaseTabPages()
+
+/*************************************************************************/
+/* REMOVE NODE FROM SET OF ALIVE NODES. */
+/*************************************************************************/
+void Dbdih::removeAlive(NodeRecordPtr removeNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstAliveNode;
+ if (nodePtr.i == removeNodePtr.i) {
+ jam();
+ cfirstAliveNode = removeNodePtr.p->nextNode;
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == removeNodePtr.i) {
+ jam();
+ nodePtr.p->nextNode = removeNodePtr.p->nextNode;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+}//Dbdih::removeAlive()
+
+/*************************************************************************/
+/* REMOVE NODE FROM SET OF DEAD NODES. */
+/*************************************************************************/
+void Dbdih::removeDeadNode(NodeRecordPtr removeNodePtr)
+{
+ NodeRecordPtr nodePtr;
+
+ nodePtr.i = cfirstDeadNode;
+ if (nodePtr.i == removeNodePtr.i) {
+ jam();
+ cfirstDeadNode = removeNodePtr.p->nextNode;
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nextNode == removeNodePtr.i) {
+ jam();
+ nodePtr.p->nextNode = removeNodePtr.p->nextNode;
+ break;
+ } else {
+ jam();
+ nodePtr.i = nodePtr.p->nextNode;
+ }//if
+ } while (1);
+}//Dbdih::removeDeadNode()
+
+/*---------------------------------------------------------------*/
+/* REMOVE REPLICAS OF A FAILED NODE FROM LIST OF STORED */
+/* REPLICAS AND MOVE IT TO THE LIST OF OLD STORED REPLICAS.*/
+/* ALSO UPDATE THE CRASHED REPLICA INFORMATION. */
+/*---------------------------------------------------------------*/
+void Dbdih::removeNodeFromStored(Uint32 nodeId,
+ FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ newCrashedReplica(nodeId, replicatePtr);
+ removeStoredReplica(fragPtr, replicatePtr);
+ linkOldStoredReplica(fragPtr, replicatePtr);
+ ndbrequire(fragPtr.p->storedReplicas != RNIL);
+}//Dbdih::removeNodeFromStored()
+
+/*************************************************************************/
+/* REMOVE ANY OLD CRASHED REPLICAS THAT ARE NOT RESTORABLE ANY MORE*/
+/*************************************************************************/
+void Dbdih::removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr)
+{
+ while (rocReplicaPtr.p->noCrashedReplicas > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* ONLY IF THERE IS AT LEAST ONE CRASHED REPLICA CAN WE REMOVE ANY. */
+ /* --------------------------------------------------------------------- */
+ if (rocReplicaPtr.p->replicaLastGci[0] < SYSFILE->oldestRestorableGCI){
+ jam();
+ /* ------------------------------------------------------------------- */
+ /* THIS CRASHED REPLICA HAS BECOME EXTINCT AND MUST BE REMOVED TO */
+ /* GIVE SPACE FOR NEW CRASHED REPLICAS. */
+ /* ------------------------------------------------------------------- */
+ packCrashedReplicas(rocReplicaPtr);
+ } else {
+ break;
+ }//if
+ }//while
+ if (rocReplicaPtr.p->createGci[0] < SYSFILE->keepGCI){
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* MOVE FORWARD THE CREATE GCI TO A GCI THAT CAN BE USED. WE HAVE */
+ /* NO CERTAINTY IN FINDING ANY LOG RECORDS FROM OLDER GCI'S. */
+ /* --------------------------------------------------------------------- */
+ rocReplicaPtr.p->createGci[0] = SYSFILE->keepGCI;
+ ndbrequire(SYSFILE->keepGCI != 0xF1F1F1F1);
+ }//if
+}//Dbdih::removeOldCrashedReplicas()
+
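+/*************************************************************************/
+/* UNLINK A REPLICA FROM THE LIST OF OLD STORED REPLICAS OF THE */
+/* FRAGMENT AND DECREASE THE OLD STORED REPLICA COUNT. */
+/*************************************************************************/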
+void Dbdih::removeOldStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr rosTmpReplicaPtr;
+ ReplicaRecordPtr rosPrevReplicaPtr;
+
+ fragPtr.p->noOldStoredReplicas--;
+ if (fragPtr.p->oldStoredReplicas == replicatePtr.i) {
+ jam();
+ fragPtr.p->oldStoredReplicas = replicatePtr.p->nextReplica;
+ } else {
+ rosPrevReplicaPtr.i = fragPtr.p->oldStoredReplicas;
+ ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ rosTmpReplicaPtr.i = rosPrevReplicaPtr.p->nextReplica;
+ while (rosTmpReplicaPtr.i != replicatePtr.i) {
+ jam();
+ rosPrevReplicaPtr.i = rosTmpReplicaPtr.i;
+ ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ ptrCheckGuard(rosTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rosTmpReplicaPtr.i = rosTmpReplicaPtr.p->nextReplica;
+ }//while
+ rosPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
+ }//if
+}//Dbdih::removeOldStoredReplica()
+
+void Dbdih::removeStoredReplica(FragmentstorePtr fragPtr,
+ ReplicaRecordPtr replicatePtr)
+{
+ ReplicaRecordPtr rsrTmpReplicaPtr;
+ ReplicaRecordPtr rsrPrevReplicaPtr;
+
+ fragPtr.p->noStoredReplicas--;
+ if (fragPtr.p->storedReplicas == replicatePtr.i) {
+ jam();
+ fragPtr.p->storedReplicas = replicatePtr.p->nextReplica;
+ } else {
+ jam();
+ rsrPrevReplicaPtr.i = fragPtr.p->storedReplicas;
+ rsrTmpReplicaPtr.i = fragPtr.p->storedReplicas;
+ ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
+ while (rsrTmpReplicaPtr.i != replicatePtr.i) {
+ jam();
+ rsrPrevReplicaPtr.i = rsrTmpReplicaPtr.i;
+ ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica;
+ }//while
+ ptrCheckGuard(rsrPrevReplicaPtr, creplicaFileSize, replicaRecord);
+ rsrPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica;
+ }//if
+}//Dbdih::removeStoredReplica()
+
+/*************************************************************************/
+/* REMOVE ALL TOO NEW CRASHED REPLICAS THAT ARE IN THIS REPLICA RECORD.*/
+/*************************************************************************/
+void Dbdih::removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr)
+{
+ while (rtnReplicaPtr.p->noCrashedReplicas > 0) {
+ jam();
+ /* --------------------------------------------------------------------- */
+ /* REMOVE ALL REPLICAS THAT ONLY LIVED IN A PERIOD THAT HAS BEEN */
+ /* REMOVED FROM THE RESTART INFORMATION SINCE THE RESTART FAILED */
+ /* TOO MANY TIMES. */
+ /* --------------------------------------------------------------------- */
+ arrGuard(rtnReplicaPtr.p->noCrashedReplicas - 1, 8);
+ if (rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] >
+ SYSFILE->newestRestorableGCI){
+ jam();
+ rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
+ (Uint32)-1;
+ rtnReplicaPtr.p->replicaLastGci[rtnReplicaPtr.p->noCrashedReplicas - 1] =
+ (Uint32)-1;
+ rtnReplicaPtr.p->noCrashedReplicas--;
+ } else {
+ break;
+ }//if
+ }//while
+}//Dbdih::removeTooNewCrashedReplicas()
+
+/*************************************************************************/
+/* */
+/* MODULE: SEARCH FOR POSSIBLE REPLICAS THAT CAN HANDLE THE GLOBAL */
+/* CHECKPOINT WITHOUT NEEDING ANY EXTRA LOGGING FACILITIES.*/
+/* A MAXIMUM OF FOUR NODES IS RETRIEVED. */
+/*************************************************************************/
+void Dbdih::searchStoredReplicas(FragmentstorePtr fragPtr)
+{
+ Uint32 nextReplicaPtrI;
+ ConstPtr<ReplicaRecord> replicaPtr;
+
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ while (replicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ nextReplicaPtrI = replicaPtr.p->nextReplica;
+ NodeRecordPtr nodePtr;
+ nodePtr.i = replicaPtr.p->procNode;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:{
+ /* ----------------------------------------------------------------- */
+ /* INITIALISE THE CREATE REPLICA STRUCTURE THAT IS USED FOR SENDING */
+ /* START_FRAGREQ TO LQH. */
+ /* SET THE DATA NODE WHERE THE LOCAL CHECKPOINT IS FOUND. ALSO */
+ /* SET A REFERENCE TO THE REPLICA POINTER OF THAT. */
+ /* ----------------------------------------------------------------- */
+ CreateReplicaRecordPtr createReplicaPtr;
+ createReplicaPtr.i = cnoOfCreateReplicas;
+ ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord);
+ cnoOfCreateReplicas++;
+ createReplicaPtr.p->dataNodeId = replicaPtr.p->procNode;
+ createReplicaPtr.p->replicaRec = replicaPtr.i;
+ /* ----------------------------------------------------------------- */
+ /* WE NEED TO SEARCH FOR A PROPER LOCAL CHECKPOINT TO USE FOR THE */
+ /* SYSTEM RESTART. */
+ /* ----------------------------------------------------------------- */
+ Uint32 startGci;
+ Uint32 startLcpNo;
+ Uint32 stopGci = SYSFILE->newestRestorableGCI;
+ bool result = findStartGci(replicaPtr,
+ stopGci,
+ startGci,
+ startLcpNo);
+ if (!result) {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE COULD NOT FIND ANY LOCAL CHECKPOINT. THE FRAGMENT THUS DOES NOT */
+ /* CONTAIN ANY VALID LOCAL CHECKPOINT. IT DOES HOWEVER CONTAIN A */
+ /* VALID FRAGMENT LOG. THUS BY FIRST CREATING THE FRAGMENT AND THEN*/
+ /* EXECUTING THE FRAGMENT LOG WE CAN CREATE THE FRAGMENT AS */
+ /* DESIRED. THIS SHOULD ONLY OCCUR AFTER CREATING A FRAGMENT. */
+ /* */
+ /* TO INDICATE THAT NO LOCAL CHECKPOINT IS TO BE USED WE SET THE */
+ /* LOCAL CHECKPOINT TO ZNIL. */
+ /* --------------------------------------------------------------- */
+ createReplicaPtr.p->lcpNo = ZNIL;
+ } else {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE FOUND A PROPER LOCAL CHECKPOINT TO RESTART FROM. */
+ /* SET LOCAL CHECKPOINT ID AND LOCAL CHECKPOINT NUMBER. */
+ /* --------------------------------------------------------------- */
+ createReplicaPtr.p->lcpNo = startLcpNo;
+ arrGuard(startLcpNo, MAX_LCP_STORED);
+ createReplicaPtr.p->createLcpId = replicaPtr.p->lcpId[startLcpNo];
+ }//if
+
+ if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::DEAD;
+ }
+
+ /* ----------------------------------------------------------------- */
+ /* WE HAVE EITHER FOUND A LOCAL CHECKPOINT OR WE ARE PLANNING TO */
+ /* EXECUTE THE LOG FROM THE INITIAL CREATION OF THE TABLE. IN BOTH */
+ /* CASES WE NEED TO FIND A SET OF LOGS THAT CAN EXECUTE SUCH THAT */
+ /* WE RECOVER TO THE SYSTEM RESTART GLOBAL CHECKPOINT. */
+ /* ----------------------------------------------------------------- */
+ if (!findLogNodes(createReplicaPtr.p, fragPtr, startGci, stopGci)) {
+ jam();
+ /* --------------------------------------------------------------- */
+ /* WE WERE NOT ABLE TO FIND ANY WAY OF RESTORING THIS REPLICA. */
+ /* THIS IS A POTENTIAL SYSTEM ERROR. */
+ /* --------------------------------------------------------------- */
+ cnoOfCreateReplicas--;
+ return;
+ }//if
+
+ if(ERROR_INSERTED(7073) || ERROR_INSERTED(7074)){
+ jam();
+ nodePtr.p->nodeStatus = NodeRecord::ALIVE;
+ }
+
+ break;
+ }
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }
+ replicaPtr.i = nextReplicaPtrI;
+ }//while
+}//Dbdih::searchStoredReplicas()
+
+/*************************************************************************/
+/* */
+/* MODULE: SEIZE_FILE */
+/* DESCRIPTION: THE SUBROUTINE SEIZES A FILE RECORD FROM THE */
+/* FREE LIST. */
+/*************************************************************************/
+void Dbdih::seizeFile(FileRecordPtr& filePtr)
+{
+ filePtr.i = cfirstfreeFile;
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ cfirstfreeFile = filePtr.p->nextFile;
+ filePtr.p->nextFile = RNIL;
+}//Dbdih::seizeFile()
+
+/*************************************************************************/
+/* */
+/* MODULE: SEND START_FRAGREQ TO LQH FOR EVERY CREATE REPLICA RECORD */
+/* PREPARED BY THE SEARCH OF STORED REPLICAS, TELLING IT WHICH LOCAL */
+/* CHECKPOINT AND FRAGMENT LOG INTERVALS TO USE FOR THE RESTORE. */
+/*************************************************************************/
+void Dbdih::sendStartFragreq(Signal* signal,
+ TabRecordPtr tabPtr, Uint32 fragId)
+{
+ CreateReplicaRecordPtr replicaPtr;
+ for (replicaPtr.i = 0; replicaPtr.i < cnoOfCreateReplicas; replicaPtr.i++) {
+ jam();
+ ptrAss(replicaPtr, createReplicaRecord);
+ BlockReference ref = calcLqhBlockRef(replicaPtr.p->dataNodeId);
+ StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ startFragReq->userPtr = replicaPtr.p->replicaRec;
+ startFragReq->userRef = reference();
+ startFragReq->lcpNo = replicaPtr.p->lcpNo;
+ startFragReq->lcpId = replicaPtr.p->createLcpId;
+ startFragReq->tableId = tabPtr.i;
+ startFragReq->fragId = fragId;
+
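+ /* ------------------------------------------------------------------- */
+ // Error insert 7072/7074: split the last log interval into several
+ // one-GCI pieces on extra log node slots (same node id), presumably to
+ // exercise execution of the fragment log over multiple log intervals.
+ /* ------------------------------------------------------------------- */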
+ if(ERROR_INSERTED(7072) || ERROR_INSERTED(7074)){
+ jam();
+ const Uint32 noNodes = replicaPtr.p->noLogNodes;
+ Uint32 start = replicaPtr.p->logStartGci[noNodes - 1];
+ const Uint32 stop = replicaPtr.p->logStopGci[noNodes - 1];
+
+ for(Uint32 i = noNodes; i < 4 && (stop - start) > 0; i++){
+ replicaPtr.p->noLogNodes++;
+ replicaPtr.p->logStopGci[i - 1] = start;
+
+ replicaPtr.p->logNodeId[i] = replicaPtr.p->logNodeId[i-1];
+ replicaPtr.p->logStartGci[i] = start + 1;
+ replicaPtr.p->logStopGci[i] = stop;
+ start += 1;
+ }
+ }
+
+ startFragReq->noOfLogNodes = replicaPtr.p->noLogNodes;
+
+ for (Uint32 i = 0; i < 4 ; i++) {
+ startFragReq->lqhLogNode[i] = replicaPtr.p->logNodeId[i];
+ startFragReq->startGci[i] = replicaPtr.p->logStartGci[i];
+ startFragReq->lastGci[i] = replicaPtr.p->logStopGci[i];
+ }//for
+
+ sendSignal(ref, GSN_START_FRAGREQ, signal,
+ StartFragReq::SignalLength, JBB);
+ }//for
+}//Dbdih::sendStartFragreq()
+
+/*************************************************************************/
+/* SET THE INITIAL ACTIVE STATUS ON ALL NODES AND PUT INTO LISTS. */
+/*************************************************************************/
+void Dbdih::setInitialActiveStatus()
+{
+ NodeRecordPtr siaNodeptr;
+ Uint32 tsiaNodeActiveStatus;
+ Uint32 tsiaNoActiveNodes;
+
+ tsiaNoActiveNodes = csystemnodes - cnoHotSpare;
+ for(Uint32 i = 0; i<Sysfile::NODE_STATUS_SIZE; i++)
+ SYSFILE->nodeStatus[i] = 0;
+ for (siaNodeptr.i = 1; siaNodeptr.i < MAX_NDB_NODES; siaNodeptr.i++) {
+ ptrAss(siaNodeptr, nodeRecord);
+ if (siaNodeptr.p->nodeStatus == NodeRecord::ALIVE) {
+ if (tsiaNoActiveNodes == 0) {
+ jam();
+ siaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
+ } else {
+ jam();
+ tsiaNoActiveNodes = tsiaNoActiveNodes - 1;
+ siaNodeptr.p->activeStatus = Sysfile::NS_Active;
+ }//if
+ } else {
+ jam();
+ siaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
+ }//if
+ switch (siaNodeptr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ tsiaNodeActiveStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ Sysfile::setNodeStatus(siaNodeptr.i, SYSFILE->nodeStatus,
+ tsiaNodeActiveStatus);
+ }//for
+}//Dbdih::setInitialActiveStatus()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS AT THE END OF A LOCAL CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::setLcpActiveStatusEnd()
+{
+ NodeRecordPtr nodePtr;
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THE NODE PARTICIPATED IN THIS CHECKPOINT.
+ * WE CAN SET ITS STATUS TO ACTIVE */
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ takeOverCompleted(nodePtr.i);
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THE NODE HAS COMPLETED A CHECKPOINT AFTER TAKE OVER. WE CAN NOW */
+ /* SET ITS STATUS TO ACTIVE. WE CAN ALSO COMPLETE THE TAKE OVER */
+ /* AND ALSO WE CLEAR THE TAKE OVER NODE IN THE RESTART INFO. */
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_Active;
+ takeOverCompleted(nodePtr.i);
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ }//if
+ }//for
+
+ if(getNodeState().getNodeRestartInProgress()){
+ jam();
+ if(c_lcpState.m_participatingLQH.get(getOwnNodeId())){
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRecord);
+ ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
+ ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
+ } else {
+ ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
+ }
+ }
+
+ c_lcpState.m_participatingDIH.clear();
+ c_lcpState.m_participatingLQH.clear();
+ if (isMaster()) {
+ jam();
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::setLcpActiveStatusEnd()
+
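+/*************************************************************************/
+/* IF A TAKE OVER OF THE NODE IS WAITING FOR THIS LCP (WAIT_LCP) THE */
+/* MASTER CLEARS THE TAKE OVER NODE IN THE RESTART INFO, MOVES THE */
+/* TAKE OVER TO TO_END_COPY AND REQUESTS A NEW GLOBAL CHECKPOINT. */
+/*************************************************************************/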
+void Dbdih::takeOverCompleted(Uint32 aNodeId)
+{
+ TakeOverRecordPtr takeOverPtr;
+ takeOverPtr.i = findTakeOver(aNodeId);
+ if (takeOverPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus != TakeOverRecord::WAIT_LCP) {
+ jam();
+ ndbrequire(!isMaster());
+ return;
+ }//if
+ ndbrequire(isMaster());
+ Sysfile::setTakeOverNode(aNodeId, SYSFILE->takeOver, 0);
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY;
+ cstartGcpNow = true;
+ }//if
+}//Dbdih::takeOverCompleted()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS BEFORE STARTING A LOCAL CHECKPOINT. */
+/*************************************************************************/
+void Dbdih::setLcpActiveStatusStart(Signal* signal)
+{
+ NodeRecordPtr nodePtr;
+
+ c_lcpState.m_participatingLQH.clear();
+ c_lcpState.m_participatingDIH.clear();
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+#if 0
+ if(nodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER){
+ infoEvent("Node %d nodeStatus=%d activeStatus=%d copyCompleted=%d lcp=%d",
+ nodePtr.i,
+ nodePtr.p->nodeStatus,
+ nodePtr.p->activeStatus,
+ nodePtr.p->copyCompleted,
+ nodePtr.p->m_inclDihLcp);
+ }
+#endif
+ if(nodePtr.p->nodeStatus == NodeRecord::ALIVE && nodePtr.p->m_inclDihLcp){
+ jam();
+ c_lcpState.m_participatingDIH.set(nodePtr.i);
+ }
+
+ if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (nodePtr.p->copyCompleted)) {
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The normal case. Starting a LCP for a started node which hasn't
+ // missed the previous LCP.
+ /*-------------------------------------------------------------------*/
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The node is starting up and is participating in a local checkpoint
+ // as the final phase of the start-up. We can still use the checkpoints
+ // on the node after a system restart.
+ /*-------------------------------------------------------------------*/
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ /*-------------------------------------------------------------------*/
+ // The node is starting up and is participating in a local checkpoint
+ // as the final phase of the start-up. We have missed so
+ // many checkpoints that we no longer can use this node to
+ // recreate fragments from disk.
+ // It must be taken over with the copy fragment process after a system
+ // crash. We indicate this by setting the active status to TAKE_OVER.
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
+ //break; // Fall through
+ case Sysfile::NS_TakeOver:{
+ TakeOverRecordPtr takeOverPtr;
+ jam();
+ /*-------------------------------------------------------------------*/
+ /* THIS NODE IS CURRENTLY TAKING OVER A FAILED NODE. */
+ /*-------------------------------------------------------------------*/
+ takeOverPtr.i = findTakeOver(nodePtr.i);
+ if (takeOverPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord);
+ if (takeOverPtr.p->toMasterStatus == TakeOverRecord::WAIT_LCP) {
+ jam();
+ /*---------------------------------------------------------------
+ * ALL THE INFORMATION HAS BEEN REPLICATED TO THE NEW
+ * NODE AND WE ARE ONLY WAITING FOR A LOCAL CHECKPOINT TO BE
+ * PERFORMED ON THE NODE TO SET ITS STATUS TO ACTIVE.
+ */
+ infoEvent("Node %d is WAIT_LCP including in LCP", nodePtr.i);
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
+ }//if
+ }//if
+ break;
+ }
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ } else {
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
+ (!nodePtr.p->copyCompleted)) {
+ jam();
+ /*-----------------------------------------------------------------*/
+ // The node is currently starting up and has not completed the
+ // copy phase.
+ // It will thus be in the TAKE_OVER state.
+ /*-----------------------------------------------------------------*/
+ ndbrequire(findTakeOver(nodePtr.i) != RNIL);
+ nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
+ } else {
+ jam();
+ /*-----------------------------------------------------------------*/
+ /* THE NODE IS ACTIVE AND HAS NOT COMPLETED ANY OF THE LAST 3
+ * CHECKPOINTS */
+ /* WE MUST TAKE IT OUT OF ACTION AND START A NEW NODE TO TAKE OVER.*/
+ /*-----------------------------------------------------------------*/
+ nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ }//if
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }//if
+ }//for
+ if (isMaster()) {
+ jam();
+ checkStartTakeOver(signal);
+ setNodeRestartInfoBits();
+ }//if
+}//Dbdih::setLcpActiveStatusStart()
+
+/*************************************************************************/
+/* SET NODE ACTIVE STATUS AT SYSTEM RESTART AND WHEN UPDATED BY MASTER */
+/*************************************************************************/
+void Dbdih::setNodeActiveStatus()
+{
+ NodeRecordPtr snaNodeptr;
+
+ for (snaNodeptr.i = 1; snaNodeptr.i < MAX_NDB_NODES; snaNodeptr.i++) {
+ ptrAss(snaNodeptr, nodeRecord);
+ const Uint32 tsnaNodeBits = Sysfile::getNodeStatus(snaNodeptr.i,
+ SYSFILE->nodeStatus);
+ switch (tsnaNodeBits) {
+ case Sysfile::NS_Active:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_TakeOver;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ snaNodeptr.p->activeStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//for
+}//Dbdih::setNodeActiveStatus()
+
+/***************************************************************************/
+/* SET THE NODE GROUP BASED ON THE RESTART INFORMATION OR AS SET BY MASTER */
+/***************************************************************************/
+void Dbdih::setNodeGroups()
+{
+ NodeGroupRecordPtr NGPtr;
+ NodeRecordPtr sngNodeptr;
+ Uint32 Ti;
+
+ for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
+ NGPtr.i = Ti;
+ ptrAss(NGPtr, nodeGroupRecord);
+ NGPtr.p->nodeCount = 0;
+ }//for
+ for (sngNodeptr.i = 1; sngNodeptr.i < MAX_NDB_NODES; sngNodeptr.i++) {
+ ptrAss(sngNodeptr, nodeRecord);
+ Sysfile::ActiveStatus s =
+ (Sysfile::ActiveStatus)Sysfile::getNodeStatus(sngNodeptr.i,
+ SYSFILE->nodeStatus);
+ switch (s){
+ case Sysfile::NS_Active:
+ case Sysfile::NS_ActiveMissed_1:
+ case Sysfile::NS_ActiveMissed_2:
+ case Sysfile::NS_NotActive_NotTakenOver:
+ case Sysfile::NS_TakeOver:
+ jam();
+ sngNodeptr.p->nodeGroup = Sysfile::getNodeGroup(sngNodeptr.i,
+ SYSFILE->nodeGroups);
+ NGPtr.i = sngNodeptr.p->nodeGroup;
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ NGPtr.p->nodesInGroup[NGPtr.p->nodeCount] = sngNodeptr.i;
+ NGPtr.p->nodeCount++;
+ break;
+ case Sysfile::NS_HotSpare:
+ case Sysfile::NS_NotDefined:
+ jam();
+ sngNodeptr.p->nodeGroup = ZNIL;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ }//for
+ cnoOfNodeGroups = 0;
+ for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) {
+ jam();
+ NGPtr.i = Ti;
+ ptrAss(NGPtr, nodeGroupRecord);
+ if (NGPtr.p->nodeCount != 0) {
+ jam();
+ cnoOfNodeGroups++;
+ }//if
+ }//for
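+ /* --------------------------------------------------------------------- */
+ // Nodes that do not fit into a complete node group are counted as hot
+ // spare nodes.
+ /* --------------------------------------------------------------------- */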
+ cnoHotSpare = csystemnodes - (cnoOfNodeGroups * cnoReplicas);
+}//Dbdih::setNodeGroups()
+
+/*************************************************************************/
+/* SET NODE INFORMATION AFTER RECEIVING RESTART INFORMATION FROM MASTER. */
+/* WE TAKE THE OPPORTUNITY TO SYNCHRONISE OUR DATA WITH THE MASTER. IT */
+/* IS ONLY THE MASTER THAT WILL ACT ON THIS DATA. WE WILL KEEP THEM */
+/* UPDATED FOR THE CASE WHEN WE HAVE TO BECOME MASTER. */
+/*************************************************************************/
+void Dbdih::setNodeInfo(Signal* signal)
+{
+ setNodeActiveStatus();
+ setNodeGroups();
+ sendHOT_SPAREREP(signal);
+}//Dbdih::setNodeInfo()
+
+/*************************************************************************/
+// Keep also DBDICT informed about the Hot Spare situation in the cluster.
+/*************************************************************************/
+void Dbdih::sendHOT_SPAREREP(Signal* signal)
+{
+ NodeRecordPtr locNodeptr;
+ Uint32 Ti = 0;
+ HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0];
+ NodeBitmask::clear(hotSpare->theHotSpareNodes);
+ for (locNodeptr.i = 1; locNodeptr.i < MAX_NDB_NODES; locNodeptr.i++) {
+ ptrAss(locNodeptr, nodeRecord);
+ switch (locNodeptr.p->activeStatus) {
+ case Sysfile::NS_HotSpare:
+ jam();
+ NodeBitmask::set(hotSpare->theHotSpareNodes, locNodeptr.i);
+ Ti++;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ }//for
+ hotSpare->noHotSpareNodes = Ti;
+ sendSignal(DBDICT_REF, GSN_HOT_SPAREREP,
+ signal, HotSpareRep::SignalLength, JBB);
+}//Dbdih::sendHOT_SPAREREP()
+
+/*************************************************************************/
+/* SET LCP ACTIVE STATUS FOR ALL NODES BASED ON THE INFORMATION IN */
+/* THE RESTART INFORMATION. */
+/*************************************************************************/
+#if 0
+void Dbdih::setNodeLcpActiveStatus()
+{
+ c_lcpState.m_lcpActiveStatus.clear();
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++) {
+ if (NodeBitmask::get(SYSFILE->lcpActive, i)) {
+ jam();
+ c_lcpState.m_lcpActiveStatus.set(i);
+ }//if
+ }//for
+}//Dbdih::setNodeLcpActiveStatus()
+#endif
+
+/*************************************************************************/
+/* SET THE RESTART INFO BITS BASED ON THE NODES ACTIVE STATUS. */
+/*************************************************************************/
+void Dbdih::setNodeRestartInfoBits()
+{
+ NodeRecordPtr nodePtr;
+ Uint32 tsnrNodeGroup;
+ Uint32 tsnrNodeActiveStatus;
+ Uint32 i;
+ for(i = 1; i < MAX_NDB_NODES; i++){
+ Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active);
+ }//for
+ for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){
+ SYSFILE->nodeGroups[i] = 0;
+ }//for
+ NdbNodeBitmask::clear(SYSFILE->lcpActive);
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRecord);
+ switch (nodePtr.p->activeStatus) {
+ case Sysfile::NS_Active:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_Active;
+ break;
+ case Sysfile::NS_ActiveMissed_1:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_1;
+ break;
+ case Sysfile::NS_ActiveMissed_2:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_2;
+ break;
+ case Sysfile::NS_HotSpare:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_HotSpare;
+ break;
+ case Sysfile::NS_TakeOver:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_TakeOver;
+ break;
+ case Sysfile::NS_NotActive_NotTakenOver:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_NotActive_NotTakenOver;
+ break;
+ case Sysfile::NS_NotDefined:
+ jam();
+ tsnrNodeActiveStatus = Sysfile::NS_NotDefined;
+ break;
+ default:
+ ndbrequire(false);
+ tsnrNodeActiveStatus = Sysfile::NS_NotDefined; // remove warning
+ break;
+ }//switch
+ Sysfile::setNodeStatus(nodePtr.i, SYSFILE->nodeStatus,
+ tsnrNodeActiveStatus);
+ if (nodePtr.p->nodeGroup == ZNIL) {
+ jam();
+ tsnrNodeGroup = NO_NODE_GROUP_ID;
+ } else {
+ jam();
+ tsnrNodeGroup = nodePtr.p->nodeGroup;
+ }//if
+ Sysfile::setNodeGroup(nodePtr.i, SYSFILE->nodeGroups, tsnrNodeGroup);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+ jam();
+ NodeBitmask::set(SYSFILE->lcpActive, nodePtr.i);
+ }//if
+ }//for
+}//Dbdih::setNodeRestartInfoBits()
+
+/*************************************************************************/
+/* START THE GLOBAL CHECKPOINT PROTOCOL IN MASTER AT START-UP */
+/*************************************************************************/
+void Dbdih::startGcp(Signal* signal)
+{
+ cgcpStatus = GCP_READY;
+ coldGcpStatus = cgcpStatus;
+ coldGcpId = cnewgcp;
+ cgcpSameCounter = 0;
+ signal->theData[0] = DihContinueB::ZSTART_GCP;
+ signal->theData[1] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+ signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+}//Dbdih::startGcp()
+
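+/*************************************************************************/
+/* REBUILD THE LIST OF ACTIVE NODES OF THE FRAGMENT FROM ITS STORED */
+/* REPLICAS AND MOVE THE PREFERRED PRIMARY TO THE FRONT OF THE LIST */
+/* IF IT IS PRESENT. */
+/*************************************************************************/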
+void Dbdih::updateNodeInfo(FragmentstorePtr fragPtr)
+{
+ ReplicaRecordPtr replicatePtr;
+ Uint32 index = 0;
+ replicatePtr.i = fragPtr.p->storedReplicas;
+ do {
+ jam();
+ ptrCheckGuard(replicatePtr, creplicaFileSize, replicaRecord);
+ ndbrequire(index < MAX_REPLICAS);
+ fragPtr.p->activeNodes[index] = replicatePtr.p->procNode;
+ index++;
+ replicatePtr.i = replicatePtr.p->nextReplica;
+ } while (replicatePtr.i != RNIL);
+ fragPtr.p->fragReplicas = index;
+
+ /* ----------------------------------------------------------------------- */
+ // We switch primary to the preferred primary if the preferred primary is
+ // in the list.
+ /* ----------------------------------------------------------------------- */
+ const Uint32 prefPrim = fragPtr.p->preferredPrimary;
+ for (Uint32 i = 1; i < index; i++) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS);
+ if (fragPtr.p->activeNodes[i] == prefPrim){
+ jam();
+ Uint32 switchNode = fragPtr.p->activeNodes[0];
+ fragPtr.p->activeNodes[0] = prefPrim;
+ fragPtr.p->activeNodes[i] = switchNode;
+ break;
+ }//if
+ }//for
+}//Dbdih::updateNodeInfo()
+
+void Dbdih::writeFragment(RWFragment* wf, FragmentstorePtr fragPtr)
+{
+ writePageWord(wf, wf->fragId);
+ writePageWord(wf, fragPtr.p->preferredPrimary);
+ writePageWord(wf, fragPtr.p->noStoredReplicas);
+ writePageWord(wf, fragPtr.p->noOldStoredReplicas);
+ writePageWord(wf, fragPtr.p->distributionKey);
+}//Dbdih::writeFragment()
+
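+/*************************************************************************/
+/* WRITE ONE WORD TO THE TABLE DESCRIPTION PAGES. A NEW PAGE IS */
+/* ALLOCATED WHEN THE CURRENT 2048 WORD PAGE IS FULL AND WRITING */
+/* CONTINUES AT WORD 32 OF THE NEW PAGE. */
+/*************************************************************************/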
+void Dbdih::writePageWord(RWFragment* wf, Uint32 dataWord)
+{
+ if (wf->wordIndex >= 2048) {
+ jam();
+ ndbrequire(wf->wordIndex == 2048);
+ allocpage(wf->rwfPageptr);
+ wf->wordIndex = 32;
+ wf->pageIndex++;
+ ndbrequire(wf->pageIndex < 8);
+ wf->rwfTabPtr.p->pageRef[wf->pageIndex] = wf->rwfPageptr.i;
+ wf->rwfTabPtr.p->noPages++;
+ }//if
+ wf->rwfPageptr.p->word[wf->wordIndex] = dataWord;
+ wf->wordIndex++;
+}//Dbdih::writePageWord()
+
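+/*************************************************************************/
+/* WRITE ALL REPLICA RECORDS ON THE GIVEN LIST TO THE TABLE FILE IN */
+/* THE SAME FORMAT AS READ BY READ REPLICA. */
+/*************************************************************************/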
+void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex)
+{
+ ReplicaRecordPtr wfReplicaPtr;
+ wfReplicaPtr.i = replicaStartIndex;
+ while (wfReplicaPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(wfReplicaPtr, creplicaFileSize, replicaRecord);
+ writePageWord(wf, wfReplicaPtr.p->procNode);
+ writePageWord(wf, wfReplicaPtr.p->initialGci);
+ writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas);
+ writePageWord(wf, wfReplicaPtr.p->nextLcp);
+ Uint32 i;
+ for (i = 0; i < MAX_LCP_STORED; i++) {
+ writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]);
+ writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]);
+ writePageWord(wf, wfReplicaPtr.p->lcpId[i]);
+ writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]);
+ }//for
+ for (i = 0; i < 8; i++) {
+ writePageWord(wf, wfReplicaPtr.p->createGci[i]);
+ writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]);
+ }//for
+
+ wfReplicaPtr.i = wfReplicaPtr.p->nextReplica;
+ }//while
+}//Dbdih::writeReplicas()
+
+void Dbdih::writeRestorableGci(Signal* signal, FileRecordPtr filePtr)
+{
+ for (Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++) {
+ sysfileDataToFile[i] = sysfileData[i];
+ }//for
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_CRESTART_INFO_TO_FILE;
+ signal->theData[5] = 1; /* AMOUNT OF PAGES */
+ signal->theData[6] = 0; /* MEMORY PAGE = 0 SINCE COMMON STORED VARIABLE */
+ signal->theData[7] = 0;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dbdih::writeRestorableGci()
+
+void Dbdih::writeTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr)
+{
+ signal->theData[0] = filePtr.p->fileRef;
+ signal->theData[1] = reference();
+ signal->theData[2] = filePtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_WORD;
+ signal->theData[5] = tab->noPages;
+ for (Uint32 i = 0; i < tab->noPages; i++) {
+ jam();
+ signal->theData[6 + (2 * i)] = tab->pageRef[i];
+ signal->theData[7 + (2 * i)] = i;
+ }//for
+ Uint32 length = 6 + (2 * tab->noPages);
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, length, JBA);
+}//Dbdih::writeTabfile()
+
+void Dbdih::execDEBUG_SIG(Signal* signal)
+{
+ signal = signal; //Avoid compiler warnings
+}//Dbdih::execDEBUG_SIG()
+
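+/*************************************************************************/
+// Handle DUMP_STATE_ORD: print internal DIH state via infoEvent for the
+// various dump codes and implement a few test hooks such as starting an
+// LCP immediately or changing the time between local checkpoints.
+/*************************************************************************/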
+void
+Dbdih::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
+ if (dumpState->args[0] == DumpStateOrd::DihDumpNodeRestartInfo) {
+ infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d",
+ c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait);
+ infoEvent("cstartGcpNow = %d, cgcpStatus = %d",
+ cstartGcpNow, cgcpStatus);
+ infoEvent("cfirstVerifyQueue = %d, cverifyQueueCounter = %d",
+ cfirstVerifyQueue, cverifyQueueCounter);
+ infoEvent("cgcpOrderBlocked = %d, cgcpStartCounter = %d",
+ cgcpOrderBlocked, cgcpStartCounter);
+ }//if
+ if (dumpState->args[0] == DumpStateOrd::DihDumpNodeStatusInfo) {
+ NodeRecordPtr localNodePtr;
+ infoEvent("Printing nodeStatus of all nodes");
+ for (localNodePtr.i = 1; localNodePtr.i < MAX_NDB_NODES; localNodePtr.i++) {
+ ptrAss(localNodePtr, nodeRecord);
+ if (localNodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER) {
+ infoEvent("Node = %d has status = %d",
+ localNodePtr.i, localNodePtr.p->nodeStatus);
+ }//if
+ }//for
+ }//if
+
+ if (dumpState->args[0] == DumpStateOrd::DihPrintFragmentation){
+ infoEvent("Printing fragmentation of all tables --");
+ for(Uint32 i = 0; i<ctabFileSize; i++){
+ TabRecordPtr tabPtr;
+ tabPtr.i = i;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
+ continue;
+
+ for(Uint32 j = 0; j < tabPtr.p->totalfragments; j++){
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, j, fragPtr);
+
+ Uint32 nodeOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, nodeOrder);
+ char buf[100];
+ BaseString::snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
+ for(Uint32 k = 0; k < noOfReplicas; k++){
+ char tmp[100];
+ BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
+ strcat(buf, tmp);
+ }
+ infoEvent(buf);
+ }
+ }
+ }
+
+ if (signal->theData[0] == 7000) {
+ infoEvent("ctimer = %d, cgcpParticipantState = %d, cgcpStatus = %d",
+ c_lcpState.ctimer, cgcpParticipantState, cgcpStatus);
+ infoEvent("coldGcpStatus = %d, coldGcpId = %d, cmasterState = %d",
+ coldGcpStatus, coldGcpId, cmasterState);
+ infoEvent("cmasterTakeOverNode = %d, ctcCounter = %d",
+ cmasterTakeOverNode, c_lcpState.ctcCounter);
+ }//if
+ if (signal->theData[0] == 7001) {
+ infoEvent("c_lcpState.keepGci = %d",
+ c_lcpState.keepGci);
+ infoEvent("c_lcpState.lcpStatus = %d, clcpStartGcp = %d",
+ c_lcpState.lcpStatus,
+ c_lcpState.lcpStartGcp);
+ infoEvent("cgcpStartCounter = %d, cimmediateLcpStart = %d",
+ cgcpStartCounter, c_lcpState.immediateLcpStart);
+ }//if
+ if (signal->theData[0] == 7002) {
+ infoEvent("cnoOfActiveTables = %d, cgcpDelay = %d",
+ cnoOfActiveTables, cgcpDelay);
+ infoEvent("cdictblockref = %d, cfailurenr = %d",
+ cdictblockref, cfailurenr);
+ infoEvent("con_lineNodes = %d, reference() = %d, creceivedfrag = %d",
+ con_lineNodes, reference(), creceivedfrag);
+ }//if
+ if (signal->theData[0] == 7003) {
+ infoEvent("cfirstAliveNode = %d, cgckptflag = %d",
+ cfirstAliveNode, cgckptflag);
+ infoEvent("clocallqhblockref = %d, clocaltcblockref = %d, cgcpOrderBlocked = %d",
+ clocallqhblockref, clocaltcblockref, cgcpOrderBlocked);
+ infoEvent("cstarttype = %d, csystemnodes = %d, currentgcp = %d",
+ cstarttype, csystemnodes, currentgcp);
+ }//if
+ if (signal->theData[0] == 7004) {
+ infoEvent("cmasterdihref = %d, cownNodeId = %d, cnewgcp = %d",
+ cmasterdihref, cownNodeId, cnewgcp);
+ infoEvent("cndbStartReqBlockref = %d, cremainingfrags = %d",
+ cndbStartReqBlockref, cremainingfrags);
+ infoEvent("cntrlblockref = %d, cgcpSameCounter = %d, coldgcp = %d",
+ cntrlblockref, cgcpSameCounter, coldgcp);
+ }//if
+ if (signal->theData[0] == 7005) {
+ infoEvent("crestartGci = %d",
+ crestartGci);
+ }//if
+ if (signal->theData[0] == 7006) {
+ infoEvent("clcpDelay = %d, cgcpMasterTakeOverState = %d",
+ c_lcpState.clcpDelay, cgcpMasterTakeOverState);
+ infoEvent("cmasterNodeId = %d", cmasterNodeId);
+ infoEvent("cnoHotSpare = %d, c_nodeStartMaster.startNode = %d, c_nodeStartMaster.wait = %d",
+ cnoHotSpare, c_nodeStartMaster.startNode, c_nodeStartMaster.wait);
+ }//if
+ if (signal->theData[0] == 7007) {
+ infoEvent("c_nodeStartMaster.failNr = %d", c_nodeStartMaster.failNr);
+ infoEvent("c_nodeStartMaster.startInfoErrorCode = %d",
+ c_nodeStartMaster.startInfoErrorCode);
+ infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d",
+ c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp);
+ }//if
+ if (signal->theData[0] == 7008) {
+ infoEvent("cfirstDeadNode = %d, cstartPhase = %d, cnoReplicas = %d",
+ cfirstDeadNode, cstartPhase, cnoReplicas);
+ infoEvent("cwaitLcpSr = %d",cwaitLcpSr);
+ }//if
+ if (signal->theData[0] == 7009) {
+ infoEvent("ccalcOldestRestorableGci = %d, cnoOfNodeGroups = %d",
+ c_lcpState.oldestRestorableGci, cnoOfNodeGroups);
+ infoEvent("cstartGcpNow = %d",
+ cstartGcpNow);
+ infoEvent("crestartGci = %d",
+ crestartGci);
+ }//if
+ if (signal->theData[0] == 7010) {
+ infoEvent("cminHotSpareNodes = %d, c_lcpState.lcpStatusUpdatedPlace = %d, cLcpStart = %d",
+ cminHotSpareNodes, c_lcpState.lcpStatusUpdatedPlace, c_lcpState.lcpStart);
+ infoEvent("c_blockCommit = %d, c_blockCommitNo = %d",
+ c_blockCommit, c_blockCommitNo);
+ }//if
+ if (signal->theData[0] == 7011){
+ infoEvent("c_COPY_GCIREQ_Counter = %s",
+ c_COPY_GCIREQ_Counter.getText());
+ infoEvent("c_COPY_TABREQ_Counter = %s",
+ c_COPY_TABREQ_Counter.getText());
+ infoEvent("c_CREATE_FRAGREQ_Counter = %s",
+ c_CREATE_FRAGREQ_Counter.getText());
+ infoEvent("c_DIH_SWITCH_REPLICA_REQ_Counter = %s",
+ c_DIH_SWITCH_REPLICA_REQ_Counter.getText());
+ infoEvent("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText());
+ infoEvent("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText());
+ infoEvent("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText());
+ infoEvent("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText());
+ infoEvent("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText());
+ infoEvent("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText());
+ infoEvent("c_MASTER_GCPREQ_Counter = %s",
+ c_MASTER_GCPREQ_Counter.getText());
+ infoEvent("c_MASTER_LCPREQ_Counter = %s",
+ c_MASTER_LCPREQ_Counter.getText());
+ infoEvent("c_START_INFOREQ_Counter = %s",
+ c_START_INFOREQ_Counter.getText());
+ infoEvent("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText());
+ infoEvent("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText());
+ infoEvent("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText());
+ infoEvent("c_TC_CLOPSIZEREQ_Counter = %s",
+ c_TC_CLOPSIZEREQ_Counter.getText());
+ infoEvent("c_TCGETOPSIZEREQ_Counter = %s",
+ c_TCGETOPSIZEREQ_Counter.getText());
+ infoEvent("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText());
+ }
+
+ if(signal->theData[0] == 7012){
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
+ infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf));
+ infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf));
+ infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s",
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.getText());
+ infoEvent("m_LCP_COMPLETE_REP_Counter_LQH = %s",
+ c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.getText());
+ infoEvent("m_LAST_LCP_FRAG_ORD = %s",
+ c_lcpState.m_LAST_LCP_FRAG_ORD.getText());
+ infoEvent("m_LCP_COMPLETE_REP_From_Master_Received = %d",
+ c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received);
+
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){
+ Uint32 i;
+ for(i = 0; i<nodePtr.p->noOfStartedChkpt; i++){
+ infoEvent("Node %d: started: table=%d fragment=%d replica=%d",
+ nodePtr.i,
+ nodePtr.p->startedChkpt[i].tableId,
+ nodePtr.p->startedChkpt[i].fragId,
+ nodePtr.p->startedChkpt[i].replicaPtr);
+ }
+
+ for(i = 0; i<nodePtr.p->noOfQueuedChkpt; i++){
+ infoEvent("Node %d: queued: table=%d fragment=%d replica=%d",
+ nodePtr.i,
+ nodePtr.p->queuedChkpt[i].tableId,
+ nodePtr.p->queuedChkpt[i].fragId,
+ nodePtr.p->queuedChkpt[i].replicaPtr);
+ }
+ }
+ }
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihDumpLCPState){
+ infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
+ infoEvent("lcpStatus = %d (update place = %d) ",
+ c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace);
+ infoEvent
+ ("lcpStart = %d lcpStartGcp = %d keepGci = %d oldestRestorable = %d",
+ c_lcpState.lcpStart, c_lcpState.lcpStartGcp,
+ c_lcpState.keepGci, c_lcpState.oldestRestorableGci);
+
+ infoEvent
+ ("immediateLcpStart = %d masterLcpNodeId = %d",
+ c_lcpState.immediateLcpStart,
+ refToNode(c_lcpState.m_masterLcpDihRef));
+ infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihDumpLCPMasterTakeOver){
+ infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
+ infoEvent
+ ("c_lcpMasterTakeOverState.state = %d updatePlace = %d failedNodeId = %d",
+ c_lcpMasterTakeOverState.state,
+ c_lcpMasterTakeOverState.updatePlace,
+ c_lcpMasterTakeOverState.failedNodeId);
+
+ infoEvent("c_lcpMasterTakeOverState.minTableId = %u minFragId = %u",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
+
+ infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId());
+ }
+
+ if (signal->theData[0] == 7015){
+ for(Uint32 i = 0; i<ctabFileSize; i++){
+ TabRecordPtr tabPtr;
+ tabPtr.i = i;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
+ continue;
+
+ infoEvent
+ ("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d",
+ tabPtr.i,
+ tabPtr.p->tabCopyStatus,
+ tabPtr.p->tabUpdateState,
+ tabPtr.p->tabLcpStatus);
+
+ FragmentstorePtr fragPtr;
+ for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) {
+ jam();
+ getFragstore(tabPtr.p, fid, fragPtr);
+
+ char buf[100], buf2[100];
+ BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
+ fid, fragPtr.p->noLcpReplicas);
+
+ Uint32 num=0;
+ ReplicaRecordPtr replicaPtr;
+ replicaPtr.i = fragPtr.p->storedReplicas;
+ do {
+ ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
+ buf, num,
+ replicaPtr.p->procNode,
+ replicaPtr.p->lcpIdStarted,
+ replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
+ BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
+
+ num++;
+ replicaPtr.i = replicaPtr.p->nextReplica;
+ } while (replicaPtr.i != RNIL);
+ infoEvent(buf);
+ }
+ }
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){
+ ndbout << "Dbdih:: delay write of datapages for table = "
+ << dumpState->args[1]<< endl;
+ // Send this dump to ACC and TUP
+ EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
+ EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
+
+ // Start immediate LCP
+ c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
+ return;
+ }
+
+ if (signal->theData[0] == DumpStateOrd::DihAllAllowNodeStart) {
+ for (Uint32 i = 1; i < MAX_NDB_NODES; i++)
+ setAllowNodeStart(i, true);
+ return;
+ }//if
+ if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
+ // Set time between LCP to min value
+ ndbout << "Set time between LCP to min value" << endl;
+ c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
+ return;
+ }
+ if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
+ // Set time between LCP to max value
+ ndbout << "Set time between LCP to max value" << endl;
+ c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
+ return;
+ }
+
+ if(dumpState->args[0] == 7098){
+ if(signal->length() == 3){
+ jam();
+ infoEvent("startLcpRoundLoopLab(tabel=%d, fragment=%d)",
+ signal->theData[1], signal->theData[2]);
+ startLcpRoundLoopLab(signal, signal->theData[1], signal->theData[2]);
+ return;
+ } else {
+ infoEvent("Invalid no of arguments to 7098 - startLcpRoundLoopLab -"
+ " expected 2 (tableId, fragmentId)");
+ }
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::DihStartLcpImmediately){
+ c_lcpState.ctimer += (1 << c_lcpState.clcpDelay);
+ return;
+ }
+}//Dbdih::execDUMP_STATE_ORD()
+
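+/*************************************************************************/
+// PREP_DROP_TAB_REQ: mark the table as dropping, remove any queued
+// checkpoints for it (master only), complete the LCP bookkeeping for the
+// table and ask all alive LQH nodes to confirm with WAIT_DROP_TAB before
+// PREP_DROP_TAB_CONF is sent back to the requester.
+/*************************************************************************/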
+void
+Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ PrepDropTabRef::ErrorCode err = PrepDropTabRef::OK;
+ { /**
+ * Check table state
+ */
+ bool ok = false;
+ switch(tabPtr.p->tabStatus){
+ case TabRecord::TS_IDLE:
+ ok = true;
+ jam();
+ err = PrepDropTabRef::NoSuchTable;
+ break;
+ case TabRecord::TS_DROPPING:
+ ok = true;
+ jam();
+ err = PrepDropTabRef::PrepDropInProgress;
+ break;
+ case TabRecord::TS_CREATING:
+ jam();
+ ok = true;
+ break;
+ case TabRecord::TS_ACTIVE:
+ ok = true;
+ jam();
+ break;
+ }
+ ndbrequire(ok);
+ }
+
+ if(err != PrepDropTabRef::OK){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->tabStatus = TabRecord::TS_DROPPING;
+ tabPtr.p->m_prepDropTab.senderRef = senderRef;
+ tabPtr.p->m_prepDropTab.senderData = senderData;
+
+ if(isMaster()){
+ /**
+ * Remove from queue
+ */
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRecord);
+ if (c_lcpState.m_participatingLQH.get(nodePtr.i)){
+
+ Uint32 index = 0;
+ Uint32 count = nodePtr.p->noOfQueuedChkpt;
+ while(index < count){
+ if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
+ jam();
+ // ndbout_c("Unqueuing %d", index);
+
+ count--;
+ for(Uint32 i = index; i<count; i++){
+ jam();
+ nodePtr.p->queuedChkpt[i] = nodePtr.p->queuedChkpt[i + 1];
+ }
+ } else {
+ index++;
+ }
+ }
+ nodePtr.p->noOfQueuedChkpt = count;
+ }
+ }
+ }
+
+ { /**
+ * Check table lcp state
+ */
+
+ bool ok = false;
+ switch(tabPtr.p->tabLcpStatus){
+ case TabRecord::TLS_COMPLETED:
+ case TabRecord::TLS_WRITING_TO_FILE:
+ ok = true;
+ jam();
+ break;
+ return;
+ case TabRecord::TLS_ACTIVE:
+ ok = true;
+ jam();
+
+ tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED;
+
+ /**
+ * First check if all fragments are done
+ */
+ if(checkLcpAllTablesDoneInLqh()){
+ jam();
+
+ ndbout_c("This is the last table");
+
+ /**
+ * Then check if saving of tab info is done for all tables
+ */
+ LcpStatus a = c_lcpState.lcpStatus;
+ checkLcpCompletedLab(signal);
+
+ if(a != c_lcpState.lcpStatus){
+ ndbout_c("And all tables are written to already written disk");
+ }
+ }
+ break;
+ }
+ ndbrequire(ok);
+ }
+
+ { /**
+ * Send WaitDropTabReq to all LQH
+ */
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
+ req->tableId = tabPtr.i;
+ req->senderRef = reference();
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = cfirstAliveNode;
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor();
+ while(nodePtr.i != RNIL){
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+
+ tabPtr.p->m_prepDropTab.waitDropTabCount.setWaitingFor(nodePtr.i);
+ sendSignal(calcLqhBlockRef(nodePtr.i), GSN_WAIT_DROP_TAB_REQ,
+ signal, WaitDropTabReq::SignalLength, JBB);
+
+ nodePtr.i = nodePtr.p->nextNode;
+ }
+ }
+
+ waitDropTabWritingToFile(signal, tabPtr);
+}
+
+void
+Dbdih::waitDropTabWritingToFile(Signal* signal, TabRecordPtr tabPtr){
+
+ if(tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE){
+ jam();
+ signal->theData[0] = DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE;
+ signal->theData[1] = tabPtr.i;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2);
+ return;
+ }
+
+ ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_COMPLETED);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::checkPrepDropTabComplete(Signal* signal, TabRecordPtr tabPtr){
+
+ if(tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED){
+ jam();
+ return;
+ }
+
+ if(!tabPtr.p->m_prepDropTab.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ const Uint32 ref = tabPtr.p->m_prepDropTab.senderRef;
+ if(ref != 0){
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->m_prepDropTab.senderData;
+ sendSignal(tabPtr.p->m_prepDropTab.senderRef, GSN_PREP_DROP_TAB_CONF,
+ signal, PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->m_prepDropTab.senderRef = 0;
+ }
+}
+
+void
+Dbdih::execWAIT_DROP_TAB_REF(Signal* signal){
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ Uint32 nodeId = refToNode(ref->senderRef);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::execWAIT_DROP_TAB_CONF(Signal* signal){
+ jamEntry();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING);
+ Uint32 nodeId = refToNode(conf->senderRef);
+ tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId);
+ checkPrepDropTabComplete(signal, tabPtr);
+}
+
+void
+Dbdih::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId){
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+ conf->tableId = tableId;
+
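+ /**
+ * Real-time break: scan at most RT_BREAK table records per invocation.
+ * If all table records have not yet been scanned, a CONTINUEB signal is
+ * sent at the end of this routine so the scan resumes where it left off.
+ */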
+ const Uint32 RT_BREAK = 16;
+ for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabFileSize; i++, tabPtr.i++){
+ ptrAss(tabPtr, tabRecord);
+ if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
+ if(tabPtr.p->m_prepDropTab.waitDropTabCount.isWaitingFor(nodeId)){
+ conf->senderRef = calcLqhBlockRef(nodeId);
+ execWAIT_DROP_TAB_CONF(signal);
+ tabPtr.i++;
+ break;
+ }
+ }
+ }
+
+ if(tabPtr.i == ctabFileSize){
+ /**
+ * Finished
+ */
+ jam();
+ return;
+ }
+
+ signal->theData[0] = DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+
+void
+Dbdih::execNDB_TAMPER(Signal* signal)
+{
+ if ((ERROR_INSERTED(7011)) &&
+ (signal->theData[0] == 7012)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ calculateKeepGciLab(signal, 0, 0);
+ return;
+ }//if
+ SET_ERROR_INSERT_VALUE(signal->theData[0]);
+ return;
+}//Dbdih::execNDB_TAMPER()
+
+void Dbdih::execSET_VAR_REQ(Signal* signal) {
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+ case TimeBetweenLocalCheckpoints:
+ c_lcpState.clcpDelay = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case TimeBetweenGlobalCheckpoints:
+ cgcpDelay = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}
+
+void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
+ BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
+
+ jamEntry();
+#if 0
+ ndbrequire(c_blockCommit == false ||
+ c_blockCommitNo == block->failNo);
+#else
+ if(!(c_blockCommit == false || c_blockCommitNo == block->failNo)){
+ infoEvent("Possible bug in Dbdih::execBLOCK_COMMIT_ORD c_blockCommit = %d c_blockCommitNo = %d"
+ " sig->failNo = %d", c_blockCommit, c_blockCommitNo, block->failNo);
+ }
+#endif
+ c_blockCommit = true;
+ c_blockCommitNo = block->failNo;
+}
+
+void Dbdih::execUNBLOCK_COMMIT_ORD(Signal* signal){
+ UnblockCommitOrd* const unblock = (UnblockCommitOrd *)&signal->theData[0];
+ (void)unblock;
+
+ jamEntry();
+
+ if(c_blockCommit == true){
+ jam();
+ // ndbrequire(c_blockCommitNo == unblock->failNo);
+
+ c_blockCommit = false;
+ emptyverificbuffer(signal, true);
+ }
+}
+
+void Dbdih::execSTOP_PERM_REQ(Signal* signal){
+
+ jamEntry();
+
+ StopPermReq* const req = (StopPermReq*)&signal->theData[0];
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+ const NodeId nodeId = refToNode(senderRef);
+
+ if (isMaster()) {
+ /**
+ * Master
+ */
+ jam();
+ CRASH_INSERTION(7065);
+ if (c_stopPermMaster.clientRef != 0) {
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeShutdownInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
+ StopPermRef::SignalLength, JBB);
+ return;
+ }//if
+
+ if (c_nodeStartMaster.activeState) {
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeStartInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal,
+ StopPermRef::SignalLength, JBB);
+ return;
+ }//if
+
+ /**
+ * Lock
+ */
+ c_nodeStartMaster.activeState = true;
+ c_stopPermMaster.clientRef = senderRef;
+
+ c_stopPermMaster.clientData = senderData;
+ c_stopPermMaster.returnValue = 0;
+ c_switchReplicas.clear();
+
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ Callback c = { safe_cast(&Dbdih::switch_primary_stop_node), nodeId };
+ ndbrequire(mutex.lock(c));
+ } else {
+ /**
+ * Proxy part
+ */
+ jam();
+ CRASH_INSERTION(7066);
+ if(c_stopPermProxy.clientRef != 0){
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = StopPermRef::NodeShutdownInProgress;
+ sendSignal(senderRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ return;
+ }//if
+
+ c_stopPermProxy.clientRef = senderRef;
+ c_stopPermProxy.masterRef = cmasterdihref;
+ c_stopPermProxy.clientData = senderData;
+
+ req->senderRef = reference();
+ req->senderData = senderData;
+ sendSignal(cmasterdihref, GSN_STOP_PERM_REQ, signal,
+ StopPermReq::SignalLength, JBB);
+ }//if
+}//Dbdih::execSTOP_PERM_REQ()
+
+void
+Dbdih::switch_primary_stop_node(Signal* signal, Uint32 node_id, Uint32 ret_val)
+{
+ ndbrequire(ret_val == 0);
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = node_id;
+ signal->theData[2] = 0; // table id
+ signal->theData[3] = 0; // fragment id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}
+
+void Dbdih::execSTOP_PERM_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_stopPermProxy.clientRef != 0);
+ ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB);
+ c_stopPermProxy.clientRef = 0;
+}//Dbdih::execSTOP_PERM_REF()
+
+void Dbdih::execSTOP_PERM_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(c_stopPermProxy.clientRef != 0);
+ ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef());
+ sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_CONF, signal, 1, JBB);
+ c_stopPermProxy.clientRef = 0;
+}//Dbdih::execSTOP_PERM_CONF()
+
+void Dbdih::execDIH_SWITCH_REPLICA_REQ(Signal* signal)
+{
+ jamEntry();
+ DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
+ const Uint32 tableId = req->tableId;
+ const Uint32 fragNo = req->fragNo;
+ const BlockReference senderRef = req->senderRef;
+
+ CRASH_INSERTION(7067);
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) {
+ jam();
+ sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REQ, signal,
+ DihSwitchReplicaReq::SignalLength, JBB);
+ return;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ /**
+ * Do funky stuff
+ */
+ Uint32 oldOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
+
+ if (noOfReplicas < req->noOfReplicas) {
+ jam();
+ //---------------------------------------------------------------------
+ // A crash occurred in the middle of our switch handling.
+ //---------------------------------------------------------------------
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ ref->senderNode = cownNodeId;
+ ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure;
+ sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal,
+ DihSwitchReplicaRef::SignalLength, JBB);
+ return;
+ }//if
+ for (Uint32 i = 0; i < noOfReplicas; i++) {
+ jam();
+ ndbrequire(i < MAX_REPLICAS);
+ fragPtr.p->activeNodes[i] = req->newNodeOrder[i];
+ }//for
+ /**
+ * Reply
+ */
+ DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
+ conf->senderNode = cownNodeId;
+ sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_CONF, signal,
+ DihSwitchReplicaConf::SignalLength, JBB);
+}//Dbdih::execDIH_SWITCH_REPLICA_REQ()
+
+void Dbdih::execDIH_SWITCH_REPLICA_CONF(Signal* signal)
+{
+ jamEntry();
+ /**
+ * Response to master
+ */
+ CRASH_INSERTION(7068);
+ DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0];
+ switchReplicaReply(signal, conf->senderNode);
+}//Dbdih::execDIH_SWITCH_REPLICA_CONF()
+
+void Dbdih::execDIH_SWITCH_REPLICA_REF(Signal* signal)
+{
+ jamEntry();
+ DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0];
+ if(c_stopPermMaster.returnValue == 0){
+ jam();
+ c_stopPermMaster.returnValue = ref->errorCode;
+ }//if
+ switchReplicaReply(signal, ref->senderNode);
+}//Dbdih::execDIH_SWITCH_REPLICA_REF()
+
+void Dbdih::switchReplicaReply(Signal* signal,
+ NodeId nodeId){
+ jam();
+ receiveLoopMacro(DIH_SWITCH_REPLICA_REQ, nodeId);
+ //------------------------------------------------------
+ // We have received all responses from the nodes. Thus
+ // we have completed switching replica roles. Continue
+ // with the next fragment.
+ //------------------------------------------------------
+ if(c_stopPermMaster.returnValue != 0){
+ jam();
+ c_switchReplicas.tableId = ctabFileSize + 1;
+ }//if
+ c_switchReplicas.fragNo++;
+
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = c_switchReplicas.nodeId;
+ signal->theData[2] = c_switchReplicas.tableId;
+ signal->theData[3] = c_switchReplicas.fragNo;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::switchReplicaReply()
+
+void
+Dbdih::switchReplica(Signal* signal,
+ Uint32 nodeId,
+ Uint32 tableId,
+ Uint32 fragNo){
+ jam();
+ DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0];
+
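+ /**
+ * Real-time break: examine at most RT_BREAK fragments per invocation.
+ * The loop either finishes the procedure (STOP_PERM_CONF/REF), sends a
+ * DIH_SWITCH_REPLICA_REQ for a fragment whose primary is the stopping
+ * node, or falls through to the CONTINUEB below to resume later.
+ */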
+ const Uint32 RT_BREAK = 64;
+
+ for (Uint32 i = 0; i < RT_BREAK; i++) {
+ jam();
+ if (tableId >= ctabFileSize) {
+ jam();
+ StopPermConf* const conf = (StopPermConf*)&signal->theData[0];
+ StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+ /**
+ * Finished with all tables
+ */
+ if(c_stopPermMaster.returnValue == 0) {
+ jam();
+ conf->senderData = c_stopPermMaster.clientData;
+ sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_CONF,
+ signal, 1, JBB);
+ } else {
+ jam();
+ ref->senderData = c_stopPermMaster.clientData;
+ ref->errorCode = c_stopPermMaster.returnValue;
+ sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_REF, signal, 2,JBB);
+ }//if
+
+ /**
+ * UnLock
+ */
+ c_nodeStartMaster.activeState = false;
+ c_stopPermMaster.clientRef = 0;
+ c_stopPermMaster.clientData = 0;
+ c_stopPermMaster.returnValue = 0;
+ Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle);
+ mutex.unlock(); // ignore result
+ return;
+ }//if
+
+ TabRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) {
+ jam();
+ tableId++;
+ fragNo = 0;
+ continue;
+ }//if
+ if (fragNo >= tabPtr.p->totalfragments) {
+ jam();
+ tableId++;
+ fragNo = 0;
+ continue;
+ }//if
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, fragNo, fragPtr);
+
+ Uint32 oldOrder[MAX_REPLICAS];
+ const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder);
+
+ if(oldOrder[0] != nodeId) {
+ jam();
+ fragNo++;
+ continue;
+ }//if
+ req->tableId = tableId;
+ req->fragNo = fragNo;
+ req->noOfReplicas = noOfReplicas;
+ for (Uint32 i = 0; i < (noOfReplicas - 1); i++) {
+ req->newNodeOrder[i] = oldOrder[i+1];
+ }//for
+ req->newNodeOrder[noOfReplicas-1] = nodeId;
+ req->senderRef = reference();
+
+ /**
+ * Initialize struct
+ */
+ c_switchReplicas.tableId = tableId;
+ c_switchReplicas.fragNo = fragNo;
+ c_switchReplicas.nodeId = nodeId;
+
+ sendLoopMacro(DIH_SWITCH_REPLICA_REQ, sendDIH_SWITCH_REPLICA_REQ);
+ return;
+ }//for
+
+ signal->theData[0] = DihContinueB::SwitchReplica;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tableId;
+ signal->theData[3] = fragNo;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+}//Dbdih::switchReplica()
+
+void Dbdih::execSTOP_ME_REQ(Signal* signal)
+{
+ jamEntry();
+ StopMeReq* const req = (StopMeReq*)&signal->theData[0];
+ const BlockReference senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 nodeId = refToNode(senderRef);
+ {
+ /**
+ * Set node dead (remove from operations)
+ */
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->useInTransactions = false;
+ }
+ if (nodeId != getOwnNodeId()) {
+ jam();
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderData = senderData;
+ stopMeConf->senderRef = reference();
+ sendSignal(senderRef, GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ return;
+ }//if
+
+ /**
+ * Local signal
+ */
+ jam();
+ ndbrequire(c_stopMe.clientRef == 0);
+
+ c_stopMe.clientData = senderData;
+ c_stopMe.clientRef = senderRef;
+
+ req->senderData = senderData;
+ req->senderRef = reference();
+
+ sendLoopMacro(STOP_ME_REQ, sendSTOP_ME_REQ);
+
+ /**
+ * Send conf to self
+ */
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+ stopMeConf->senderData = senderData;
+ stopMeConf->senderRef = reference();
+ sendSignal(reference(), GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+}//Dbdih::execSTOP_ME_REQ()
+
+void Dbdih::execSTOP_ME_REF(Signal* signal)
+{
+ ndbrequire(false);
+}
+
+void Dbdih::execSTOP_ME_CONF(Signal* signal)
+{
+ jamEntry();
+ StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0];
+
+ const Uint32 senderRef = stopMeConf->senderRef;
+ const Uint32 senderData = stopMeConf->senderData;
+ const Uint32 nodeId = refToNode(senderRef);
+
+ ndbrequire(c_stopMe.clientRef != 0);
+ ndbrequire(c_stopMe.clientData == senderData);
+
+ receiveLoopMacro(STOP_ME_REQ, nodeId);
+ //---------------------------------------------------------
+ // All STOP_ME_REQ have been received. We will send the
+ // confirmation back to the requesting block.
+ //---------------------------------------------------------
+
+ stopMeConf->senderRef = reference();
+ stopMeConf->senderData = c_stopMe.clientData;
+ sendSignal(c_stopMe.clientRef, GSN_STOP_ME_CONF, signal,
+ StopMeConf::SignalLength, JBB);
+ c_stopMe.clientRef = 0;
+}//Dbdih::execSTOP_ME_CONF()
+
+void Dbdih::execWAIT_GCP_REQ(Signal* signal)
+{
+ jamEntry();
+ WaitGCPReq* const req = (WaitGCPReq*)&signal->theData[0];
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+ const Uint32 requestType = req->requestType;
+
+ if(requestType == WaitGCPReq::CurrentGCI) {
+ jam();
+ conf->senderData = senderData;
+ conf->gcp = cnewgcp;
+ sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+ return;
+ }//if
+
+ if(isMaster()) {
+ /**
+ * Master
+ */
+ jam();
+
+ if((requestType == WaitGCPReq::CompleteIfRunning) &&
+ (cgcpStatus == GCP_READY)) {
+ jam();
+ conf->senderData = senderData;
+ conf->gcp = coldgcp;
+ sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+ return;
+ }//if
+
+ WaitGCPMasterPtr ptr;
+ if(c_waitGCPMasterList.seize(ptr) == false){
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
+ sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ return;
+ }//if
+ ptr.p->clientRef = senderRef;
+ ptr.p->clientData = senderData;
+
+ if((requestType == WaitGCPReq::CompleteForceStart) &&
+ (cgcpStatus == GCP_READY)) {
+ jam();
+ cstartGcpNow = true;
+ }//if
+ return;
+ } else {
+ /**
+ * Proxy part
+ */
+ jam();
+ WaitGCPProxyPtr ptr;
+ if (c_waitGCPProxyList.seize(ptr) == false) {
+ jam();
+ ref->senderData = senderData;
+ ref->errorCode = WaitGCPRef::NoWaitGCPRecords;
+ sendSignal(senderRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ return;
+ }//if
+ ptr.p->clientRef = senderRef;
+ ptr.p->clientData = senderData;
+ ptr.p->masterRef = cmasterdihref;
+
+ req->senderData = ptr.i;
+ req->senderRef = reference();
+ req->requestType = requestType;
+
+ sendSignal(cmasterdihref, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength, JBB);
+ return;
+ }//if
+}//Dbdih::execWAIT_GCP_REQ()
+
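+/**
+ * WAIT_GCP_REF/CONF from the master arrive at the proxy DIH. The proxy
+ * record seized in execWAIT_GCP_REQ is looked up via senderData, the
+ * answer is forwarded to the original client and the record is released.
+ */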
+void Dbdih::execWAIT_GCP_REF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(!isMaster());
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+
+ const Uint32 proxyPtr = ref->senderData;
+ const Uint32 errorCode = ref->errorCode;
+
+ WaitGCPProxyPtr ptr;
+ ptr.i = proxyPtr;
+ c_waitGCPProxyList.getPtr(ptr);
+
+ ref->senderData = ptr.p->clientData;
+ ref->errorCode = errorCode;
+ sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+
+ c_waitGCPProxyList.release(ptr);
+}//Dbdih::execWAIT_GCP_REF()
+
+void Dbdih::execWAIT_GCP_CONF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(!isMaster());
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ const Uint32 proxyPtr = conf->senderData;
+ const Uint32 gcp = conf->gcp;
+ WaitGCPProxyPtr ptr;
+
+ ptr.i = proxyPtr;
+ c_waitGCPProxyList.getPtr(ptr);
+
+ conf->senderData = ptr.p->clientData;
+ conf->gcp = gcp;
+ sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+
+ c_waitGCPProxyList.release(ptr);
+}//Dbdih::execWAIT_GCP_CONF()
+
+void Dbdih::checkWaitGCPProxy(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+ ref->errorCode = WaitGCPRef::NF_CausedAbortOfProcedure;
+
+ WaitGCPProxyPtr ptr;
+ c_waitGCPProxyList.first(ptr);
+ while(ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const Uint32 clientData = ptr.p->clientData;
+ const BlockReference clientRef = ptr.p->clientRef;
+ const BlockReference masterRef = ptr.p->masterRef;
+
+ c_waitGCPProxyList.next(ptr);
+ if(refToNode(masterRef) == failedNodeId) {
+ jam();
+ c_waitGCPProxyList.release(i);
+ ref->senderData = clientData;
+ sendSignal(clientRef, GSN_WAIT_GCP_REF, signal,
+ WaitGCPRef::SignalLength, JBB);
+ }//if
+ }//while
+}//Dbdih::checkWaitGCPProxy()
+
+void Dbdih::checkWaitGCPMaster(Signal* signal, NodeId failedNodeId)
+{
+ jam();
+ WaitGCPMasterPtr ptr;
+ c_waitGCPMasterList.first(ptr);
+
+ while (ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const NodeId nodeId = refToNode(ptr.p->clientRef);
+
+ c_waitGCPMasterList.next(ptr);
+ if (nodeId == failedNodeId) {
+ jam();
+ c_waitGCPMasterList.release(i);
+ }//if
+ }//while
+}//Dbdih::checkWaitGCPMaster()
+
+void Dbdih::emptyWaitGCPMasterQueue(Signal* signal)
+{
+ jam();
+ WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0];
+ conf->gcp = coldgcp;
+
+ WaitGCPMasterPtr ptr;
+ c_waitGCPMasterList.first(ptr);
+ while(ptr.i != RNIL) {
+ jam();
+ const Uint32 i = ptr.i;
+ const Uint32 clientData = ptr.p->clientData;
+ const BlockReference clientRef = ptr.p->clientRef;
+
+ c_waitGCPMasterList.next(ptr);
+ conf->senderData = clientData;
+ sendSignal(clientRef, GSN_WAIT_GCP_CONF, signal,
+ WaitGCPConf::SignalLength, JBB);
+
+ c_waitGCPMasterList.release(i);
+ }//while
+}//Dbdih::emptyWaitGCPMasterQueue()
+
+void Dbdih::setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus newStatus)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->nodeStatus = newStatus;
+}//Dbdih::setNodeStatus()
+
+Dbdih::NodeRecord::NodeStatus Dbdih::getNodeStatus(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->nodeStatus;
+}//Dbdih::getNodeStatus()
+
+Sysfile::ActiveStatus
+Dbdih::getNodeActiveStatus(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->activeStatus;
+}//Dbdih::getNodeActiveStatus()
+
+
+void
+Dbdih::setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->activeStatus = newStatus;
+}//Dbdih::setNodeActiveStatus()
+
+void Dbdih::setAllowNodeStart(Uint32 nodeId, bool newState)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->allowNodeStart = newState;
+}//Dbdih::setAllowNodeStart()
+
+void Dbdih::setNodeCopyCompleted(Uint32 nodeId, bool newState)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ nodePtr.p->copyCompleted = newState;
+}//Dbdih::setNodeCopyCompleted()
+
+bool Dbdih::getAllowNodeStart(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->allowNodeStart;
+}//Dbdih::getAllowNodeStart()
+
+bool Dbdih::getNodeCopyCompleted(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ return nodePtr.p->copyCompleted;
+}//Dbdih::getNodeCopyCompleted()
+
+bool Dbdih::checkNodeAlive(Uint32 nodeId)
+{
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ndbrequire(nodeId > 0);
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) {
+ return false;
+ } else {
+ return true;
+ }//if
+}//Dbdih::checkNodeAlive()
+
+bool Dbdih::isMaster()
+{
+ return (reference() == cmasterdihref);
+}//Dbdih::isMaster()
+
+bool Dbdih::isActiveMaster()
+{
+ return ((reference() == cmasterdihref) && (cmasterState == MASTER_ACTIVE));
+}//Dbdih::isActiveMaster()
+
+Dbdih::NodeRecord::NodeRecord(){
+ m_nodefailSteps.clear();
+ gcpstate = NodeRecord::READY;
+
+ activeStatus = Sysfile::NS_NotDefined;
+ recNODE_FAILREP = ZFALSE;
+ nodeGroup = ZNIL;
+ dbtcFailCompleted = ZTRUE;
+ dbdictFailCompleted = ZTRUE;
+ dbdihFailCompleted = ZTRUE;
+ dblqhFailCompleted = ZTRUE;
+ noOfStartedChkpt = 0;
+ noOfQueuedChkpt = 0;
+ lcpStateAtTakeOver = (MasterLCPConf::State)255;
+
+ activeTabptr = RNIL;
+ nodeStatus = NodeRecord::NOT_IN_CLUSTER;
+ useInTransactions = false;
+ copyCompleted = false;
+ allowNodeStart = true;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbdih/LCP.txt b/storage/ndb/src/kernel/blocks/dbdih/LCP.txt
new file mode 100644
index 00000000000..500c82f6baf
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/LCP.txt
@@ -0,0 +1,35 @@
+
+Master DIH LQH
+========== ==========
+
+1) TCGETOPSIZEREQ -> all TC
+
+2) If sum(operation size) < Threshold
+ Goto 1
+
+3) For each table
+ Calc Keep GCI (local using CONTINUEB)
+
+4) COPY_GCIREQ -> all DIH
+
+5) TC_CLOPSIZEREQ -> all TC
+
+6) For each fragment
+ LCP_FRAG_ORD -> LQH
+
+ Do LCP...
+ 1) LCP_FRAG_REP -> all DIH
+ 2) If last fragment
+ LCP_COMPLETE_REP -> all DIH
+
+7) When receiving LCP_COMPLETE_REP from DIH
+ 1) If all DIHs have completed
+ Goto 1
+
+All DIHs
+==========
+1) When receiving LCP_FRAG_REP
+ If all fragments & replicas done in table
+ 1) Save Table descriptor
+ 2) If all tables done + LCP_COMPLETE_REP(from lqh) has arrived
+ LCP_COMPLETE_REP -> master DIH
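+
+Illustrative sketch (assumption: not part of the real block interfaces):
+a minimal stand-alone C++ model of the master loop above. The helper
+names (sumTcOperationSize, sendLcpFragOrd, ...) are hypothetical and only
+mirror the numbered steps.
+
+  #include <cstdio>
+  #include <vector>
+
+  struct Frag { int table, frag; };
+
+  static int  sumTcOperationSize()        { return 1000; }  // 1)
+  static void calcKeepGci(int table)      { (void)table; }  // 3)
+  static void sendCopyGciReq()            { }               // 4)
+  static void sendLcpFragOrd(const Frag& f)                 // 6)
+  { std::printf("LCP_FRAG_ORD table=%d frag=%d\n", f.table, f.frag); }
+
+  int main()
+  {
+    const int threshold = 100;
+    if (sumTcOperationSize() < threshold)       // 2) too little work: retry later
+      return 0;
+    std::vector<Frag> frags = { {0,0}, {0,1}, {1,0} };
+    for (const Frag& f : frags) calcKeepGci(f.table);  // 3)
+    sendCopyGciReq();                                  // 4) (and 5) TC_CLOPSIZEREQ)
+    for (const Frag& f : frags) sendLcpFragOrd(f);     // 6)
+    // 7) completion is driven by LCP_COMPLETE_REP replies, not modelled here
+    return 0;
+  }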
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Makefile.am b/storage/ndb/src/kernel/blocks/dbdih/Makefile.am
new file mode 100644
index 00000000000..d6ad380b806
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbdih.a
+
+libdbdih_a_SOURCES = DbdihInit.cpp DbdihMain.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbdih.dsp
+
+libdbdih.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdih_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp b/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
new file mode 100644
index 00000000000..3e2f3b0dd48
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
@@ -0,0 +1,275 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SYSFILE_HPP
+#define SYSFILE_HPP
+
+#include <ndb_types.h>
+#include <ndb_limits.h>
+#include <NodeBitmask.hpp>
+
+/**
+ * Number of bits in the Sysfile used to represent a node id
+ */
+#define NODEID_BITS 8
+
+/**
+ * Constant representing that a node does not belong to
+ * any node group
+ */
+#define NO_NODE_GROUP_ID ((1 << NODEID_BITS) - 1)
+
+/**
+ * Dummy macro to make emacs indent better
+ */
+#define _F(x) x
+
+/**
+ * Number of 32-bit words in the sysfile
+ *
+ * 5 +
+ * MAX_NDB_NODES + // lastCompletedGCI
+ * NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) + // nodeStatus
+ * NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + // nodeGroups
+ * NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + // takeOver
+ * NodeBitmask::NDB_NODE_BITMASK_SIZE // Lcp Active
+ */
+#define _SYSFILE_SIZE32 (5 + \
+ MAX_NDB_NODES + \
+ NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) + \
+ NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + \
+ NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + \
+ _NDB_NODE_BITMASK_SIZE)
+
+/**
+ * This struct defines the format of P<X>.sysfile
+ */
+struct Sysfile {
+public:
+
+ /**
+ * Number of 32-bit words in the sysfile
+ */
+ STATIC_CONST( SYSFILE_SIZE32 = _SYSFILE_SIZE32 );
+
+ Uint32 systemRestartBits;
+
+ static bool getInitialStartOngoing(const Uint32 & systemRestartBits);
+ static void setInitialStartOngoing(Uint32 & systemRestartBits);
+ static void clearInitialStartOngoing(Uint32 & systemRestartBits);
+
+ static bool getRestartOngoing(const Uint32 & systemRestartBits);
+ static void setRestartOngoing(Uint32 & systemRestartBits);
+ static void clearRestartOngoing(Uint32 & systemRestartBits);
+
+ static bool getLCPOngoing(const Uint32 & systemRestartBits);
+ static void setLCPOngoing(Uint32 & systemRestartBits);
+ static void clearLCPOngoing(Uint32 & systemRestartBits);
+
+ Uint32 keepGCI;
+ Uint32 oldestRestorableGCI;
+ Uint32 newestRestorableGCI;
+ Uint32 latestLCP_ID;
+
+ /**
+ * Last completed GCI for each node
+ */
+ Uint32 lastCompletedGCI[MAX_NDB_NODES];
+
+ /**
+ * Active status bits
+ *
+ * Each status takes 4 bits to represent
+ */
+ enum ActiveStatus {
+ NS_Active = 0
+ ,NS_ActiveMissed_1 = 1
+ ,NS_ActiveMissed_2 = 2
+ ,NS_ActiveMissed_3 = 3
+ ,NS_HotSpare = 4
+ ,NS_NotActive_NotTakenOver = 5
+ ,NS_TakeOver = 6
+ ,NS_NotActive_TakenOver = 7
+ ,NS_NotDefined = 8
+ ,NS_Standby = 9
+ };
+ STATIC_CONST( NODE_STATUS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) );
+ Uint32 nodeStatus[NODE_STATUS_SIZE];
+
+ static Uint32 getNodeStatus(NodeId, const Uint32 nodeStatus[]);
+ static void setNodeStatus(NodeId, Uint32 nodeStatus[], Uint32 status);
+
+ /**
+ * The node group of each node
+ * Sizeof(NodeGroup) = 8 Bit
+ */
+ STATIC_CONST( NODE_GROUPS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
+ NODEID_BITS) );
+ Uint32 nodeGroups[NODE_GROUPS_SIZE];
+
+ static Uint16 getNodeGroup(NodeId, const Uint32 nodeGroups[]);
+ static void setNodeGroup(NodeId, Uint32 nodeGroups[], Uint16 group);
+
+ /**
+ * Any node can take over for any node
+ */
+ STATIC_CONST( TAKE_OVER_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
+ NODEID_BITS) );
+ Uint32 takeOver[TAKE_OVER_SIZE];
+
+ static NodeId getTakeOverNode(NodeId, const Uint32 takeOver[]);
+ static void setTakeOverNode(NodeId, Uint32 takeOver[], NodeId toNode);
+
+ /**
+ * Is a node running an LCP
+ */
+ Uint32 lcpActive[NdbNodeBitmask::Size];
+};
+
+#if (MAX_NDB_NODES > (1<<NODEID_BITS))
+#error "Sysfile node id is too small"
+#endif
+
+/**
+ * Restart Info
+ *
+ * i = Initial start completed
+ * r = Crash during system restart
+ * l = Crash during local checkpoint
+
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * irl
+ */
+inline
+bool
+Sysfile::getInitialStartOngoing(const Uint32 & systemRestartBits){
+ return systemRestartBits & 1;
+}
+
+inline
+void
+Sysfile::setInitialStartOngoing(Uint32 & systemRestartBits){
+ systemRestartBits |= 1;
+}
+
+inline
+void
+Sysfile::clearInitialStartOngoing(Uint32 & systemRestartBits){
+ systemRestartBits &= ~1;
+}
+
+inline
+bool
+Sysfile::getRestartOngoing(const Uint32 & systemRestartBits){
+ return (systemRestartBits & 2) != 0;
+}
+
+inline
+void
+Sysfile::setRestartOngoing(Uint32 & systemRestartBits){
+ systemRestartBits |= 2;
+}
+
+inline
+void
+Sysfile::clearRestartOngoing(Uint32 & systemRestartBits){
+ systemRestartBits &= ~2;
+}
+
+inline
+bool
+Sysfile::getLCPOngoing(const Uint32 & systemRestartBits){
+ return systemRestartBits & 4;
+}
+
+inline
+void
+Sysfile::setLCPOngoing(Uint32 & systemRestartBits){
+ systemRestartBits |= 4;
+}
+
+inline
+void
+Sysfile::clearLCPOngoing(Uint32 & systemRestartBits){
+ systemRestartBits &= ~4;
+}
+
+inline
+Uint32
+Sysfile::getNodeStatus(NodeId nodeId, const Uint32 nodeStatus[]){
+ const int word = nodeId >> 3;
+ const int shift = (nodeId & 7) << 2;
+
+ return (nodeStatus[word] >> shift) & 15;
+}
+
+inline
+void
+Sysfile::setNodeStatus(NodeId nodeId, Uint32 nodeStatus[], Uint32 status){
+ const int word = nodeId >> 3;
+ const int shift = (nodeId & 7) << 2;
+
+ const Uint32 mask = ~(((Uint32)15) << shift);
+ const Uint32 tmp = nodeStatus[word];
+
+ nodeStatus[word] = (tmp & mask) | ((status & 15) << shift);
+}
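+
+/**
+ * Example of the packing used by getNodeStatus/setNodeStatus above:
+ * nodeId 10 maps to nodeStatus[1] (10 >> 3) at bit offset 8 ((10 & 7) << 2),
+ * i.e. eight 4-bit status values are packed into each 32-bit word.
+ */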
+
+inline
+Uint16
+Sysfile::getNodeGroup(NodeId nodeId, const Uint32 nodeGroups[]){
+ const int word = nodeId >> 2;
+ const int shift = (nodeId & 3) << 3;
+
+ return (nodeGroups[word] >> shift) & 255;
+}
+
+inline
+void
+Sysfile::setNodeGroup(NodeId nodeId, Uint32 nodeGroups[], Uint16 group){
+ const int word = nodeId >> 2;
+ const int shift = (nodeId & 3) << 3;
+
+ const Uint32 mask = ~(((Uint32)255) << shift);
+ const Uint32 tmp = nodeGroups[word];
+
+ nodeGroups[word] = (tmp & mask) | ((group & 255) << shift);
+}
+
+inline
+NodeId
+Sysfile::getTakeOverNode(NodeId nodeId, const Uint32 takeOver[]){
+ const int word = nodeId >> 2;
+ const int shift = (nodeId & 3) << 3;
+
+ return (takeOver[word] >> shift) & 255;
+}
+
+inline
+void
+Sysfile::setTakeOverNode(NodeId nodeId, Uint32 takeOver[], NodeId toNode){
+ const int word = nodeId >> 2;
+ const int shift = (nodeId & 3) << 3;
+
+ const Uint32 mask = ~(((Uint32)255) << shift);
+ const Uint32 tmp = takeOver[word];
+
+ takeOver[word] = (tmp & mask) | ((toNode & 255) << shift);
+}
+
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile
new file mode 100644
index 00000000000..4c4b1026aff
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile
@@ -0,0 +1,12 @@
+include .defs.mk
+
+TYPE := ndbapi
+
+BIN_TARGET := printSysfile
+BIN_TARGET_ARCHIVES := portlib general
+
+CCFLAGS_LOC += -I..
+
+SOURCES := printSysfile.cpp
+
+include $(NDB_TOP)/Epilogue.mk
diff --git a/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
new file mode 100644
index 00000000000..efa4b9c92c5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp
@@ -0,0 +1,158 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <ndb_global.h>
+
+#include <NdbMain.h>
+#include <NdbOut.hpp>
+#include <Sysfile.hpp>
+
+void
+usage(const char * prg){
+ ndbout << "Usage " << prg
+ << " P[0-1].sysfile" << endl;
+}
+
+struct NSString {
+ Sysfile::ActiveStatus NodeStatus;
+ const char * desc;
+};
+
+static const
+NSString NodeStatusStrings[] = {
+ { Sysfile::NS_Active, "Active " },
+ { Sysfile::NS_ActiveMissed_1, "Active missed 1" },
+ { Sysfile::NS_ActiveMissed_2, "Active missed 2" },
+ { Sysfile::NS_ActiveMissed_3, "Active missed 3" },
+ { Sysfile::NS_HotSpare, "Hot spare " },
+ { Sysfile::NS_NotActive_NotTakenOver, "Not active " },
+ { Sysfile::NS_TakeOver, "Take over " },
+ { Sysfile::NS_NotActive_TakenOver, "Taken over " },
+ { Sysfile::NS_NotDefined, "Not defined " },
+ { Sysfile::NS_Standby, "Stand by " }
+};
+
+const
+char * getNSString(Uint32 ns){
+ for(Uint32 i = 0; i<(sizeof(NodeStatusStrings)/sizeof(NSString)); i++)
+ if((Uint32)NodeStatusStrings[i].NodeStatus == ns)
+ return NodeStatusStrings[i].desc;
+ return "<Unknown state>";
+}
+
+void
+fill(const char * buf, int mod){
+ int len = strlen(buf)+1;
+ ndbout << buf << " ";
+ while((len % mod) != 0){
+ ndbout << " ";
+ len++;
+ }
+}
+
+void
+print(const char * filename, const Sysfile * sysfile){
+ char buf[255];
+ ndbout << "----- Sysfile: " << filename << " -----" << endl;
+ ndbout << "Initial start ongoing: "
+ << Sysfile::getInitialStartOngoing(sysfile->systemRestartBits)
+ << ", ";
+
+ ndbout << "Restart Ongoing: "
+ << Sysfile::getRestartOngoing(sysfile->systemRestartBits)
+ << ", ";
+
+ ndbout << "LCP Ongoing: "
+ << Sysfile::getLCPOngoing(sysfile->systemRestartBits)
+ << endl;
+
+
+ ndbout << "-- Global Checkpoint Identities: --" << endl;
+ sprintf(buf, "keepGCI = %u", sysfile->keepGCI);
+ fill(buf, 40);
+ ndbout << " -- Tail of REDO log" << endl;
+
+ sprintf(buf, "oldestRestorableGCI = %u", sysfile->oldestRestorableGCI);
+ fill(buf, 40);
+ ndbout << " -- " << endl;
+
+ sprintf(buf, "newestRestorableGCI = %u", sysfile->newestRestorableGCI);
+ fill(buf, 40);
+ ndbout << " -- " << endl;
+
+ sprintf(buf, "latestLCP = %u", sysfile->latestLCP_ID);
+ fill(buf, 40);
+ ndbout << " -- " << endl;
+
+ ndbout << "-- Node status: --" << endl;
+ for(int i = 1; i < MAX_NDB_NODES; i++){
+ if(Sysfile::getNodeStatus(i, sysfile->nodeStatus) !=Sysfile::NS_NotDefined){
+ sprintf(buf,
+ "Node %.2d -- %s GCP: %d, NodeGroup: %d, TakeOverNode: %d, "
+ "LCP Ongoing: %s",
+ i,
+ getNSString(Sysfile::getNodeStatus(i,sysfile->nodeStatus)),
+ sysfile->lastCompletedGCI[i],
+ Sysfile::getNodeGroup(i, sysfile->nodeGroups),
+ Sysfile::getTakeOverNode(i, sysfile->takeOver),
+ BitmaskImpl::get(NdbNodeBitmask::Size,
+ sysfile->lcpActive, i) != 0 ? "yes" : "no");
+ ndbout << buf << endl;
+ }
+ }
+}
+
+NDB_COMMAND(printSysfile,
+ "printSysfile", "printSysfile", "Prints a sysfile", 16384){
+ if(argc < 2){
+ usage(argv[0]);
+ return 0;
+ }
+
+ for(int i = 1; i<argc; i++){
+ const char * filename = argv[i];
+
+ struct stat sbuf;
+ const int res = stat(filename, &sbuf);
+ if(res != 0){
+ ndbout << "Could not find file: \"" << filename << "\"" << endl;
+ continue;
+ }
+ const Uint32 bytes = sbuf.st_size;
+
+ Uint32 * buf = new Uint32[bytes/4+1];
+
+ FILE * f = fopen(filename, "rb");
+ if(f == 0){
+ ndbout << "Failed to open file" << endl;
+ delete [] buf;
+ continue;
+ }
+ Uint32 sz = fread(buf, 1, bytes, f);
+ fclose(f);
+ if(sz != bytes){
+ ndbout << "Failure while reading file" << endl;
+ delete [] buf;
+ continue;
+ }
+
+ print(filename, (Sysfile *)&buf[0]);
+ delete [] buf;
+ continue;
+ }
+ return 0;
+}
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
new file mode 100644
index 00000000000..e7debe1f978
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -0,0 +1,2953 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBLQH_H
+#define DBLQH_H
+
+#include <pc.hpp>
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+#include <DLList.hpp>
+#include <DLFifoList.hpp>
+#include <DLHashTable.hpp>
+
+#include <NodeBitmask.hpp>
+#include <signaldata/LCP.hpp>
+#include <signaldata/LqhTransConf.hpp>
+#include <signaldata/LqhFrag.hpp>
+
+// primary key is stored in TUP
+#include <../dbtup/Dbtup.hpp>
+
+#ifdef DBLQH_C
+// Constants
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED WHEN MASTER REQUESTS STATE OF COPY FRAGMENTS. */
+/* ------------------------------------------------------------------------- */
+#define ZCOPY_CLOSING 0
+#define ZCOPY_ONGOING 1
+#define ZCOPY_ACTIVATION 2
+/* ------------------------------------------------------------------------- */
+/* STATES FOR THE VARIABLE GCP_LOG_PART_STATE */
+/* ------------------------------------------------------------------------- */
+#define ZIDLE 0
+#define ZWAIT_DISK 1
+#define ZON_DISK 2
+#define ZACTIVE 1
+/* ------------------------------------------------------------------------- */
+/* STATES FOR THE VARIABLE CSR_PHASES_STARTED */
+/* ------------------------------------------------------------------------- */
+#define ZSR_NO_PHASE_STARTED 0
+#define ZSR_PHASE1_COMPLETED 1
+#define ZSR_PHASE2_COMPLETED 2
+#define ZSR_BOTH_PHASES_STARTED 3
+/* ------------------------------------------------------------------------- */
+/* THE NUMBER OF PAGES IN A MBYTE, THE TWO LOGARITHM OF THIS. */
+/* THE NUMBER OF MBYTES IN A LOG FILE. */
+/* THE MAX NUMBER OF PAGES READ/WRITTEN FROM/TO DISK DURING */
+/* A WRITE OR READ. */
+/* ------------------------------------------------------------------------- */
+#define ZNOT_DIRTY 0
+#define ZDIRTY 1
+#define ZREAD_AHEAD_SIZE 8
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS OF THE LOG PAGES */
+/* ------------------------------------------------------------------------- */
+#define ZPAGE_HEADER_SIZE 32
+#define ZNO_MBYTES_IN_FILE 16
+#define ZPAGE_SIZE 8192
+#define ZPAGES_IN_MBYTE 32
+#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
+#define ZTWOLOG_PAGE_SIZE 13
+#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
+
+#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
+#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
+#define ZMIN_LOG_PAGES_OPERATION 10 // Minimum no of pages before stopping
+
+#define ZPOS_CHECKSUM 0
+#define ZPOS_LOG_LAP 1
+#define ZPOS_MAX_GCI_COMPLETED 2
+#define ZPOS_MAX_GCI_STARTED 3
+#define ZNEXT_PAGE 4
+#define ZPREV_PAGE 5
+#define ZPOS_VERSION 6
+#define ZPOS_NO_LOG_FILES 7
+#define ZCURR_PAGE_INDEX 8
+#define ZLAST_LOG_PREP_REF 10
+#define ZPOS_DIRTY 11
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS FOR THE VARIOUS REPLICA AND NODE TYPES. */
+/* ------------------------------------------------------------------------- */
+#define ZPRIMARY_NODE 0
+#define ZBACKUP_NODE 1
+#define ZSTANDBY_NODE 2
+#define ZTC_NODE 3
+#define ZLOG_NODE 3
+/* ------------------------------------------------------------------------- */
+/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */
+/* ------------------------------------------------------------------------- */
+#define ZOPEN_READ 0
+#define ZOPEN_WRITE 1
+#define ZOPEN_READ_WRITE 2
+#define ZVAR_NO_LOG_PAGE_WORD 1
+#define ZLIST_OF_PAIRS 0
+#define ZLIST_OF_PAIRS_SYNCH 16
+#define ZARRAY_OF_PAGES 1
+#define ZLIST_OF_MEM_PAGES 2
+#define ZLIST_OF_MEM_PAGES_SYNCH 18
+#define ZCLOSE_NO_DELETE 0
+#define ZCLOSE_DELETE 1
+#define ZPAGE_ZERO 0
+/* ------------------------------------------------------------------------- */
+/* THE FOLLOWING CONSTANTS ARE USED TO DESCRIBE THE TYPES OF */
+/* LOG RECORDS, THE SIZE OF THE VARIOUS LOG RECORD TYPES AND */
+/* THE POSITIONS WITHIN THOSE LOG RECORDS. */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DESCRIBE THE SIZES OF VARIOUS TYPES OF LOG RECORDS. */
+/* NEXT_LOG_SIZE IS ACTUALLY ONE. THE REASON WE SET IT TO 2 IS TO */
+/* SIMPLIFY THE CODE SINCE OTHERWISE HAVE TO USE A SPECIAL VERSION */
+/* OF READ_LOGWORD WHEN READING LOG RECORD TYPE */
+/* SINCE NEXT MBYTE TYPE COULD BE THE VERY LAST WORD IN THE MBYTE. */
+/* BY SETTING IT TO 2 WE ENSURE IT IS NEVER THE VERY LAST WORD */
+/* IN THE MBYTE. */
+/* ------------------------------------------------------------------------- */
+#define ZFD_HEADER_SIZE 3
+#define ZFD_PART_SIZE 48
+#define ZLOG_HEAD_SIZE 6
+#define ZNEXT_LOG_SIZE 2
+#define ZABORT_LOG_SIZE 3
+#define ZCOMMIT_LOG_SIZE 9
+#define ZCOMPLETED_GCI_LOG_SIZE 2
+/* ------------------------------------------------------------------------- */
+/* THESE CONSTANTS DESCRIBE THE TYPE OF A LOG RECORD. */
+/* THIS IS THE FIRST WORD OF A LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZNEW_PREP_OP_TYPE 0
+#define ZPREP_OP_TYPE 1
+#define ZCOMMIT_TYPE 2
+#define ZABORT_TYPE 3
+#define ZFD_TYPE 4
+#define ZFRAG_SPLIT_TYPE 5
+#define ZNEXT_LOG_RECORD_TYPE 6
+#define ZNEXT_MBYTE_TYPE 7
+#define ZCOMPLETED_GCI_TYPE 8
+#define ZINVALID_COMMIT_TYPE 9
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS OF LOGGED DATA IN A FILE DESCRIPTOR LOG RECORD HEADER.*/
+/* ALSO THE MAXIMUM NUMBER OF FILE DESCRIPTORS IN A LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_LOG_TYPE 0
+#define ZPOS_NO_FD 1
+#define ZPOS_FILE_NO 2
+#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */
+/* LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_HASH_VALUE 2
+#define ZPOS_SCHEMA_VERSION 3
+#define ZPOS_TRANS_TICKET 4
+#define ZPOS_OP_TYPE 5
+#define ZPOS_NO_ATTRINFO 6
+#define ZPOS_NO_KEYINFO 7
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A COMMIT LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_COMMIT_TRANSID1 1
+#define ZPOS_COMMIT_TRANSID2 2
+#define ZPOS_COMMIT_GCI 3
+#define ZPOS_COMMIT_TABLE_REF 4
+#define ZPOS_COMMIT_FRAGID 5
+#define ZPOS_COMMIT_FILE_NO 6
+#define ZPOS_COMMIT_START_PAGE_NO 7
+#define ZPOS_COMMIT_START_PAGE_INDEX 8
+#define ZPOS_COMMIT_STOP_PAGE_NO 9
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A ABORT LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_ABORT_TRANSID1 1
+#define ZPOS_ABORT_TRANSID2 2
+/* ------------------------------------------------------------------------- */
+/* THE POSITION WITHIN A COMPLETED GCI LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_COMPLETED_GCI 1
+/* ------------------------------------------------------------------------- */
+/* THE POSITIONS WITHIN A NEW PREPARE LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+#define ZPOS_NEW_PREP_FILE_NO 8
+#define ZPOS_NEW_PREP_PAGE_REF 9
+
+#define ZLAST_WRITE_IN_FILE 1
+#define ZENFORCE_WRITE 2
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED AS INPUT TO SUBROUTINE WRITE_LOG_PAGES AMONG OTHERS. */
+/* ------------------------------------------------------------------------- */
+#define ZNORMAL 0
+#define ZINIT 1
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY CONTINUEB TO DEDUCE WHICH CONTINUE SIGNAL IS TO */
+/* BE EXECUTED AS A RESULT OF THIS CONTINUEB SIGNAL. */
+/* ------------------------------------------------------------------------- */
+#define ZLOG_LQHKEYREQ 0
+#define ZPACK_LQHKEYREQ 1
+#define ZSEND_ATTRINFO 2
+#define ZSR_GCI_LIMITS 3
+#define ZSR_LOG_LIMITS 4
+#define ZSEND_EXEC_CONF 5
+#define ZEXEC_SR 6
+#define ZSR_FOURTH_COMP 7
+#define ZINIT_FOURTH 8
+#define ZTIME_SUPERVISION 9
+#define ZSR_PHASE3_START 10
+#define ZLQH_TRANS_NEXT 11
+#define ZLQH_RELEASE_AT_NODE_FAILURE 12
+#define ZSCAN_TC_CONNECT 13
+#define ZINITIALISE_RECORDS 14
+#define ZINIT_GCP_REC 15
+#define ZRESTART_OPERATIONS_AFTER_STOP 16
+#define ZCHECK_LCP_STOP_BLOCKED 17
+#define ZSCAN_MARKERS 18
+#define ZOPERATION_EVENT_REP 19
+#define ZPREP_DROP_TABLE 20
+
+/* ------------------------------------------------------------------------- */
+/* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */
+/* AND CNODES_EXEC_SR_STATE. */
+/* ------------------------------------------------------------------------- */
+#define ZSTART_SR 1
+#define ZEXEC_SR_COMPLETED 2
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY NODE STATUS TO DEDUCE THE STATUS OF A NODE. */
+/* ------------------------------------------------------------------------- */
+#define ZNODE_UP 0
+#define ZNODE_DOWN 1
+/* ------------------------------------------------------------------------- */
+/* START PHASES */
+/* ------------------------------------------------------------------------- */
+#define ZLAST_START_PHASE 255
+#define ZSTART_PHASE1 1
+#define ZSTART_PHASE2 2
+#define ZSTART_PHASE3 3
+#define ZSTART_PHASE4 4
+#define ZSTART_PHASE6 6
+/* ------------------------------------------------------------------------- */
+/* CONSTANTS USED BY SCAN AND COPY FRAGMENT PROCEDURES */
+/* ------------------------------------------------------------------------- */
+#define ZSTORED_PROC_SCAN 0
+#define ZSTORED_PROC_COPY 2
+#define ZDELETE_STORED_PROC_ID 3
+//#define ZSCAN_NEXT 1
+//#define ZSCAN_NEXT_COMMIT 2
+//#define ZSCAN_NEXT_ABORT 12
+#define ZCOPY_COMMIT 3
+#define ZCOPY_REPEAT 4
+#define ZCOPY_ABORT 5
+#define ZCOPY_CLOSE 6
+//#define ZSCAN_CLOSE 6
+//#define ZEMPTY_FRAGMENT 0
+#define ZWRITE_LOCK 1
+#define ZSCAN_FRAG_CLOSED 2
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.1 AND 0.2 */
+/* ------------------------------------------------------------------------- */
+#define ZNOT_FOUND 1 // Not an error code, a return value
+#define ZNO_FREE_LQH_CONNECTION 414
+#define ZGET_DATAREC_ERROR 418
+#define ZGET_ATTRINBUF_ERROR 419
+#define ZNO_FREE_FRAGMENTREC 460 // Insert new fragment error code
+#define ZTAB_FILE_SIZE 464 // Insert new fragment error code + Start kernel
+#define ZNO_ADD_FRAGREC 465 // Insert new fragment error code
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.3 */
+/* ------------------------------------------------------------------------- */
+#define ZTAIL_PROBLEM_IN_LOG_ERROR 410
+#define ZGCI_TOO_LOW_ERROR 429 // GCP_SAVEREF error code
+#define ZTAB_STATE_ERROR 474 // Insert new fragment error code
+#define ZTOO_NEW_GCI_ERROR 479 // LCP Start error
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 0.4 */
+/* ------------------------------------------------------------------------- */
+
+#define ZNO_FREE_FRAG_SCAN_REC_ERROR 490 // SCAN_FRAGREF error code
+#define ZCOPY_NO_FRAGMENT_ERROR 491 // COPY_FRAGREF error code
+#define ZTAKE_OVER_ERROR 499
+#define ZCOPY_NODE_ERROR 1204
+#define ZTOO_MANY_COPY_ACTIVE_ERROR 1208 // COPY_FRAG and COPY_ACTIVEREF code
+#define ZCOPY_ACTIVE_ERROR 1210 // COPY_ACTIVEREF error code
+#define ZNO_TC_CONNECT_ERROR 1217 // Simple Read + SCAN
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 1.X */
+/* ------------------------------------------------------------------------- */
+//#define ZSCAN_BOOK_ACC_OP_ERROR 1219 // SCAN_FRAGREF error code
+#define ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR 1220
+#define ZTEMPORARY_REDO_LOG_FAILURE 1221
+#define ZNO_FREE_MARKER_RECORDS_ERROR 1222
+#define ZNODE_SHUTDOWN_IN_PROGESS 1223
+#define ZTOO_MANY_FRAGMENTS 1224
+#define ZTABLE_NOT_DEFINED 1225
+#define ZDROP_TABLE_IN_PROGRESS 1226
+#define ZINVALID_SCHEMA_VERSION 1227
+
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES ADDED IN VERSION 2.X */
+/* ------------------------------------------------------------------------- */
+#define ZNODE_FAILURE_ERROR 400
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES FROM ACC */
+/* ------------------------------------------------------------------------- */
+#define ZNO_TUPLE_FOUND 626
+#define ZTUPLE_ALREADY_EXIST 630
+/* ------------------------------------------------------------------------- */
+/* ERROR CODES FROM TUP */
+/* ------------------------------------------------------------------------- */
+#define ZSEARCH_CONDITION_FALSE 899
+#define ZUSER_ERROR_CODE_LIMIT 6000
+#endif
+
+/**
+ * @class dblqh
+ *
+ * @section secIntro Introduction
+ *
+ * Dblqh is the coordinator of the LDM. Dblqh is responsible for
+ * performing operations on tuples. It does this job with help of
+ * Dbacc block (that manages the index structures) and Dbtup
+ * (that manages the tuples).
+ *
+ * Dblqh also keeps track of the participants and acts as a coordinator of
+ * 2-phase commits. Logical redo logging is also handled by the Dblqh
+ * block.
+ *
+ * @section secModules Modules
+ *
+ * The code is partitioned into the following modules:
+ * - START / RESTART
+ * - Start phase 1: Load our block reference and our processor id
+ * - Start phase 2: Initiate all records within the block
+ * Connect LQH with ACC and TUP.
+ * - Start phase 4: Connect LQH with LQH. Connect every LQH with
+ * every LQH in the database system.
+ * If initial start, then create the fragment log files.
+ * If system restart or node restart,
+ * then open the fragment log files and
+ * find the end of the log files.
+ * - ADD / DELETE FRAGMENT<br>
+ * Used by dictionary to create new fragments and delete old fragments.
+ * - EXECUTION<br>
+ * Handles the reception of lqhkeyreq and all processing
+ * of operations on behalf of this request.
+ * This also involves reception of various types of attrinfo
+ * and keyinfo.
+ * It also involves communication with ACC and TUP.
+ * - LOG<br>
+ * The log module handles the reading and writing of the log.
+ * It is also responsible for handling system restart.
+ * It controls the system restart in TUP and ACC as well.
+ * - TRANSACTION<br>
+ * This module handles the commit and the complete phases.
+ * - MODULE TO HANDLE TC FAILURE<br>
+ * - SCAN<br>
+ * This module contains the code that handles a scan of a particular
+ * fragment.
+ * It operates under the control of TC and orders ACC to
+ * perform a scan of all tuples in the fragment.
+ * TUP evaluates the necessary search conditions
+ * to ensure that only valid tuples are returned to the application.
+ * - NODE RECOVERY<br>
+ * Used when a node has failed.
+ * It performs a copy of a fragment to a new replica of the fragment.
+ * It also shuts down all connections to the failed node.
+ * - LOCAL CHECKPOINT<br>
+ * Handles execution and control of LCPs
+ * It controls the LCPs in TUP and ACC.
+ * It also interacts with DIH to control which GCPs are recoverable.
+ * - GLOBAL CHECKPOINT<br>
+ * Helps DIH in discovering when GCPs are recoverable.
+ * It handles the request gcp_savereq that requests LQH to
+ * save a particular GCP to disk and respond when completed.
+ * - FILE HANDLING<br>
+ * With submodules:
+ * - SIGNAL RECEPTION
+ * - NORMAL OPERATION
+ * - FILE CHANGE
+ * - INITIAL START
+ * - SYSTEM RESTART PHASE ONE
+ * - SYSTEM RESTART PHASE TWO,
+ * - SYSTEM RESTART PHASE THREE
+ * - SYSTEM RESTART PHASE FOUR
+ * - ERROR
+ * - TEST
+ * - LOG
+ */
+class Dblqh: public SimulatedBlock {
+public:
+ enum LcpCloseState {
+ LCP_IDLE = 0,
+ LCP_RUNNING = 1, // LCP is running
+ LCP_CLOSE_STARTED = 2, // Completion(closing of files) has started
+ ACC_LCP_CLOSE_COMPLETED = 3,
+ TUP_LCP_CLOSE_COMPLETED = 4
+ };
+
+ enum ExecUndoLogState {
+ EULS_IDLE = 0,
+ EULS_STARTED = 1,
+ EULS_COMPLETED = 2,
+ EULS_ACC_COMPLETED = 3,
+ EULS_TUP_COMPLETED = 4
+ };
+
+ struct AddFragRecord {
+ enum AddFragStatus {
+ FREE = 0,
+ ACC_ADDFRAG = 1,
+ WAIT_TWO_TUP = 2,
+ WAIT_ONE_TUP = 3,
+ WAIT_TWO_TUX = 4,
+ WAIT_ONE_TUX = 5,
+ WAIT_ADD_ATTR = 6,
+ TUP_ATTR_WAIT1 = 7,
+ TUP_ATTR_WAIT2 = 8,
+ TUX_ATTR_WAIT1 = 9,
+ TUX_ATTR_WAIT2 = 10
+ };
+ LqhAddAttrReq::Entry attributes[LqhAddAttrReq::MAX_ATTRIBUTES];
+ UintR accConnectptr;
+ AddFragStatus addfragStatus;
+ UintR dictConnectptr;
+ UintR fragmentPtr;
+ UintR nextAddfragrec;
+ UintR noOfAllocPages;
+ UintR schemaVer;
+ UintR tup1Connectptr;
+ UintR tup2Connectptr;
+ UintR tux1Connectptr;
+ UintR tux2Connectptr;
+ UintR checksumIndicator;
+ UintR GCPIndicator;
+ BlockReference dictBlockref;
+ Uint32 m_senderAttrPtr;
+ Uint16 addfragErrorCode;
+ Uint16 attrSentToTup;
+ Uint16 attrReceived;
+ Uint16 addFragid;
+ Uint16 fragid1;
+ Uint16 fragid2;
+ Uint16 noOfAttr;
+ Uint16 noOfNull;
+ Uint16 tabId;
+ Uint16 totalAttrReceived;
+ Uint16 fragCopyCreation;
+ Uint16 noOfKeyAttr;
+ Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint16 noOfAttributeGroups;
+ Uint16 lh3DistrBits;
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ };// Size 108 bytes
+ typedef Ptr<AddFragRecord> AddFragRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ ATTRIBUTE INFORMATION RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * Can contain one (1) attrinfo signal.
+ * One signal contains 24 attr. info words.
+ * But 32 elements are used to make PLEX happy.
+ * Some of the elements are used for the following things:
+ * - Data length in this record is stored in the
+ * element indexed by ZINBUF_DATA_LEN.
+ * - Next attrinbuf is pointed out by the element
+ * indexed by ZINBUF_NEXT.
+ */
+ struct Attrbuf {
+ UintR attrbuf[32];
+ }; // Size 128 bytes
+ typedef Ptr<Attrbuf> AttrbufPtr;
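+
+ /**
+ * Illustrative sketch, not part of the original block code: walking a
+ * chain of attrinfo buffers as described above. ZINBUF_DATA_LEN and
+ * ZINBUF_NEXT are assumed to be the element indices defined elsewhere
+ * in this block; getAttrbuf() is a hypothetical helper that returns
+ * the Attrbuf stored at a given record index.
+ *
+ * Uint32 bufI = firstAttrinbuf; // head of the chain
+ * while (bufI != RNIL) {
+ * Attrbuf* buf = getAttrbuf(bufI); // hypothetical lookup
+ * Uint32 len = buf->attrbuf[ZINBUF_DATA_LEN]; // words used in this buffer
+ * // consume buf->attrbuf[0 .. len-1] as attrinfo data
+ * bufI = buf->attrbuf[ZINBUF_NEXT]; // next buffer or RNIL
+ * }
+ */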
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ DATA BUFFER $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This buffer is used as a general data storage.
+ */
+ struct Databuf {
+ UintR data[4];
+ UintR nextDatabuf;
+ }; // size 20 bytes
+ typedef Ptr<Databuf> DatabufPtr;
+
+ struct ScanRecord {
+ enum ScanState {
+ SCAN_FREE = 0,
+ WAIT_STORED_PROC_COPY = 1,
+ WAIT_STORED_PROC_SCAN = 2,
+ WAIT_NEXT_SCAN_COPY = 3,
+ WAIT_NEXT_SCAN = 4,
+ WAIT_DELETE_STORED_PROC_ID_SCAN = 5,
+ WAIT_DELETE_STORED_PROC_ID_COPY = 6,
+ WAIT_ACC_COPY = 7,
+ WAIT_ACC_SCAN = 8,
+ WAIT_SCAN_NEXTREQ = 10,
+ WAIT_CLOSE_SCAN = 12,
+ WAIT_CLOSE_COPY = 13,
+ WAIT_RELEASE_LOCK = 14,
+ WAIT_TUPKEY_COPY = 15,
+ WAIT_LQHKEY_COPY = 16,
+ IN_QUEUE = 17
+ };
+ enum ScanType {
+ ST_IDLE = 0,
+ SCAN = 1,
+ COPY = 2
+ };
+
+ UintR scan_acc_op_ptr[32];
+ Uint32 scan_acc_index;
+ Uint32 scan_acc_attr_recs;
+ UintR scanApiOpPtr;
+ UintR scanLocalref[2];
+
+ Uint32 m_max_batch_size_rows;
+ Uint32 m_max_batch_size_bytes;
+
+ Uint32 m_curr_batch_size_rows;
+ Uint32 m_curr_batch_size_bytes;
+
+ bool check_scan_batch_completed() const;
+
+ UintR copyPtr;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ Uint32 nextHash;
+ Uint32 prevHash;
+ bool equal(const ScanRecord & key) const {
+ return scanNumber == key.scanNumber && fragPtrI == key.fragPtrI;
+ }
+ Uint32 hashValue() const {
+ return fragPtrI ^ scanNumber;
+ }
+
+ UintR scanAccPtr;
+ UintR scanAiLength;
+ UintR scanErrorCounter;
+ UintR scanLocalFragid;
+ UintR scanSchemaVersion;
+
+ /**
+ * This is _always_ main table, even in range scan
+ * in which case scanTcrec->fragmentptr is different
+ */
+ Uint32 fragPtrI;
+ UintR scanStoredProcId;
+ ScanState scanState;
+ UintR scanTcrec;
+ ScanType scanType;
+ BlockReference scanApiBlockref;
+ NodeId scanNodeId;
+ Uint16 scanReleaseCounter;
+ Uint16 scanNumber;
+
+ Uint8 scanCompletedStatus;
+ Uint8 scanFlag;
+ Uint8 scanLockHold;
+ Uint8 scanLockMode;
+ Uint8 readCommitted;
+ Uint8 rangeScan;
+ Uint8 descending;
+ Uint8 scanTcWaiting;
+ Uint8 scanKeyinfoFlag;
+ Uint8 m_last_row;
+ }; // Size 272 bytes
+ typedef Ptr<ScanRecord> ScanRecordPtr;
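+
+ /**
+ * Illustrative sketch (an assumption, not the block's actual code):
+ * the batch bookkeeping above suggests that check_scan_batch_completed()
+ * compares the current batch counters against the configured maxima,
+ * roughly as follows.
+ *
+ * bool completed =
+ * (m_max_batch_size_rows > 0 &&
+ * m_curr_batch_size_rows >= m_max_batch_size_rows) ||
+ * (m_max_batch_size_bytes > 0 &&
+ * m_curr_batch_size_bytes >= m_max_batch_size_bytes);
+ */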
+
+ struct Fragrecord {
+ enum ExecSrStatus {
+ IDLE = 0,
+ ACTIVE_REMOVE_AFTER = 1,
+ ACTIVE = 2
+ };
+ /**
+ * Possible state transitions are:
+ * - FREE -> DEFINED Fragment record is allocated
+ * - DEFINED -> ACTIVE Add fragment is completed and
+ * fragment is ready to
+ * receive operations.
+ * - DEFINED -> ACTIVE_CREATION Add fragment is completed and
+ * fragment is ready to
+ * receive operations in parallel
+ * with a copy fragment
+ * which is performed from the
+ * primary replica
+ * - DEFINED -> CRASH_RECOVERING A fragment is ready to be
+ * recovered from a local
+ * checkpoint on disk
+ * - ACTIVE -> BLOCKED A local checkpoint is to be
+ * started. No more operations
+ * are allowed to be started until
+ * the local checkpoint
+ * has been started.
+ * - ACTIVE -> REMOVING A fragment is removed from the node
+ * - BLOCKED -> ACTIVE Operations are allowed again in
+ * the fragment.
+ * - CRASH_RECOVERING -> ACTIVE A fragment has been recovered and
+ * is now ready for
+ * operations again.
+ * - CRASH_RECOVERING -> REMOVING Fragment recovery failed or
+ * was cancelled.
+ * - ACTIVE_CREATION -> ACTIVE The fragment has been copied and is
+ * now a normal fragment
+ * - ACTIVE_CREATION -> REMOVING Copying of the fragment failed
+ * - REMOVING -> FREE Removing of the fragment is
+ * completed and the fragment
+ * is now free again.
+ */
+ enum FragStatus {
+ FREE = 0, ///< Fragment record is currently not in use
+ FSACTIVE = 1, ///< Fragment is defined and usable for operations
+ DEFINED = 2, ///< Fragment is defined but not yet usable by
+ ///< operations
+ BLOCKED = 3, ///< LQH is waiting for all active operations to
+ ///< complete the current phase so that the
+ ///< local checkpoint can be started.
+ ACTIVE_CREATION = 4, ///< Fragment is defined and active but is under
+ ///< creation by the primary LQH.
+ CRASH_RECOVERING = 5, ///< Fragment is recovering after a crash by
+ ///< executing the fragment log and so forth.
+ ///< Will need further breakdown.
+ REMOVING = 6 ///< The fragment is currently being removed.
+ ///< Operations are not allowed.
+ };
+ enum LogFlag {
+ STATE_TRUE = 0,
+ STATE_FALSE = 1
+ };
+ enum SrStatus {
+ SS_IDLE = 0,
+ SS_STARTED = 1,
+ SS_COMPLETED = 2
+ };
+ enum LcpFlag {
+ LCP_STATE_TRUE = 0,
+ LCP_STATE_FALSE = 1
+ };
+ /**
+ * Last GCI for executing the fragment log in this phase.
+ */
+ UintR execSrLastGci[4];
+ /**
+ * Start GCI for executing the fragment log in this phase.
+ */
+ UintR execSrStartGci[4];
+ /**
+ * Requesting user pointer for executing the fragment log in
+ * this phase
+ */
+ UintR execSrUserptr[4];
+ /**
+ * The LCP identifiers of the LCPs.
+ * =0 means that the LCP number has not been stored.
+ * The LCP identifier is supplied by DIH when starting the LCP.
+ */
+ UintR lcpId[MAX_LCP_STORED];
+ UintR maxGciInLcp;
+ /**
+ * This variable contains the maximum global checkpoint
+ * identifier that exists in a certain local checkpoint.
+ * A maximum of 4 local checkpoints is possible in this release.
+ */
+ UintR maxGciCompletedInLcp;
+ UintR srLastGci[4];
+ UintR srStartGci[4];
+ /**
+ * The fragment pointers in ACC
+ */
+ UintR accFragptr[2];
+ /**
+ * The EXEC_SR variables are used to keep track of which fragments
+ * are interested in being executed as part of executing the
+ * fragment log.
+ * They are initialised for every phase of executing the
+ * fragment log (the fragment log can be executed up to four times).
+ *
+ * Each execution is capable of executing the log records on four
+ * fragment replicas.
+ */
+ /**
+ * Requesting block reference for executing the fragment log
+ * in this phase.
+ */
+ BlockReference execSrBlockref[4];
+ /**
+ * This variable contains references to active scan and copy
+ * fragment operations on the fragment.
+ * A maximum of four concurrently active operations is allowed.
+ */
+ typedef Bitmask<4> ScanNumberMask;
+ ScanNumberMask m_scanNumberMask;
+ DLList<ScanRecord>::Head m_activeScans;
+ DLFifoList<ScanRecord>::Head m_queuedScans;
+
+ Uint16 srLqhLognode[4];
+ /**
+ * The fragment pointers in TUP and TUX
+ */
+ UintR tupFragptr[2];
+ UintR tuxFragptr[2];
+ /**
+ * This queue is where operations are put when blocked in ACC
+ * during the start of a local checkpoint.
+ */
+ UintR accBlockedList;
+ /**
+ * This is the queue where all operations that are active on the
+ * fragment are put.
+ * This is used to deduce when the fragment no
+ * longer contains any active operations.
+ * This is needed when starting a local checkpoint.
+ */
+ UintR activeList;
+ /**
+ * This variable keeps track of how many active operations have
+ * skipped writing the log but have not yet committed
+ * or aborted. This is used during start of fragment.
+ */
+ UintR activeTcCounter;
+ /**
+ * This status specifies whether this fragment is actively
+ * engaged in executing the fragment log.
+ */
+ ExecSrStatus execSrStatus;
+ /**
+ * The fragment id of this fragment.
+ */
+ UintR fragId;
+ /**
+ * Status of fragment
+ */
+ FragStatus fragStatus;
+ /**
+ * Indicates a local checkpoint is active and thus can generate
+ * UNDO log records.
+ */
+ UintR fragActiveStatus;
+ /**
+ * Reference to current LCP record.
+ * If no LCP is ongoing on the fragment then the value is RNIL.
+ * If LCP_REF /= RNIL then a local checkpoint is ongoing in the
+ * fragment.
+ * LCP_STATE in LCP_RECORD specifies the state of the
+ * local checkpoint.
+ */
+ UintR lcpRef;
+ /**
+ * This flag indicates whether logging is currently activated at
+ * the fragment.
+ * During a system restart it is temporarily shut off.
+ * Some fragments have it permanently shut off.
+ */
+ LogFlag logFlag;
+ UintR masterPtr;
+ /**
+ * (Refers to maxGciCompletedInLcp above.) It contains the maximum
+ * global checkpoint identifier which was completed when the local
+ * checkpoint was started.
+ */
+ /**
+ * Reference to the next fragment record in a free list of fragment
+ * records.
+ */
+ UintR nextFrag;
+ /**
+ * The newest GCI that has been committed on fragment
+ */
+ UintR newestGci;
+ SrStatus srStatus;
+ UintR srUserptr;
+ /**
+ * The starting global checkpoint of this fragment.
+ */
+ UintR startGci;
+ /**
+ * A reference to the table owning this fragment.
+ */
+ UintR tabRef;
+ /**
+ * This is the queue where operations that have been blocked
+ * during the start of a local checkpoint are put.
+ */
+ UintR firstWaitQueue;
+ UintR lastWaitQueue;
+ /**
+ * The block reference to ACC on the fragment makes it
+ * possible to have different ACC blocks for different
+ * fragments in the future.
+ */
+ BlockReference accBlockref;
+ /**
+ * Ordered index block.
+ */
+ BlockReference tuxBlockref;
+ /**
+ * The master block reference as sent in COPY_ACTIVEREQ.
+ */
+ BlockReference masterBlockref;
+ /**
+ * These variables are used during system restart to recall
+ * from which node to execute the fragment log and which GCI's
+ * this node should start and stop from. Also to remember who
+ * to send the response to when system restart is completed.
+ */
+ BlockReference srBlockref;
+ /**
+ * The block reference to TUP on the fragment makes it
+ * possible to have different TUP blocks for different
+ * fragments in the future.
+ */
+ BlockReference tupBlockref;
+ /**
+ * This state indicates if the fragment will participate in a
+ * checkpoint.
+ * Temporary tables with Fragrecord::logFlag permanently off
+ * will also have Fragrecord::lcpFlag off.
+ */
+ LcpFlag lcpFlag;
+ /**
+ * Used to ensure that updates started with the old
+ * configuration do not arrive here after the copy fragment
+ * has started.
+ * If they were allowed to arrive afterwards they
+ * could update a record that has already been replicated to
+ * the new node. This type of arrival should be extremely
+ * rare but we must ensure that no harm is done anyway.
+ */
+ Uint16 copyNode;
+ /**
+ * This variable ensures that only one copy fragment is
+ * active at a time on the fragment.
+ */
+ Uint8 copyFragState;
+ /**
+ * The number of fragment replicas that will execute the log
+ * records in this round of executing the fragment
+ * log. Maximum four is possible.
+ */
+ Uint8 execSrNoReplicas;
+ /**
+ * This variable contains what type of replica this fragment
+ * is. Two types are possible:
+ * - Primary/Backup replica = 0
+ * - Stand-by replica = 1
+ *
+ * It is not possible to distinguish between primary and
+ * backup on a fragment.
+ * This can only be done per transaction.
+ * DIH can change from primary to backup without informing
+ * the various replicas about this change.
+ */
+ Uint8 fragCopy;
+ /**
+ * This is the last fragment distribution key that we have
+ * heard of.
+ */
+ Uint8 fragDistributionKey;
+ /**
+ * The identity of the next local checkpoint this fragment
+ * should perform.
+ */
+ Uint8 nextLcp;
+ /**
+ * How many local checkpoints the fragment contains.
+ */
+ Uint8 srChkpnr;
+ Uint8 srNoLognodes;
+ /**
+ * Table type.
+ */
+ Uint8 tableType;
+ /**
+ * For ordered index fragment, i-value of corresponding
+ * fragment in primary table.
+ */
+ UintR tableFragptr;
+ };
+ typedef Ptr<Fragrecord> FragrecordPtr;
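+
+ /**
+ * Illustrative sketch, not part of the original header: the transition
+ * list documented above (where ACTIVE corresponds to the enum value
+ * FSACTIVE) could be checked by a hypothetical helper before a new
+ * value is assigned to Fragrecord::fragStatus.
+ *
+ * bool validTransition(Fragrecord::FragStatus from,
+ * Fragrecord::FragStatus to) {
+ * switch (from) {
+ * case Fragrecord::FREE: return to == Fragrecord::DEFINED;
+ * case Fragrecord::DEFINED: return to == Fragrecord::FSACTIVE ||
+ * to == Fragrecord::ACTIVE_CREATION ||
+ * to == Fragrecord::CRASH_RECOVERING;
+ * case Fragrecord::FSACTIVE: return to == Fragrecord::BLOCKED ||
+ * to == Fragrecord::REMOVING;
+ * // ... remaining cases follow the transition list above ...
+ * default: return false;
+ * }
+ * }
+ */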
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ GLOBAL CHECKPOINT RECORD $$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record describes a global checkpoint that is
+ * completed. It waits for all log records belonging to this
+ * global checkpoint to be saved on disk.
+ */
+ struct GcpRecord {
+ /**
+ * The file number within each log part where the log was
+ * located when gcp_savereq was received. The last record
+ * belonging to this global checkpoint is certainly before
+ * this place in the log. We could come even closer but it
+ * would cost performance and doesn't seem like a good
+ * idea. This is simple and it works.
+ */
+ Uint16 gcpFilePtr[4];
+ /**
+ * The page number within the file for each log part.
+ */
+ Uint16 gcpPageNo[4];
+ /**
+ * The word number within the last page that was written for
+ * each log part.
+ */
+ Uint16 gcpWordNo[4];
+ /**
+ * The identity of this global checkpoint.
+ */
+ UintR gcpId;
+ /**
+ * The state of this global checkpoint, one for each log part.
+ */
+ Uint8 gcpLogPartState[4];
+ /**
+ * The sync state of this global checkpoint, one for each
+ * log part.
+ */
+ Uint8 gcpSyncReady[4];
+ /**
+ * User pointer of the sender of gcp_savereq (= master DIH).
+ */
+ UintR gcpUserptr;
+ /**
+ * Block reference of the sender of gcp_savereq
+ * (= master DIH).
+ */
+ BlockReference gcpBlockref;
+ }; // Size 44 bytes
+ typedef Ptr<GcpRecord> GcpRecordPtr;
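+
+ /**
+ * Illustrative sketch (an assumption, not the original code): the GCP
+ * is safe on disk once every log part has synched, so the completion
+ * check could look roughly like this, after which the response is
+ * sent to the sender stored in gcpBlockref / gcpUserptr.
+ *
+ * bool gcpSaved = true;
+ * for (Uint32 part = 0; part < 4; part++) {
+ * if (gcpRec->gcpSyncReady[part] == 0) // 0 assumed to mean "not synched"
+ * gcpSaved = false;
+ * }
+ */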
+
+ struct HostRecord {
+ bool inPackedList;
+ UintR noOfPackedWordsLqh;
+ UintR packedWordsLqh[30];
+ UintR noOfPackedWordsTc;
+ UintR packedWordsTc[29];
+ BlockReference hostLqhBlockRef;
+ BlockReference hostTcBlockRef;
+ };// Size 128 bytes
+ typedef Ptr<HostRecord> HostRecordPtr;
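+
+ /**
+ * Illustrative sketch, not taken from the source: packed words destined
+ * for the LQH block at a host accumulate in packedWordsLqh and are
+ * flushed as one PACKED_SIGNAL when the buffer cannot take the next
+ * entry. sigLen below is a hypothetical length of the entry being
+ * appended; 30 is the declared size of packedWordsLqh.
+ *
+ * if (host->noOfPackedWordsLqh + sigLen > 30) {
+ * // send the accumulated words to host->hostLqhBlockRef and reset
+ * host->noOfPackedWordsLqh = 0;
+ * }
+ * // append sigLen new words at host->packedWordsLqh[host->noOfPackedWordsLqh]
+ */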
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOCAL CHECKPOINT RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains the information about a local
+ * checkpoint that is ongoing. This record is also used as a
+ * system restart record.
+ */
+ struct LcpRecord {
+ LcpRecord() { m_EMPTY_LCP_REQ.clear(); }
+
+ enum LcpState {
+ LCP_IDLE = 0,
+ LCP_STARTED = 1,
+ LCP_COMPLETED = 2,
+ LCP_WAIT_FRAGID = 3,
+ LCP_WAIT_TUP_PREPLCP = 4,
+ LCP_WAIT_HOLDOPS = 5,
+ LCP_WAIT_ACTIVE_FINISH = 6,
+ LCP_START_CHKP = 7,
+ LCP_BLOCKED_COMP = 8,
+ LCP_SR_WAIT_FRAGID = 9,
+ LCP_SR_STARTED = 10,
+ LCP_SR_COMPLETED = 11
+ };
+ Uint32 firstLcpLocAcc;
+ Uint32 firstLcpLocTup;
+ Uint32 lcpAccptr;
+
+ LcpState lcpState;
+ bool lastFragmentFlag;
+
+ struct FragOrd {
+ Uint32 fragPtrI;
+ LcpFragOrd lcpFragOrd;
+ };
+ FragOrd currentFragment;
+
+ bool lcpQueued;
+ FragOrd queuedFragment;
+
+ bool reportEmpty;
+ NdbNodeBitmask m_EMPTY_LCP_REQ;
+ }; // Size 76 bytes
+ typedef Ptr<LcpRecord> LcpRecordPtr;
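+
+ /**
+ * Illustrative sketch (assumption): lcpQueued and queuedFragment suggest
+ * that a fragment order arriving while an LCP is already busy is
+ * parked and picked up when the current fragment completes. lcpPtr
+ * and incomingFragOrd are hypothetical names.
+ *
+ * if (lcpPtr.p->lcpState != LcpRecord::LCP_IDLE) {
+ * lcpPtr.p->lcpQueued = true; // remember the new order
+ * lcpPtr.p->queuedFragment = incomingFragOrd;
+ * } else {
+ * lcpPtr.p->currentFragment = incomingFragOrd;
+ * // start checkpointing this fragment
+ * }
+ */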
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$ LOCAL CHECKPOINT SUPPORT RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains the information about an outstanding
+ * request to TUP or ACC. Used for both local checkpoints and
+ * system restart.
+ */
+ struct LcpLocRecord {
+ enum LcpLocstate {
+ IDLE = 0,
+ WAIT_TUP_PREPLCP = 1,
+ WAIT_LCPHOLDOP = 2,
+ HOLDOP_READY = 3,
+ ACC_WAIT_STARTED = 4,
+ ACC_STARTED = 5,
+ ACC_COMPLETED = 6,
+ TUP_WAIT_STARTED = 7,
+ TUP_STARTED = 8,
+ TUP_COMPLETED = 9,
+ SR_ACC_STARTED = 10,
+ SR_TUP_STARTED = 11,
+ SR_ACC_COMPLETED = 12,
+ SR_TUP_COMPLETED = 13
+ };
+ enum WaitingBlock {
+ ACC = 0,
+ TUP = 1,
+ NONE = 2
+ };
+
+ LcpLocstate lcpLocstate;
+ UintR locFragid;
+ UintR masterLcpRec;
+ UintR nextLcpLoc;
+ UintR tupRef;
+ WaitingBlock waitingBlock;
+ Uint32 accContCounter;
+ }; // 28 bytes
+ typedef Ptr<LcpLocRecord> LcpLocRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* */
+ /* THE RECORDS THAT START BY LOG_ ARE A PART OF THE LOG MANAGER. */
+ /* THESE RECORDS ARE USED TO HANDLE THE FRAGMENT LOG. */
+ /* */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG RECORD $$$$$$$ */
+ /* */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* THIS RECORD IS ALIGNED TO BE 256 BYTES. */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record describes the current state of a log.
+ * A log consists of a number of log files.
+ * These log files are described by the log file record.
+ *
+ * There will be 4 sets of log files.
+ * Different tables will use different log files depending
+ * on the table id.
+ * This ensures that more than one outstanding request can
+ * be sent to the file system.
+ * The log file to use is found by performing a very simple hash
+ * function.
+ */
+ struct LogPartRecord {
+ enum LogPartState {
+ IDLE = 0, ///< Nothing happens at the moment
+ ACTIVE = 1, ///< An operation is active logging
+ SR_FIRST_PHASE = 2, ///< Finding the end of the log and
+ ///< the information about global
+ ///< checkpoints in the log is ongoing.
+ SR_FIRST_PHASE_COMPLETED = 3, ///< First phase completed
+ SR_THIRD_PHASE_STARTED = 4, ///< Executing fragment log is in 3rd ph
+ SR_THIRD_PHASE_COMPLETED = 5,
+ SR_FOURTH_PHASE_STARTED = 6, ///< Finding the log tail and head
+ ///< is the fourth phase.
+ SR_FOURTH_PHASE_COMPLETED = 7,
+ FILE_CHANGE_PROBLEM = 8, ///< For some reason the write to
+ ///< page zero in file zero has not
+ ///< finished after 15 mbyte of
+ ///< log data has been written
+ TAIL_PROBLEM = 9 ///< Only 1 mbyte of log left.
+ ///< No operations allowed to enter the
+ ///< log. Only special log records
+ ///< are allowed
+ };
+ enum WaitWriteGciLog {
+ WWGL_TRUE = 0,
+ WWGL_FALSE = 1
+ };
+ enum LogExecState {
+ LES_IDLE = 0,
+ LES_SEARCH_STOP = 1,
+ LES_SEARCH_START = 2,
+ LES_EXEC_LOG = 3,
+ LES_EXEC_LOG_NEW_MBYTE = 4,
+ LES_EXEC_LOG_NEW_FILE = 5,
+ LES_EXEC_LOGREC_FROM_FILE = 6,
+ LES_EXEC_LOG_COMPLETED = 7,
+ LES_WAIT_READ_EXEC_SR_NEW_MBYTE = 8,
+ LES_WAIT_READ_EXEC_SR = 9,
+ LES_EXEC_LOG_INVALIDATE = 10
+ };
+
+ /**
+ * Indicates whether a CONTINUEB(ZLOG_LQHKEYREQ) signal has been
+ * sent and is still outstanding. We do not want several instances
+ * of this signal in flight since that would create multiple
+ * writers of the list.
+ */
+ UintR LogLqhKeyReqSent;
+ /**
+ * Contains the current log file where log records are
+ * written. During system restart it is used to indicate the
+ * last log file.
+ */
+ UintR currentLogfile;
+ /**
+ * The log file used to execute log records from far behind.
+ */
+ UintR execSrExecLogFile;
+ /**
+ * The currently executing prepare record starts in this log
+ * page. This variable makes it possible to execute a log record
+ * multiple times during execution of the log.
+ */
+ UintR execSrLogPage;
+ /**
+ * This variable keeps track of the LFO record used for the
+ * pages that were read from disk when an operation's log
+ * record was not found in the main memory buffer for log
+ * pages.
+ */
+ UintR execSrLfoRec;
+ /**
+ * The starting page number when reading log from far behind.
+ */
+ UintR execSrStartPageNo;
+ /**
+ * The last page number when reading log from far behind.
+ */
+ UintR execSrStopPageNo;
+ /**
+ * Contains a reference to the first log file, file number 0.
+ */
+ UintR firstLogfile;
+ /**
+ * The head of the operations queued for logging.
+ */
+ UintR firstLogQueue;
+ /**
+ * This variable contains the oldest operation in this log
+ * part which has not been committed yet.
+ */
+ UintR firstLogTcrec;
+ /**
+ * The first reference to a set of 8 pages. These are used
+ * during execution of the log to keep track of which pages
+ * are in memory and which are not.
+ */
+ UintR firstPageRef;
+ /**
+ * This variable contains the global checkpoint record
+ * waiting for disk writes to complete.
+ */
+ UintR gcprec;
+ /**
+ * The last reference to a set of 8 pages. These are used
+ * during execution of the log to keep track of which pages
+ * are in memory and which are not.
+ */
+ UintR lastPageRef;
+ /**
+ * The tail of the operations queued for logging.
+ */
+ UintR lastLogQueue;
+ /**
+ * This variable contains the newest operation in this log
+ * part which has not been committed yet.
+ */
+ UintR lastLogTcrec;
+ /**
+ * This variable indicates which was the last log file that was
+ * written before the system crashed. Discovered during
+ * system restart.
+ */
+ UintR lastLogfile;
+ /**
+ * This variable is used to keep track of the state during
+ * the third phase of the system restart, i.e. when
+ * LogPartRecord::logPartState ==
+ * LogPartRecord::SR_THIRD_PHASE_STARTED.
+ */
+ LogExecState logExecState;
+ /**
+ * This variable contains the lap number of this log part.
+ */
+ UintR logLap;
+ /**
+ * This variable contains the place to stop executing the log
+ * in this phase.
+ */
+ UintR logLastGci;
+ /**
+ * This variable contains the place to start executing the
+ * log in this phase.
+ */
+ UintR logStartGci;
+ /**
+ * The latest GCI completed in this log part.
+ */
+ UintR logPartNewestCompletedGCI;
+ /**
+ * The current state of this log part.
+ */
+ LogPartState logPartState;
+ /**
+ * A timer that is set every time a log page is sent to disk.
+ * Ensures that log pages are not kept in main memory for
+ * more than a certain time.
+ */
+ UintR logPartTimer;
+ /**
+ * The current timer which is set by the periodic signal
+ * received by LQH
+ */
+ UintR logTimer;
+ /**
+ * Contains the number of the log tail file and the mbyte
+ * reference within that file. This information ensures that
+ * the tail is not overwritten when writing new log records.
+ */
+ UintR logTailFileNo;
+ /**
+ * The TcConnectionrec used during execution of this log part.
+ */
+ UintR logTcConrec;
+ /**
+ * The number of pages that currently reside in the main
+ * memory buffer. It does not refer to pages that are currently
+ * being read from the log files, only to pages already read
+ * from the log file.
+ */
+ UintR mmBufferSize;
+ /**
+ * Contains the current number of log files in this log part.
+ */
+ UintR noLogFiles;
+ /**
+ * This variable is used only during execution of a log
+ * record. It keeps track of the page record in which a log
+ * record was started. It is then used to deduce which
+ * pages are dirty after the log records on the
+ * page have been executed.
+ *
+ * It is also used to find out where to write the invalidate
+ * command when that is needed.
+ */
+ UintR prevLogpage;
+ /**
+ * The number of files remaining to gather GCI information
+ * for during system restart. Only used if the number of files
+ * is larger than 60.
+ */
+ UintR srRemainingFiles;
+ /**
+ * The log file where to start executing the log during
+ * system restart.
+ */
+ UintR startLogfile;
+ /**
+ * The last log file in which to execute the log during system
+ * restart.
+ */
+ UintR stopLogfile;
+ /**
+ * This variable keeps track of when we want to write a complete
+ * gci log record but have been blocked by an ongoing log operation.
+ */
+ WaitWriteGciLog waitWriteGciLog;
+ /**
+ * The currently executing prepare record starts in this index
+ * in the log page.
+ */
+ Uint16 execSrLogPageIndex;
+ /**
+ * Which of the four exec_sr's in the fragment is currently executing
+ */
+ Uint16 execSrExecuteIndex;
+ /**
+ * The number of pages executed in the current mbyte.
+ */
+ Uint16 execSrPagesExecuted;
+ /**
+ * The number of pages read from disk that have arrived and are
+ * currently awaiting execution of the log.
+ */
+ Uint16 execSrPagesRead;
+ /**
+ * The number of pages read from disk that have not yet arrived
+ * at the block.
+ */
+ Uint16 execSrPagesReading;
+ /**
+ * This variable refers to the new header file where we will
+ * start writing the log after a system restart has been completed.
+ */
+ Uint16 headFileNo;
+ /**
+ * This variable refers to the page number within the header file.
+ */
+ Uint16 headPageNo;
+ /**
+ * This variable refers to the index within the new header
+ * page.
+ */
+ Uint16 headPageIndex;
+ /**
+ * This variable indicates which was the last mbyte in the last
+ * logfile before a system crash. Discovered during system restart.
+ */
+ Uint16 lastMbyte;
+ /**
+ * This variable is used only during execution of a log
+ * record. It keeps track of the file page in which a log
+ * record was started. It is used if it is needed to write a
+ * dirty page to disk during log execution (this happens when
+ * commit records are invalidated).
+ */
+ Uint16 prevFilepage;
+ /**
+ * This is used to save where we were in the execution of log
+ * records when we find a commit record that needs to be
+ * executed.
+ *
+ * This variable is also used to remember the index where the
+ * log type was in the log record. It is only used in this
+ * role when finding a commit record that needs to be
+ * invalidated.
+ */
+ Uint16 savePageIndex;
+ Uint8 logTailMbyte;
+ /**
+ * The mbyte within the starting log file where to start
+ * executing the log.
+ */
+ Uint8 startMbyte;
+ /**
+ * The last mbyte in which to execute the log during system
+ * restart.
+ */
+ Uint8 stopMbyte;
+ /**
+ * This variable refers to the file where invalidation is
+ * occurring during system/node restart.
+ */
+ Uint16 invalidateFileNo;
+ /**
+ * This variable refers to the page where invalidation is
+ * occurring during system/node restart.
+ */
+ Uint16 invalidatePageNo;
+ }; // Size 164 Bytes
+ typedef Ptr<LogPartRecord> LogPartRecordPtr;
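+
+ /**
+ * Illustrative sketch, not the actual implementation: with four log
+ * parts and the "very simple hash function" mentioned above, the log
+ * part for a table could be selected along these lines (the masking
+ * by 3 is an assumed hash, not confirmed by this header).
+ *
+ * Uint32 logPartNo = tableId & 3; // low bits of the table id
+ * LogPartRecordPtr partPtr;
+ * partPtr.i = logPartNo;
+ * // fetch the record and append the log entry to its current log file
+ */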
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG FILE RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* THIS RECORD IS ALIGNED TO BE 288 (256 + 32) BYTES. */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains information about a log file.
+ * A log file contains log records from several tables and
+ * fragments of a table. LQH can contain more than
+ * one log file to ensure faster log processing.
+ *
+ * The number of pages to write to disk at a time is
+ * configurable.
+ */
+ struct LogFileRecord {
+ enum FileChangeState {
+ NOT_ONGOING = 0,
+ BOTH_WRITES_ONGOING = 1,
+ LAST_WRITE_ONGOING = 2,
+ FIRST_WRITE_ONGOING = 3,
+ WRITE_PAGE_ZERO_ONGOING = 4
+ };
+ enum LogFileStatus {
+ LFS_IDLE = 0, ///< Log file record not in use
+ CLOSED = 1, ///< Log file closed
+ OPENING_INIT = 2,
+ OPEN_SR_FRONTPAGE = 3, ///< Log file opened as part of system
+ ///< restart. Open file 0 to find
+ ///< the front page of the log part.
+ OPEN_SR_LAST_FILE = 4, ///< Open last log file that was written
+ ///< before the system restart.
+ OPEN_SR_NEXT_FILE = 5, ///< Open a log file which is 16 files
+ ///< backwards to find the next
+ ///< information about GCPs.
+ OPEN_EXEC_SR_START = 6, ///< Log file opened as part of
+ ///< executing
+ ///< log during system restart.
+ OPEN_EXEC_SR_NEW_MBYTE = 7,
+ OPEN_SR_FOURTH_PHASE = 8,
+ OPEN_SR_FOURTH_NEXT = 9,
+ OPEN_SR_FOURTH_ZERO = 10,
+ OPENING_WRITE_LOG = 11, ///< Log file opened as part of writing
+ ///< log during normal operation.
+ OPEN_EXEC_LOG = 12,
+ CLOSING_INIT = 13,
+ CLOSING_SR = 14, ///< Log file closed as part of system
+ ///< restart. Currently trying to
+ ///< find where to start executing the
+ ///< log
+ CLOSING_EXEC_SR = 15, ///< Log file closed as part of
+ ///< executing log during system restart
+ CLOSING_EXEC_SR_COMPLETED = 16,
+ CLOSING_WRITE_LOG = 17, ///< Log file closed as part of writing
+ ///< log during normal operation.
+ CLOSING_EXEC_LOG = 18,
+ OPEN_INIT = 19,
+ OPEN = 20, ///< Log file open
+ OPEN_SR_INVALIDATE_PAGES = 21,
+ CLOSE_SR_INVALIDATE_PAGES = 22
+ };
+
+ /**
+ * When a new mbyte is started in the log we have to find out
+ * how far back in the log we still have prepared operations
+ * which have been neither committed nor aborted. This variable
+ * keeps track of this value for each of the mbytes in this
+ * log file. These values are written down in the
+ * header of each log file. That information is used during
+ * system restart to find the tail of the log.
+ */
+ UintR logLastPrepRef[16];
+ /**
+ * The max global checkpoint completed before the mbyte in the
+ * log file was started. One variable per mbyte.
+ */
+ UintR logMaxGciCompleted[16];
+ /**
+ * The max global checkpoint started before the mbyte in the log
+ * file was started. One variable per mbyte.
+ */
+ UintR logMaxGciStarted[16];
+ /**
+ * This variable contains the file name as needed by the file
+ * system when opening the file.
+ */
+ UintR fileName[4];
+ /**
+ * This variable has a reference to the log page which is
+ * currently in use by the log.
+ */
+ UintR currentLogpage;
+ /**
+ * The number of the current mbyte in the log file.
+ */
+ UintR currentMbyte;
+ /**
+ * This variable is used when changing files. It is used to find
+ * out when both the last write in the previous file and the
+ * first write in this file have been completed. After these
+ * writes have completed, the variable keeps track of when the
+ * write to page zero in file zero is completed.
+ */
+ FileChangeState fileChangeState;
+ /**
+ * The number of the file within this log part.
+ */
+ UintR fileNo;
+ /**
+ * This variable shows where to read/write the next pages into
+ * the log. Used when writing the log during normal operation
+ * and when reading the log during system restart. It
+ * specifies the page position where each page is 8 kbyte.
+ */
+ UintR filePosition;
+ /**
+ * This contains the file pointer needed by the file system
+ * when reading/writing/closing and synching.
+ */
+ UintR fileRef;
+ /**
+ * The head of the pages waiting for shipment to disk.
+ * They are filled with log info.
+ */
+ UintR firstFilledPage;
+ /**
+ * A list of active read/write operations on the log file.
+ * Operations are always appended at the end and the first one
+ * should always complete first.
+ */
+ UintR firstLfo;
+ UintR lastLfo;
+ /**
+ * The tail of the pages waiting for shipment to disk.
+ * They are filled with log info.
+ */
+ UintR lastFilledPage;
+ /**
+ * This variable keeps track of the last written page in the
+ * file while writing page zero in file zero when changing log
+ * file.
+ */
+ UintR lastPageWritten;
+ /**
+ * This variable keeps track of the last written word in the
+ * last page written in the file while writing page zero in
+ * file zero when changing log file.
+ */
+ UintR lastWordWritten;
+ /**
+ * This variable keeps track of the number of pages written since
+ * the last synch on this log file.
+ */
+ UintR logFilePagesToDiskWithoutSynch;
+ /**
+ * The current status of the log file.
+ */
+ LogFileStatus logFileStatus;
+ /**
+ * A reference to page zero in this file.
+ * This page is written before the file is closed.
+ */
+ UintR logPageZero;
+ /**
+ * This variable contains a reference to the record describing
+ * this log part. One of four records (0,1,2 or 3).
+ */
+ UintR logPartRec;
+ /**
+ * Next free log file record or next log file in this log.
+ */
+ UintR nextLogFile;
+ /**
+ * The previous log file.
+ */
+ UintR prevLogFile;
+ /**
+ * The number of remaining words in this mbyte of the log file.
+ */
+ UintR remainingWordsInMbyte;
+ /**
+ * The current file page within the current log file. This is
+ * a reference within the file and not a reference to a log
+ * page record. It is used to deduce where log records are
+ * written. Particularly completed gcp records and prepare log
+ * records.
+ */
+ Uint16 currentFilepage;
+ /**
+ * The number of pages in the list referenced by
+ * LOG_PAGE_BUFFER.
+ */
+ Uint16 noLogpagesInBuffer;
+ }; // Size 288 bytes
+ typedef Ptr<LogFileRecord> LogFileRecordPtr;
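+
+ /**
+ * Illustrative sketch (an assumption, not the original code): when a
+ * new mbyte is started in the file, the per-mbyte arrays described
+ * above would be filled in roughly like this. logFilePtr,
+ * newestCompletedGci, newestStartedGci and oldestPrepRef are
+ * hypothetical names for values taken from the owning log part.
+ *
+ * logFilePtr.p->currentMbyte++;
+ * Uint32 mb = logFilePtr.p->currentMbyte;
+ * logFilePtr.p->logMaxGciCompleted[mb] = newestCompletedGci;
+ * logFilePtr.p->logMaxGciStarted[mb] = newestStartedGci;
+ * logFilePtr.p->logLastPrepRef[mb] = oldestPrepRef; // file no + page ref
+ * logFilePtr.p->remainingWordsInMbyte = wordsPerMbyte; // hypothetical constant
+ */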
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG OPERATION RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * This record contains a currently active file operation
+ * that has been started by the log module.
+ */
+ struct LogFileOperationRecord {
+ enum LfoState {
+ IDLE = 0, ///< Operation is not used at the moment
+ INIT_WRITE_AT_END = 1, ///< Write in file so that it grows to
+ ///< 16 Mbyte
+ INIT_FIRST_PAGE = 2, ///< Initialise the first page in a file
+ WRITE_GCI_ZERO = 3,
+ WRITE_INIT_MBYTE = 4,
+ WRITE_DIRTY = 5,
+ READ_SR_FRONTPAGE = 6, ///< Read page zero in file zero during
+ ///< system restart
+ READ_SR_LAST_FILE = 7, ///< Read page zero in last file open
+ ///< before system crash
+ READ_SR_NEXT_FILE = 8, ///< Read 60 files backwards to find
+ ///< further information about GCPs in page
+ ///< zero
+ READ_SR_LAST_MBYTE = 9,
+ READ_EXEC_SR = 10,
+ READ_EXEC_LOG = 11,
+ READ_SR_FOURTH_PHASE = 12,
+ READ_SR_FOURTH_ZERO = 13,
+ FIRST_PAGE_WRITE_IN_LOGFILE = 14,
+ LAST_WRITE_IN_FILE = 15,
+ WRITE_PAGE_ZERO = 16,
+ ACTIVE_WRITE_LOG = 17, ///< A write operation during
+ ///< writing of log
+ READ_SR_INVALIDATE_PAGES = 18,
+ WRITE_SR_INVALIDATE_PAGES = 19
+ };
+ /**
+ * We have to remember the log pages read.
+ * Otherwise we cannot build the linked list after the pages have
+ * arrived in main memory.
+ */
+ UintR logPageArray[16];
+ /**
+ * A list of the pages that are part of this active operation.
+ */
+ UintR firstLfoPage;
+ /**
+ * A timer to ensure that records are not lost.
+ */
+ UintR lfoTimer;
+ /**
+ * The word number of the last written word in the last page during
+ * a file write.
+ */
+ UintR lfoWordWritten;
+ /**
+ * This variable contains the state of the log file operation.
+ */
+ LfoState lfoState;
+ /**
+ * The log file that the file operation affects.
+ */
+ UintR logFileRec;
+ /**
+ * The log file operations on a file are kept in a linked list.
+ */
+ UintR nextLfo;
+ /**
+ * The page number of the first read/written page during a file
+ * read/write.
+ */
+ Uint16 lfoPageNo;
+ /**
+ * The number of pages written or read during an operation to
+ * the log file.
+ */
+ Uint16 noPagesRw;
+ }; // 92 bytes
+ typedef Ptr<LogFileOperationRecord> LogFileOperationRecordPtr;
+
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /* $$$$$$$ LOG PAGE RECORD $$$$$$$ */
+ /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
+ /**
+ * These are the 8 k pages used to store log records before storing
+ * them in the file system.
+ * Since 64 kbyte is sent to disk at a time it is necessary to have
+ * at least 4*64 kbytes of log pages.
+ * To handle multiple outstanding requests we need some additional pages.
+ * Thus we allocate 1 mbyte to ensure that we do not get problems with
+ * an insufficient number of pages.
+ */
+ struct LogPageRecord {
+ /**
+ * This variable contains the pages that are sent to disk.
+ *
+ * All pages contain a header of 12 words:
+ * - WORD 0: CHECKSUM Calculated before storing on disk and
+ * checked when read from disk.
+ * - WORD 1: LAP How many wraparounds the log has
+ * experienced since the initial start of the
+ * system.
+ * - WORD 2: MAX_GCI_COMPLETED The maximum gci which has
+ * completed before this page. This
+ * gci will not be found in this
+ * page or later in the log.
+ * - WORD 3: MAX_GCI_STARTED The maximum gci which has started
+ * before this page.
+ * - WORD 4: NEXT_PAGE Pointer to the next page.
+ * Only used in main memory
+ * - WORD 5: PREVIOUS_PAGE Pointer to the previous page.
+ * Currently not used.
+ * - WORD 6: VERSION NDB version that wrote the page.
+ * - WORD 7: NO_LOG_FILES Number of log files in this log part.
+ * - WORD 8: CURRENT PAGE INDEX This keeps track of where we are in the
+ * page.
+ * This is only used when the page is in
+ * memory.
+ * - WORD 9: OLD PREPARE FILE NO This keeps track of the oldest prepare
+ * operation still alive (not committed
+ * or aborted) when this mbyte started.
+ * - WORD 10: OLD PREPARE PAGE REF File page reference within this file
+ * number.
+ * Page no + Page index.
+ * If no prepare was alive then these
+ * values point to this mbyte.
+ * - WORD 11: DIRTY FLAG = 0 means not dirty and
+ * = 1 means the page is dirty.
+ * Is used when executing log when
+ * a need to write invalid commit
+ * records arise.
+ *
+ * The remaining 2036 words are used for log information, i.e.
+ * log records.
+ *
+ * A log record on this page has the following layout:
+ * - WORD 0: LOG RECORD TYPE
+ * The following types are supported:
+ * - PREPARE OPERATION An operation not yet committed.
+ * - NEW PREPARE OPERATION A prepared operation already
+ * logged is inserted
+ * into the log again so that the
+ * log tail can be advanced.
+ * This can happen when a transaction stays
+ * uncommitted for a long time.
+ * - ABORT TRANSACTION A previously prepared transaction
+ * was aborted.
+ * - COMMIT TRANSACTION A previously prepared transaction
+ * was committed.
+ * - INVALID COMMIT A previous commit record was
+ * invalidated by a
+ * subsequent system restart.
+ * A log record must be invalidated
+ * in a system restart if it belongs
+ * to a global checkpoint id which
+ * is not included in the system
+ * restart.
+ * Otherwise it will be included in
+ * a subsequent system restart since
+ * it will then most likely belong
+ * to a global checkpoint id which
+ * is part of that system
+ * restart.
+ * This is not a correct behaviour
+ * since this operation is lost in a
+ * system restart and should not
+ * reappear at a later system
+ * restart.
+ * - COMPLETED GCI A GCI has now been completed.
+ * - FRAGMENT SPLIT A fragment has been split
+ * (not implemented yet)
+ * - FILE DESCRIPTOR This is always the first log record
+ * in a file.
+ * It is always placed on page 0 after
+ * the header.
+ * It is written when the file is
+ * opened and when the file is closed.
+ * - NEXT LOG RECORD This log record only records where
+ * the next log record starts.
+ * - NEXT MBYTE RECORD This log record specifies that there
+ * are no more log records in this mbyte.
+ *
+ *
+ * A FILE DESCRIPTOR log record continues as follows:
+ * - WORD 1: NO_LOG_DESCRIPTORS This defines the number of
+ * descriptors of log files that
+ * will follow hereafter (max 32).
+ * The log descriptor describes
+ * information about
+ * max_gci_completed,
+ * max_gci_started and log_lap at
+ * every 1 mbyte of the log file.
+ * Since a log file is always
+ * 16 mbyte, 16 entries are needed
+ * in each of the arrays with
+ * max_gci_completed,
+ * max_gci_started and log_lap, thus
+ * 48 words per log file
+ * descriptor (max 32*48 = 1536,
+ * which always fits in page 0).
+ * - WORD 2: LAST LOG FILE The number of the log file currently
+ * open. This is only valid in file 0.
+ * - WORD 3 - WORD 18: MAX_GCI_COMPLETED for every 1 mbyte
+ * in this log file.
+ * - WORD 19 - WORD 34: MAX_GCI_STARTED for every 1 mbyte
+ * in this log file.
+ *
+ * Then it continues for NO_LOG_DESCRIPTORS until all subsequent
+ * log files (max 32) have been properly described.
+ *
+ *
+ * A PREPARE OPERATION log record continues as follows:
+ * - WORD 1: LOG RECORD SIZE
+ * - WORD 2: HASH VALUE
+ * - WORD 3: SCHEMA VERSION
+ * - WORD 4: OPERATION TYPE
+ * = 0 READ,
+ * = 1 UPDATE,
+ * = 2 INSERT,
+ * = 3 DELETE
+ * - WORD 5: NUMBER OF WORDS IN ATTRINFO PART
+ * - WORD 6: KEY LENGTH IN WORDS
+ * - WORD 7 - (WORD 7 + KEY_LENGTH - 1) The tuple key
+ * - (WORD 7 + KEY_LENGTH) -
+ * (WORD 7 + KEY_LENGTH + ATTRINFO_LENGTH - 1) The attrinfo
+ *
+ * A log record can in some cases be spread over several pages.
+ * The next log record always starts immediately after this log record.
+ * A log record does, however, never traverse a 1 mbyte boundary.
+ * This is used to ensure that we can always come back if something
+ * strange occurs in the log file.
+ * To ensure this we also have log records which only record
+ * where the next log record starts.
+ *
+ *
+ * A COMMIT TRANSACTION log record continues as follows:
+ * - WORD 1: TRANSACTION ID PART 1
+ * - WORD 2: TRANSACTION ID PART 2
+ * - WORD 3: FRAGMENT ID OF THE OPERATION
+ * - WORD 4: TABLE ID OF THE OPERATION
+ * - WORD 5: THE FILE NUMBER OF THE PREPARE RECORD
+ * - WORD 6: THE STARTING PAGE NUMBER OF THE PREPARE RECORD
+ * - WORD 7: THE STARTING PAGE INDEX OF THE PREPARE RECORD
+ * - WORD 8: THE STOP PAGE NUMBER OF THE PREPARE RECORD
+ * - WORD 9: GLOBAL CHECKPOINT OF THE TRANSACTION
+ *
+ *
+ * An ABORT TRANSACTION log record continues as follows:
+ * - WORD 1: TRANSACTION ID PART 1
+ * - WORD 2: TRANSACTION ID PART 2
+ *
+ *
+ * A COMPLETED GCI log record continues as follows:
+ * - WORD 1: THE COMPLETED GCI
+ *
+ *
+ * A NEXT LOG RECORD log record continues as follows:
+ * - There is no more information needed.
+ * The next log record will always refer to the start of the next page.
+ *
+ * A NEXT MBYTE RECORD log record continues as follows:
+ * - There is no more information needed.
+ * The next mbyte will always refer to the start of the next mbyte.
+ */
+ UintR logPageWord[8192]; // Size 32 kbytes
+ };
+ typedef Ptr<LogPageRecord> LogPageRecordPtr;
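+
+ /**
+ * Illustrative sketch, not from the source: reading the page header
+ * words documented above; logPagePtr is a hypothetical pointer to a
+ * LogPageRecord.
+ *
+ * const UintR* page = logPagePtr->logPageWord;
+ * UintR checksum = page[0]; // WORD 0: CHECKSUM
+ * UintR lap = page[1]; // WORD 1: LAP
+ * UintR maxGciCompleted = page[2]; // WORD 2: MAX_GCI_COMPLETED
+ * UintR maxGciStarted = page[3]; // WORD 3: MAX_GCI_STARTED
+ * UintR currentIndex = page[8]; // WORD 8: CURRENT PAGE INDEX
+ * UintR dirtyFlag = page[11]; // WORD 11: DIRTY FLAG
+ */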
+
+ struct PageRefRecord {
+ UintR pageRef[8];
+ UintR prNext;
+ UintR prPrev;
+ Uint16 prFileNo;
+ Uint16 prPageNo;
+ }; // size 44 bytes
+ typedef Ptr<PageRefRecord> PageRefRecordPtr;
+
+ struct Tablerec {
+ enum TableStatus {
+ TABLE_DEFINED = 0,
+ NOT_DEFINED = 1,
+ ADD_TABLE_ONGOING = 2,
+ PREP_DROP_TABLE_ONGOING = 3,
+ PREP_DROP_TABLE_DONE = 4
+ };
+
+ UintR fragrec[MAX_FRAG_PER_NODE];
+ Uint16 fragid[MAX_FRAG_PER_NODE];
+ /**
+ * Status of the table
+ */
+ TableStatus tableStatus;
+ /**
+ * Table type and target table of index.
+ */
+ Uint16 tableType;
+ Uint16 primaryTableId;
+ Uint32 schemaVersion;
+
+ Uint32 usageCount;
+ NdbNodeBitmask waitingTC;
+ NdbNodeBitmask waitingDIH;
+ }; // Size 100 bytes
+ typedef Ptr<Tablerec> TablerecPtr;
+
+ struct TcConnectionrec {
+ enum ListState {
+ NOT_IN_LIST = 0,
+ IN_ACTIVE_LIST = 1,
+ ACC_BLOCK_LIST = 2,
+ WAIT_QUEUE_LIST = 3
+ };
+ enum LogWriteState {
+ NOT_STARTED = 0,
+ NOT_WRITTEN = 1,
+ NOT_WRITTEN_WAIT = 2,
+ WRITTEN = 3
+ };
+ enum AbortState {
+ ABORT_IDLE = 0,
+ ABORT_ACTIVE = 1,
+ NEW_FROM_TC = 2,
+ REQ_FROM_TC = 3,
+ ABORT_FROM_TC = 4,
+ ABORT_FROM_LQH = 5
+ };
+ enum TransactionState {
+ IDLE = 0,
+
+ /* -------------------------------------------------------------------- */
+ // Transaction in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_ACC = 1,
+ WAIT_TUPKEYINFO = 2,
+ WAIT_ATTR = 3,
+ WAIT_TUP = 4,
+ STOPPED = 5,
+ LOG_QUEUED = 6,
+ PREPARED = 7,
+ LOG_COMMIT_WRITTEN_WAIT_SIGNAL = 8,
+ LOG_COMMIT_QUEUED_WAIT_SIGNAL = 9,
+
+ /* -------------------------------------------------------------------- */
+ // Commit in progress states
+ /* -------------------------------------------------------------------- */
+ COMMIT_STOPPED = 10,
+ LOG_COMMIT_QUEUED = 11,
+ COMMIT_QUEUED = 12,
+ COMMITTED = 13,
+
+ /* -------------------------------------------------------------------- */
+ // Abort in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_ACC_ABORT = 14,
+ ABORT_QUEUED = 15,
+ ABORT_STOPPED = 16,
+ WAIT_AI_AFTER_ABORT = 17,
+ LOG_ABORT_QUEUED = 18,
+ WAIT_TUP_TO_ABORT = 19,
+
+ /* -------------------------------------------------------------------- */
+ // Scan in progress states
+ /* -------------------------------------------------------------------- */
+ WAIT_SCAN_AI = 20,
+ SCAN_STATE_USED = 21,
+ SCAN_FIRST_STOPPED = 22,
+ SCAN_CHECK_STOPPED = 23,
+ SCAN_STOPPED = 24,
+ SCAN_RELEASE_STOPPED = 25,
+ SCAN_CLOSE_STOPPED = 26,
+ COPY_CLOSE_STOPPED = 27,
+ COPY_FIRST_STOPPED = 28,
+ COPY_STOPPED = 29,
+ SCAN_TUPKEY = 30,
+ COPY_TUPKEY = 31,
+
+ TC_NOT_CONNECTED = 32,
+ PREPARED_RECEIVED_COMMIT = 33, // Temporary state in write commit log
+ LOG_COMMIT_WRITTEN = 34 // Temporary state in write commit log
+ };
+ enum ConnectState {
+ DISCONNECTED = 0,
+ CONNECTED = 1,
+ COPY_CONNECTED = 2,
+ LOG_CONNECTED = 3
+ };
+ ConnectState connectState;
+ UintR copyCountWords;
+ UintR firstAttrinfo[5];
+ UintR tupkeyData[4];
+ UintR transid[2];
+ AbortState abortState;
+ UintR accConnectrec;
+ UintR applOprec;
+ UintR clientConnectrec;
+ UintR tcTimer;
+ UintR currReclenAi;
+ UintR currTupAiLen;
+ UintR firstAttrinbuf;
+ UintR firstTupkeybuf;
+ UintR fragmentid;
+ UintR fragmentptr;
+ UintR gci;
+ UintR hashValue;
+ UintR lastTupkeybuf;
+ UintR lastAttrinbuf;
+ /**
+ * Each operation (TcConnectrec) can be stored in at most one of many
+ * lists.
+ * This variable keeps track of which list it is in.
+ */
+ ListState listState;
+
+ UintR logStartFileNo;
+ LogWriteState logWriteState;
+ UintR nextHashRec;
+ UintR nextLogTcrec;
+ UintR nextTcLogQueue;
+ UintR nextTc;
+ UintR nextTcConnectrec;
+ UintR prevHashRec;
+ UintR prevLogTcrec;
+ UintR prevTc;
+ UintR readlenAi;
+ UintR reqRef;
+ UintR reqinfo;
+ UintR schemaVersion;
+ UintR storedProcId;
+ UintR simpleTcConnect;
+ UintR tableref;
+ UintR tcOprec;
+ UintR tcScanInfo;
+ UintR tcScanRec;
+ UintR totReclenAi;
+ UintR totSendlenAi;
+ UintR tupConnectrec;
+ UintR savePointId;
+ TransactionState transactionState;
+ BlockReference applRef;
+ BlockReference clientBlockref;
+
+ BlockReference reqBlockref;
+ BlockReference tcBlockref;
+ BlockReference tcAccBlockref;
+ BlockReference tcTuxBlockref;
+ BlockReference tcTupBlockref;
+ Uint32 commitAckMarker;
+ union {
+ Uint32 m_scan_curr_range_no;
+ UintR noFiredTriggers;
+ };
+ Uint16 errorCode;
+ Uint16 logStartPageIndex;
+ Uint16 logStartPageNo;
+ Uint16 logStopPageNo;
+ Uint16 nextReplica;
+ Uint16 primKeyLen;
+ Uint16 save1;
+ Uint16 nodeAfterNext[3];
+
+ Uint8 activeCreat;
+ Uint8 apiVersionNo;
+ Uint8 dirtyOp;
+ Uint8 indTakeOver;
+ Uint8 lastReplicaNo;
+ Uint8 localFragptr;
+ Uint8 lockType;
+ Uint8 nextSeqNoReplica;
+ Uint8 opSimple;
+ Uint8 opExec;
+ Uint8 operation;
+ Uint8 reclenAiLqhkey;
+ Uint8 m_offset_current_keybuf;
+ Uint8 replicaType;
+ Uint8 simpleRead;
+ Uint8 seqNoReplica;
+ Uint8 tcNodeFailrec;
+ }; /* p2c: size = 280 bytes */
+
+ typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
+
+ struct TcNodeFailRecord {
+ enum TcFailStatus {
+ TC_STATE_TRUE = 0,
+ TC_STATE_FALSE = 1,
+ TC_STATE_BREAK = 2
+ };
+ UintR lastNewTcRef;
+ UintR newTcRef;
+ TcFailStatus tcFailStatus;
+ UintR tcRecNow;
+ BlockReference lastNewTcBlockref;
+ BlockReference newTcBlockref;
+ Uint16 oldNodeId;
+ }; // Size 28 bytes
+ typedef Ptr<TcNodeFailRecord> TcNodeFailRecordPtr;
+
+ struct CommitLogRecord {
+ Uint32 startPageNo;
+ Uint32 startPageIndex;
+ Uint32 stopPageNo;
+ Uint32 fileNo;
+ };
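+
+ /**
+ * Illustrative sketch (assumption): the COMMIT TRANSACTION log record
+ * layout documented in LogPageRecord maps onto this struct roughly as
+ * below; commitWords is a hypothetical pointer to WORD 0 of such a
+ * record inside a log page.
+ *
+ * CommitLogRecord rec;
+ * rec.fileNo = commitWords[5]; // WORD 5: file number of the prepare record
+ * rec.startPageNo = commitWords[6]; // WORD 6: starting page number
+ * rec.startPageIndex = commitWords[7]; // WORD 7: starting page index
+ * rec.stopPageNo = commitWords[8]; // WORD 8: stop page number
+ */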
+
+public:
+ Dblqh(const class Configuration &);
+ virtual ~Dblqh();
+
+private:
+ BLOCK_DEFINES(Dblqh);
+
+ void execPACKED_SIGNAL(Signal* signal);
+ void execDEBUG_SIG(Signal* signal);
+ void execATTRINFO(Signal* signal);
+ void execKEYINFO(Signal* signal);
+ void execLQHKEYREQ(Signal* signal);
+ void execLQHKEYREF(Signal* signal);
+ void execCOMMIT(Signal* signal);
+ void execCOMPLETE(Signal* signal);
+ void execLQHKEYCONF(Signal* signal);
+ void execTESTSIG(Signal* signal);
+ void execLQH_RESTART_OP(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execSTART_RECCONF(Signal* signal);
+ void execEXEC_FRAGREQ(Signal* signal);
+ void execEXEC_FRAGCONF(Signal* signal);
+ void execEXEC_FRAGREF(Signal* signal);
+ void execSTART_EXEC_SR(Signal* signal);
+ void execEXEC_SRREQ(Signal* signal);
+ void execEXEC_SRCONF(Signal* signal);
+ void execREAD_PSUEDO_REQ(Signal* signal);
+
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execACC_COM_BLOCK(Signal* signal);
+ void execACC_COM_UNBLOCK(Signal* signal);
+ void execTUP_COM_BLOCK(Signal* signal);
+ void execTUP_COM_UNBLOCK(Signal* signal);
+ void execACC_ABORTCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execCHECK_LCP_STOP(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execTUP_ATTRINFO(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execLQHFRAGREQ(Signal* signal);
+ void execLQHADDATTREQ(Signal* signal);
+ void execTUP_ADD_ATTCONF(Signal* signal);
+ void execTUP_ADD_ATTRREF(Signal* signal);
+ void execACCFRAGCONF(Signal* signal);
+ void execACCFRAGREF(Signal* signal);
+ void execTUPFRAGCONF(Signal* signal);
+ void execTUPFRAGREF(Signal* signal);
+ void execTAB_COMMITREQ(Signal* signal);
+ void execACCSEIZECONF(Signal* signal);
+ void execACCSEIZEREF(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execTUPSEIZECONF(Signal* signal);
+ void execTUPSEIZEREF(Signal* signal);
+ void execACCKEYCONF(Signal* signal);
+ void execACCKEYREF(Signal* signal);
+ void execTUPKEYCONF(Signal* signal);
+ void execTUPKEYREF(Signal* signal);
+ void execABORT(Signal* signal);
+ void execABORTREQ(Signal* signal);
+ void execCOMMITREQ(Signal* signal);
+ void execCOMPLETEREQ(Signal* signal);
+ void execMEMCHECKREQ(Signal* signal);
+ void execSCAN_FRAGREQ(Signal* signal);
+ void execSCAN_NEXTREQ(Signal* signal);
+ void execACC_SCANCONF(Signal* signal);
+ void execACC_SCANREF(Signal* signal);
+ void execNEXT_SCANCONF(Signal* signal);
+ void execNEXT_SCANREF(Signal* signal);
+ void execACC_TO_REF(Signal* signal);
+ void execSTORED_PROCCONF(Signal* signal);
+ void execSTORED_PROCREF(Signal* signal);
+ void execCOPY_FRAGREQ(Signal* signal);
+ void execCOPY_ACTIVEREQ(Signal* signal);
+ void execCOPY_STATEREQ(Signal* signal);
+ void execLQH_TRANSREQ(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execACC_LCPCONF(Signal* signal);
+ void execACC_LCPREF(Signal* signal);
+ void execACC_LCPSTARTED(Signal* signal);
+ void execACC_CONTOPCONF(Signal* signal);
+ void execLCP_FRAGIDCONF(Signal* signal);
+ void execLCP_FRAGIDREF(Signal* signal);
+ void execLCP_HOLDOPCONF(Signal* signal);
+ void execLCP_HOLDOPREF(Signal* signal);
+ void execTUP_PREPLCPCONF(Signal* signal);
+ void execTUP_PREPLCPREF(Signal* signal);
+ void execTUP_LCPCONF(Signal* signal);
+ void execTUP_LCPREF(Signal* signal);
+ void execTUP_LCPSTARTED(Signal* signal);
+ void execEND_LCPCONF(Signal* signal);
+
+ void execLCP_FRAG_ORD(Signal* signal);
+ void execEMPTY_LCP_REQ(Signal* signal);
+
+ void execSTART_FRAGREQ(Signal* signal);
+ void execSTART_RECREF(Signal* signal);
+ void execSR_FRAGIDCONF(Signal* signal);
+ void execSR_FRAGIDREF(Signal* signal);
+ void execACC_SRCONF(Signal* signal);
+ void execACC_SRREF(Signal* signal);
+ void execTUP_SRCONF(Signal* signal);
+ void execTUP_SRREF(Signal* signal);
+ void execGCP_SAVEREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execSCAN_HBREP(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execTIME_SIGNAL(Signal* signal);
+ void execFSSYNCCONF(Signal* signal);
+ void execFSSYNCREF(Signal* signal);
+
+ void execALTER_TAB_REQ(Signal* signal);
+ void execALTER_TAB_CONF(Signal* signal);
+
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execCREATE_TRIG_REQ(Signal* signal);
+
+ void execDROP_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+
+ void execLQH_ALLOCREQ(Signal* signal);
+ void execLQH_WRITELOG_REQ(Signal* signal);
+
+ void execTUXFRAGCONF(Signal* signal);
+ void execTUXFRAGREF(Signal* signal);
+ void execTUX_ADD_ATTRCONF(Signal* signal);
+ void execTUX_ADD_ATTRREF(Signal* signal);
+
+ // Statement blocks
+
+ void init_acc_ptr_list(ScanRecord*);
+ bool seize_acc_ptr_list(ScanRecord*, Uint32);
+ void release_acc_ptr_list(ScanRecord*);
+ Uint32 get_acc_ptr_from_scan_record(ScanRecord*, Uint32, bool);
+ void set_acc_ptr_in_scan_record(ScanRecord*, Uint32, Uint32);
+ void i_get_acc_ptr(ScanRecord*, Uint32*&, Uint32);
+
+ void removeTable(Uint32 tableId);
+ void sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId);
+ void sendEMPTY_LCP_CONF(Signal* signal, bool idle);
+ void sendLCP_FRAGIDREQ(Signal* signal);
+ void sendLCP_FRAG_REP(Signal * signal, const LcpRecord::FragOrd &) const;
+
+ void updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId);
+ void LQHKEY_abort(Signal* signal, int errortype);
+ void LQHKEY_error(Signal* signal, int errortype);
+ void nextRecordCopy(Signal* signal);
+ void calculateHash(Signal* signal);
+ void continueAfterCheckLcpStopBlocked(Signal* signal);
+ void checkLcpStopBlockedLab(Signal* signal);
+ void sendCommittedTc(Signal* signal, BlockReference atcBlockref);
+ void sendCompletedTc(Signal* signal, BlockReference atcBlockref);
+ void sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref);
+ void sendCommitLqh(Signal* signal, BlockReference alqhBlockref);
+ void sendCompleteLqh(Signal* signal, BlockReference alqhBlockref);
+ void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
+ void sendPackedSignalTc(Signal* signal, HostRecord * ahostptr);
+ Uint32 handleLongTupKey(Signal* signal,
+ Uint32 lenSofar,
+ Uint32 primKeyLen,
+ Uint32* dataPtr);
+ void cleanUp(Signal* signal);
+ void sendAttrinfoLoop(Signal* signal);
+ void sendAttrinfoSignal(Signal* signal);
+ void sendLqhAttrinfoSignal(Signal* signal);
+ void sendKeyinfoAcc(Signal* signal, Uint32 pos);
+ Uint32 initScanrec(const class ScanFragReq *);
+ void initScanTc(Signal* signal,
+ Uint32 transid1,
+ Uint32 transid2,
+ Uint32 fragId,
+ Uint32 nodeId);
+ void finishScanrec(Signal* signal);
+ void releaseScanrec(Signal* signal);
+ void seizeScanrec(Signal* signal);
+ Uint32 sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *);
+ void sendScanFragConf(Signal* signal, Uint32 scanCompleted);
+ void initCopyrec(Signal* signal);
+ void initCopyTc(Signal* signal);
+ void sendCopyActiveConf(Signal* signal,Uint32 tableId);
+ void checkLcpCompleted(Signal* signal);
+ void checkLcpHoldop(Signal* signal);
+ void checkLcpStarted(Signal* signal);
+ void checkLcpTupprep(Signal* signal);
+ void getNextFragForLcp(Signal* signal);
+ void initLcpLocAcc(Signal* signal, Uint32 fragId);
+ void initLcpLocTup(Signal* signal, Uint32 fragId);
+ void moveAccActiveFrag(Signal* signal);
+ void moveActiveToAcc(Signal* signal);
+ void releaseLocalLcps(Signal* signal);
+ void seizeLcpLoc(Signal* signal);
+ void sendAccContOp(Signal* signal);
+ void sendStartLcp(Signal* signal);
+ void setLogTail(Signal* signal, Uint32 keepGci);
+ Uint32 remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
+ const LogPartRecordPtr &sltLogPartPtr);
+ void checkGcpCompleted(Signal* signal, Uint32 pageWritten, Uint32 wordWritten);
+ void initFsopenconf(Signal* signal);
+ void initFsrwconf(Signal* signal);
+ void initLfo(Signal* signal);
+ void initLogfile(Signal* signal, Uint32 fileNo);
+ void initLogpage(Signal* signal);
+ void openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr);
+ void openLogfileInit(Signal* signal);
+ void openNextLogfile(Signal* signal);
+ void releaseLfo(Signal* signal);
+ void releaseLfoPages(Signal* signal);
+ void releaseLogpage(Signal* signal);
+ void seizeLfo(Signal* signal);
+ void seizeLogfile(Signal* signal);
+ void seizeLogpage(Signal* signal);
+ void writeFileDescriptor(Signal* signal);
+ void writeFileHeaderOpen(Signal* signal, Uint32 type);
+ void writeInitMbyte(Signal* signal);
+ void writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten);
+ void buildLinkedLogPageList(Signal* signal);
+ void changeMbyte(Signal* signal);
+ Uint32 checkIfExecLog(Signal* signal);
+ void checkNewMbyte(Signal* signal);
+ void checkReadExecSr(Signal* signal);
+ void checkScanTcCompleted(Signal* signal);
+ void checkSrCompleted(Signal* signal);
+ void closeFile(Signal* signal, LogFileRecordPtr logFilePtr);
+ void completedLogPage(Signal* signal, Uint32 clpType);
+ void deleteFragrec(Uint32 fragId);
+ void deleteTransidHash(Signal* signal);
+ void findLogfile(Signal* signal,
+ Uint32 fileNo,
+ LogPartRecordPtr flfLogPartPtr,
+ LogFileRecordPtr* parLogFilePtr);
+ void findPageRef(Signal* signal, CommitLogRecord* commitLogRecord);
+ int findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec);
+ void getFirstInLogQueue(Signal* signal);
+ bool getFragmentrec(Signal* signal, Uint32 fragId);
+ void initialiseAddfragrec(Signal* signal);
+ void initialiseAttrbuf(Signal* signal);
+ void initialiseDatabuf(Signal* signal);
+ void initialiseFragrec(Signal* signal);
+ void initialiseGcprec(Signal* signal);
+ void initialiseLcpRec(Signal* signal);
+ void initialiseLcpLocrec(Signal* signal);
+ void initialiseLfo(Signal* signal);
+ void initialiseLogFile(Signal* signal);
+ void initialiseLogPage(Signal* signal);
+ void initialiseLogPart(Signal* signal);
+ void initialisePageRef(Signal* signal);
+ void initialiseScanrec(Signal* signal);
+ void initialiseTabrec(Signal* signal);
+ void initialiseTcrec(Signal* signal);
+ void initialiseTcNodeFailRec(Signal* signal);
+ void initFragrec(Signal* signal,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 copyType);
+ void initFragrecSr(Signal* signal);
+ void initGciInLogFileRec(Signal* signal, Uint32 noFdDesc);
+ void initLcpSr(Signal* signal,
+ Uint32 lcpNo,
+ Uint32 lcpId,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 fragPtr);
+ void initLogpart(Signal* signal);
+ void initLogPointers(Signal* signal);
+ void initReqinfoExecSr(Signal* signal);
+ bool insertFragrec(Signal* signal, Uint32 fragId);
+ void linkActiveFrag(Signal* signal);
+ void linkFragQueue(Signal* signal);
+ void linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
+ void logNextStart(Signal* signal);
+ void moveToPageRef(Signal* signal);
+ void readAttrinfo(Signal* signal);
+ void readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord);
+ void readExecLog(Signal* signal);
+ void readExecSrNewMbyte(Signal* signal);
+ void readExecSr(Signal* signal);
+ void readKey(Signal* signal);
+ void readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr);
+ void readLogHeader(Signal* signal);
+ Uint32 readLogword(Signal* signal);
+ Uint32 readLogwordExec(Signal* signal);
+ void readSinglePage(Signal* signal, Uint32 pageNo);
+ void releaseAccList(Signal* signal);
+ void releaseActiveCopy(Signal* signal);
+ void releaseActiveFrag(Signal* signal);
+ void releaseActiveList(Signal* signal);
+ void releaseAddfragrec(Signal* signal);
+ void releaseFragrec();
+ void releaseLcpLoc(Signal* signal);
+ void releaseOprec(Signal* signal);
+ void releasePageRef(Signal* signal);
+ void releaseMmPages(Signal* signal);
+ void releasePrPages(Signal* signal);
+ void releaseTcrec(Signal* signal, TcConnectionrecPtr tcConnectptr);
+ void releaseTcrecLog(Signal* signal, TcConnectionrecPtr tcConnectptr);
+ void releaseWaitQueue(Signal* signal);
+ void removeLogTcrec(Signal* signal);
+ void removePageRef(Signal* signal);
+ Uint32 returnExecLog(Signal* signal);
+ int saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void seizeAddfragrec(Signal* signal);
+ void seizeAttrinbuf(Signal* signal);
+ Uint32 seize_attrinbuf();
+ Uint32 release_attrinbuf(Uint32);
+ Uint32 copy_bounds(Uint32 * dst, TcConnectionrec*);
+
+ void seizeFragmentrec(Signal* signal);
+ void seizePageRef(Signal* signal);
+ void seizeTcrec();
+ void seizeTupkeybuf(Signal* signal);
+ void sendAborted(Signal* signal);
+ void sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus);
+ void sendTupkey(Signal* signal);
+ void startExecSr(Signal* signal);
+ void startNextExecSr(Signal* signal);
+ void startTimeSupervision(Signal* signal);
+ void stepAhead(Signal* signal, Uint32 stepAheadWords);
+ void systemError(Signal* signal);
+ void writeAbortLog(Signal* signal);
+ void writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr);
+ void writeCompletedGciLog(Signal* signal);
+ void writeDirty(Signal* signal);
+ void writeKey(Signal* signal);
+ void writeLogHeader(Signal* signal);
+ void writeLogWord(Signal* signal, Uint32 data);
+ void writeNextLog(Signal* signal);
+ void errorReport(Signal* signal, int place);
+ void warningReport(Signal* signal, int place);
+ void invalidateLogAfterLastGCI(Signal *signal);
+ void readFileInInvalidate(Signal *signal);
+ void exitFromInvalidate(Signal* signal);
+ Uint32 calcPageCheckSum(LogPageRecordPtr logP);
+
+ // Generated statement blocks
+ void systemErrorLab(Signal* signal);
+ void initFourth(Signal* signal);
+ void packLqhkeyreqLab(Signal* signal);
+ void sendNdbSttorryLab(Signal* signal);
+ void execSrCompletedLab(Signal* signal);
+ void execLogRecord(Signal* signal);
+ void srPhase3Comp(Signal* signal);
+ void srLogLimits(Signal* signal);
+ void srGciLimits(Signal* signal);
+ void srPhase3Start(Signal* signal);
+ void warningHandlerLab(Signal* signal);
+ void checkStartCompletedLab(Signal* signal);
+ void continueAbortLab(Signal* signal);
+ void abortContinueAfterBlockedLab(Signal* signal, bool canBlock);
+ void abortCommonLab(Signal* signal);
+ void localCommitLab(Signal* signal);
+ void abortErrorLab(Signal* signal);
+ void continueAfterReceivingAllAiLab(Signal* signal);
+ void abortStateHandlerLab(Signal* signal);
+ void writeAttrinfoLab(Signal* signal);
+ void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode);
+ void localAbortStateHandlerLab(Signal* signal);
+ void logLqhkeyreqLab(Signal* signal);
+ void lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void rwConcludedAiLab(Signal* signal);
+ void aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void takeOverErrorLab(Signal* signal);
+ void endgettupkeyLab(Signal* signal);
+ void noFreeRecordLab(Signal* signal,
+ const class LqhKeyReq * lqhKeyReq,
+ Uint32 errorCode);
+ void logLqhkeyrefLab(Signal* signal);
+ void closeCopyLab(Signal* signal);
+ void commitReplyLab(Signal* signal);
+ void completeUnusualLab(Signal* signal);
+ void completeTransNotLastLab(Signal* signal);
+ void completedLab(Signal* signal);
+ void copyCompletedLab(Signal* signal);
+ void completeLcpRoundLab(Signal* signal);
+ void continueAfterLogAbortWriteLab(Signal* signal);
+ void sendAttrinfoLab(Signal* signal);
+ void sendExecConf(Signal* signal);
+ void execSr(Signal* signal);
+ void srFourthComp(Signal* signal);
+ void timeSup(Signal* signal);
+ void closeCopyRequestLab(Signal* signal);
+ void closeScanRequestLab(Signal* signal);
+ void scanTcConnectLab(Signal* signal, Uint32 startTcCon, Uint32 fragId);
+ void initGcpRecLab(Signal* signal);
+ void prepareContinueAfterBlockedLab(Signal* signal);
+ void commitContinueAfterBlockedLab(Signal* signal);
+ void continueCopyAfterBlockedLab(Signal* signal);
+ void continueFirstCopyAfterBlockedLab(Signal* signal);
+ void continueFirstScanAfterBlockedLab(Signal* signal);
+ void continueScanAfterBlockedLab(Signal* signal);
+ void continueScanReleaseAfterBlockedLab(Signal* signal);
+ void continueCloseScanAfterBlockedLab(Signal* signal);
+ void continueCloseCopyAfterBlockedLab(Signal* signal);
+ void sendExecFragRefLab(Signal* signal);
+ void fragrefLab(Signal* signal, BlockReference retRef,
+ Uint32 retPtr, Uint32 errorCode);
+ void abortAddFragOps(Signal* signal);
+ void rwConcludedLab(Signal* signal);
+ void sendsttorryLab(Signal* signal);
+ void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);
+ void startphase2Lab(Signal* signal, Uint32 config);
+ void startphase3Lab(Signal* signal);
+ void startphase4Lab(Signal* signal);
+ void startphase6Lab(Signal* signal);
+ void moreconnectionsLab(Signal* signal);
+ void scanReleaseLocksLab(Signal* signal);
+ void closeScanLab(Signal* signal);
+ void nextScanConfLoopLab(Signal* signal);
+ void scanNextLoopLab(Signal* signal);
+ void commitReqLab(Signal* signal, Uint32 gci);
+ void completeTransLastLab(Signal* signal);
+ void tupScanCloseConfLab(Signal* signal);
+ void tupCopyCloseConfLab(Signal* signal);
+ void accScanCloseConfLab(Signal* signal);
+ void accCopyCloseConfLab(Signal* signal);
+ void nextScanConfScanLab(Signal* signal);
+ void nextScanConfCopyLab(Signal* signal);
+ void continueScanNextReqLab(Signal* signal);
+ void keyinfoLab(const Uint32 * src, const Uint32 * end);
+ void copySendTupkeyReqLab(Signal* signal);
+ void storedProcConfScanLab(Signal* signal);
+ void storedProcConfCopyLab(Signal* signal);
+ void copyStateFinishedLab(Signal* signal);
+ void lcpCompletedLab(Signal* signal);
+ void lcpStartedLab(Signal* signal);
+ void contChkpNextFragLab(Signal* signal);
+ void startLcpRoundLab(Signal* signal);
+ void startFragRefLab(Signal* signal);
+ void srCompletedLab(Signal* signal);
+ void openFileInitLab(Signal* signal);
+ void openSrFrontpageLab(Signal* signal);
+ void openSrLastFileLab(Signal* signal);
+ void openSrNextFileLab(Signal* signal);
+ void openExecSrStartLab(Signal* signal);
+ void openExecSrNewMbyteLab(Signal* signal);
+ void openSrFourthPhaseLab(Signal* signal);
+ void openSrFourthZeroSkipInitLab(Signal* signal);
+ void openSrFourthZeroLab(Signal* signal);
+ void openExecLogLab(Signal* signal);
+ void checkInitCompletedLab(Signal* signal);
+ void closingSrLab(Signal* signal);
+ void closeExecSrLab(Signal* signal);
+ void execLogComp(Signal* signal);
+ void closeWriteLogLab(Signal* signal);
+ void closeExecLogLab(Signal* signal);
+ void writePageZeroLab(Signal* signal);
+ void lastWriteInFileLab(Signal* signal);
+ void initWriteEndLab(Signal* signal);
+ void initFirstPageLab(Signal* signal);
+ void writeGciZeroLab(Signal* signal);
+ void writeDirtyLab(Signal* signal);
+ void writeInitMbyteLab(Signal* signal);
+ void writeLogfileLab(Signal* signal);
+ void firstPageWriteLab(Signal* signal);
+ void readSrLastMbyteLab(Signal* signal);
+ void readSrLastFileLab(Signal* signal);
+ void readSrNextFileLab(Signal* signal);
+ void readExecSrLab(Signal* signal);
+ void readExecLogLab(Signal* signal);
+ void readSrFourthPhaseLab(Signal* signal);
+ void readSrFourthZeroLab(Signal* signal);
+ void copyLqhKeyRefLab(Signal* signal);
+ void restartOperationsLab(Signal* signal);
+ void lqhTransNextLab(Signal* signal);
+ void restartOperationsAfterStopLab(Signal* signal);
+ void sttorStartphase1Lab(Signal* signal);
+ void startphase1Lab(Signal* signal, Uint32 config, Uint32 nodeId);
+ void tupkeyConfLab(Signal* signal);
+ void copyTupkeyConfLab(Signal* signal);
+ void scanTupkeyConfLab(Signal* signal);
+ void scanTupkeyRefLab(Signal* signal);
+ void accScanConfScanLab(Signal* signal);
+ void accScanConfCopyLab(Signal* signal);
+ void scanLockReleasedLab(Signal* signal);
+ void openSrFourthNextLab(Signal* signal);
+ void closingInitLab(Signal* signal);
+ void closeExecSrCompletedLab(Signal* signal);
+ void readSrFrontpageLab(Signal* signal);
+
+ void sendAddFragReq(Signal* signal);
+ void sendAddAttrReq(Signal* signal);
+ void checkDropTab(Signal*);
+ Uint32 checkDropTabState(Tablerec::TableStatus, Uint32) const;
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ Dbtup* c_tup;
+ Uint32 readPrimaryKeys(ScanRecord*, TcConnectionrec*, Uint32 * dst);
+// ----------------------------------------------------------------
+// These variables manage the records. For most record types there is one
+// pointer to the array of structs, one pointer-struct, a file size and a
+// first-free-record variable. The pointer-structs are temporary variables
+// kept on the class object, since a great many of them often exist
+// simultaneously and no better way of handling them is currently
+// available.
+// ----------------------------------------------------------------
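+// As an illustration, the AddFragRecord group below follows exactly this
+// pattern (all names taken from the declarations that follow):
+//   AddFragRecord *addFragRecord;   - array of records
+//   AddFragRecordPtr addfragptr;    - temporary pointer-struct
+//   UintR cfirstfreeAddfragrec;     - head of the free list
+//   UintR caddfragrecFileSize;      - number of records in the array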
+/* ------------------------------------------------------------------------- */
+/* POSITIONS WITHIN THE ATTRINBUF AND THE MAX SIZE OF DATA WITHIN AN */
+/* ATTRINBUF. */
+/* ------------------------------------------------------------------------- */
+
+
+#define ZADDFRAGREC_FILE_SIZE 1
+ AddFragRecord *addFragRecord;
+ AddFragRecordPtr addfragptr;
+ UintR cfirstfreeAddfragrec;
+ UintR caddfragrecFileSize;
+
+#define ZATTRINBUF_FILE_SIZE 12288 // 1.5 MByte
+#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZINBUF_NEXT 25 /* POSITION OF 'NEXT'-VARIABLE. */
+ Attrbuf *attrbuf;
+ AttrbufPtr attrinbufptr;
+ UintR cfirstfreeAttrinbuf;
+ UintR cattrinbufFileSize;
+ Uint32 c_no_attrinbuf_recs;
+
+#define ZDATABUF_FILE_SIZE 10000 // 200 kByte
+ Databuf *databuf;
+ DatabufPtr databufptr;
+ UintR cfirstfreeDatabuf;
+ UintR cdatabufFileSize;
+
+// Configurable
+ Fragrecord *fragrecord;
+ FragrecordPtr fragptr;
+ UintR cfirstfreeFragrec;
+ UintR cfragrecFileSize;
+
+#define ZGCPREC_FILE_SIZE 1
+ GcpRecord *gcpRecord;
+ GcpRecordPtr gcpPtr;
+ UintR cgcprecFileSize;
+
+// MAX_NDB_NODES is the size of this array
+ HostRecord *hostRecord;
+ UintR chostFileSize;
+
+#define ZNO_CONCURRENT_LCP 1
+ LcpRecord *lcpRecord;
+ LcpRecordPtr lcpPtr;
+ UintR cfirstfreeLcpLoc;
+ UintR clcpFileSize;
+
+#define ZLCP_LOCREC_FILE_SIZE 4
+ LcpLocRecord *lcpLocRecord;
+ LcpLocRecordPtr lcpLocptr;
+ UintR clcpLocrecFileSize;
+
+#define ZLOG_PART_FILE_SIZE 4
+ LogPartRecord *logPartRecord;
+ LogPartRecordPtr logPartPtr;
+ UintR clogPartFileSize;
+
+// Configurable
+ LogFileRecord *logFileRecord;
+ LogFileRecordPtr logFilePtr;
+ UintR cfirstfreeLogFile;
+ UintR clogFileFileSize;
+
+#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
+ LogFileOperationRecord *logFileOperationRecord;
+ LogFileOperationRecordPtr lfoPtr;
+ UintR cfirstfreeLfo;
+ UintR clfoFileSize;
+
+ LogPageRecord *logPageRecord;
+ LogPageRecordPtr logPagePtr;
+ UintR cfirstfreeLogPage;
+ UintR clogPageFileSize;
+
+#define ZPAGE_REF_FILE_SIZE 20
+ PageRefRecord *pageRefRecord;
+ PageRefRecordPtr pageRefPtr;
+ UintR cfirstfreePageRef;
+ UintR cpageRefFileSize;
+
+#define ZSCANREC_FILE_SIZE 100
+ ArrayPool<ScanRecord> c_scanRecordPool;
+ ScanRecordPtr scanptr;
+ UintR cscanNoFreeRec;
+ Uint32 cscanrecFileSize;
+
+// Configurable
+ Tablerec *tablerec;
+ TablerecPtr tabptr;
+ UintR ctabrecFileSize;
+
+// Configurable
+ TcConnectionrec *tcConnectionrec;
+ TcConnectionrecPtr tcConnectptr;
+ UintR cfirstfreeTcConrec;
+ UintR ctcConnectrecFileSize;
+
+// MAX_NDB_NODES is the size of this array
+ TcNodeFailRecord *tcNodeFailRecord;
+ TcNodeFailRecordPtr tcNodeFailptr;
+ UintR ctcNodeFailrecFileSize;
+
+ Uint16 terrorCode;
+
+ Uint32 c_firstInNodeGroup;
+
+// ------------------------------------------------------------------------
+// These variables are used to store block state that does not need
+// arrays of structs.
+// ------------------------------------------------------------------------
+ Uint32 c_lcpId;
+ Uint32 cnoOfFragsCheckpointed;
+
+/* ------------------------------------------------------------------------- */
+// cmaxWordsAtNodeRec keeps track of how many words are currently
+// outstanding in a node recovery situation.
+// cbookedAccOps keeps track of how many operation records have been
+// booked in ACC for the scan processes.
+// cmaxAccOps contains the maximum number of operation records which can be
+// allocated for scan purposes in ACC.
+/* ------------------------------------------------------------------------- */
+ UintR cmaxWordsAtNodeRec;
+ UintR cbookedAccOps;
+ UintR cmaxAccOps;
+/* ------------------------------------------------------------------------- */
+/*THIS STATE VARIABLE IS ZTRUE IF AN ADD NODE IS ONGOING. ADD NODE MEANS */
+/*THAT CONNECTIONS ARE SET-UP TO THE NEW NODE. */
+/* ------------------------------------------------------------------------- */
+ Uint8 caddNodeState;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE SPECIFIES WHICH TYPE OF RESTART IS ONGOING. */
+/* ------------------------------------------------------------------------- */
+ Uint16 cstartType;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER AN INITIAL RESTART IS ONGOING OR NOT. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cinitialStartOngoing;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE COMPLETED EXECUTING */
+/*THEIR UNDO LOG. */
+/* ------------------------------------------------------------------------- */
+ ExecUndoLogState csrExecUndoLogState;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE CONFIRMED COMPLETION */
+/*OF A LOCAL CHECKPOINT ROUND. */
+/* ------------------------------------------------------------------------- */
+ LcpCloseState clcpCompletedState;
+/* ------------------------------------------------------------------------- */
+/*DURING CONNECTION PROCESSES IN SYSTEM RESTART THESE VARIABLES KEEP TRACK */
+/*OF HOW MANY CONNECTIONS AND RELEASES THAT ARE TO BE PERFORMED. */
+/* ------------------------------------------------------------------------- */
+/***************************************************************************>*/
+/*THESE VARIABLES CONTAIN INFORMATION USED DURING SYSTEM RESTART. */
+/***************************************************************************>*/
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS ZTRUE IF THE SIGNAL START_REC_REQ HAS BEEN RECEIVED. */
+/*RECEPTION OF THIS SIGNAL INDICATES THAT ALL FRAGMENTS THAT THIS NODE */
+/*SHOULD START HAVE BEEN RECEIVED. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cstartRecReq;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS PARTICIPATE IN */
+/*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */
+/* ------------------------------------------------------------------------- */
+ UintR cnoFragmentsExecSr;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES */
+/*HAVE COMPLETED. */
+/* ------------------------------------------------------------------------- */
+ Uint8 csrPhaseStarted;
+/* ------------------------------------------------------------------------- */
+/*NUMBER OF PHASES COMPLETED OF EXECUTING THE FRAGMENT LOG. */
+/* ------------------------------------------------------------------------- */
+ Uint8 csrPhasesCompleted;
+/* ------------------------------------------------------------------------- */
+/*THE BLOCK REFERENCE OF THE MASTER DIH DURING SYSTEM RESTART. */
+/* ------------------------------------------------------------------------- */
+ BlockReference cmasterDihBlockref;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS WAITING TO BE */
+/*RESTORED FROM DISK. */
+/* ------------------------------------------------------------------------- */
+ UintR cfirstWaitFragSr;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS THAT HAVE BEEN */
+/*RESTORED FROM DISK AND THAT AWAIT EXECUTION OF THE FRAGMENT LOG. */
+/* ------------------------------------------------------------------------- */
+ UintR cfirstCompletedFragSr;
+
+ /**
+ * List of fragments for which log execution has completed
+ */
+ Uint32 c_redo_log_complete_frags;
+
+/* ------------------------------------------------------------------------- */
+/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */
+/*FROM AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG TAIL. */
+/* ------------------------------------------------------------------------- */
+ UintR crestartOldestGci;
+/* ------------------------------------------------------------------------- */
+/*USED DURING SYSTEM RESTART, INDICATES THE NEWEST GCI THAT CAN BE RESTARTED */
+/*AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG HEAD. */
+/* ------------------------------------------------------------------------- */
+ UintR crestartNewestGci;
+/* ------------------------------------------------------------------------- */
+/*THE NUMBER OF LOG FILES. SET AS A PARAMETER WHEN NDB IS STARTED. */
+/* ------------------------------------------------------------------------- */
+ UintR cnoLogFiles;
+/* ------------------------------------------------------------------------- */
+/*THESE TWO VARIABLES CONTAIN THE NEWEST GCI RECEIVED IN THE BLOCK AND THE */
+/*NEWEST COMPLETED GCI IN THE BLOCK. */
+/* ------------------------------------------------------------------------- */
+ UintR cnewestGci;
+ UintR cnewestCompletedGci;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE ONLY PASSES INFORMATION FROM STTOR TO STTORRY = TEMPORARY */
+/* ------------------------------------------------------------------------- */
+ Uint16 csignalKey;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS THE CURRENT START PHASE IN THE BLOCK. IS ZNIL IF */
+/*NO SYSTEM RESTART IS ONGOING. */
+/* ------------------------------------------------------------------------- */
+ Uint16 cstartPhase;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS THE CURRENT GLOBAL CHECKPOINT RECORD. IT IS RNIL IF */
+/*NO GCP SAVE IS ONGOING. */
+/* ------------------------------------------------------------------------- */
+ UintR ccurrentGcprec;
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES ARE USED TO KEEP TRACK OF ALL ACTIVE COPY FRAGMENTS IN LQH.*/
+/* ------------------------------------------------------------------------- */
+ Uint8 cnoActiveCopy;
+ UintR cactiveCopy[4];
+
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES CONTAIN THE BLOCK REFERENCES OF THE OTHER NDB BLOCKS. */
+/*ALSO THE BLOCK REFERENCE OF MY OWN BLOCK = LQH */
+/* ------------------------------------------------------------------------- */
+ BlockReference caccBlockref;
+ BlockReference ctupBlockref;
+ BlockReference ctuxBlockref;
+ BlockReference cownref;
+ UintR cLqhTimeOutCount;
+ UintR cLqhTimeOutCheckCount;
+ UintR cnoOfLogPages;
+ bool caccCommitBlocked;
+ bool ctupCommitBlocked;
+ bool cCommitBlocked;
+ UintR cCounterAccCommitBlocked;
+ UintR cCounterTupCommitBlocked;
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE CONTAINS MY OWN PROCESSOR ID. */
+/* ------------------------------------------------------------------------- */
+ NodeId cownNodeid;
+
+/* ------------------------------------------------------------------------- */
+/*THESE VARIABLES CONTAIN INFORMATION ABOUT THE OTHER NODES IN THE SYSTEM. */
+/*THESE VARIABLES ARE MOSTLY USED AT SYSTEM RESTART AND ADD NODE TO SET UP */
+/*AND RELEASE CONNECTIONS TO OTHER NODES IN THE CLUSTER. */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/*THIS ARRAY CONTAINS THE PROCESSOR IDS OF THE NODES THAT ARE ALIVE. */
+/*CNO_OF_NODES SPECIFIES HOW MANY NODES ARE CURRENTLY ALIVE. */
+/*CNODE_VERSION SPECIFIES THE NDB VERSION EXECUTING ON THE NODE. */
+/* ------------------------------------------------------------------------- */
+ UintR cpackedListIndex;
+ Uint16 cpackedList[MAX_NDB_NODES];
+ UintR cnodeData[MAX_NDB_NODES];
+ UintR cnodeStatus[MAX_NDB_NODES];
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS SENT ALL FRAGMENTS THAT */
+/*NEED TO HAVE THE LOG EXECUTED. */
+/* ------------------------------------------------------------------------- */
+ Uint8 cnodeSrState[MAX_NDB_NODES];
+/* ------------------------------------------------------------------------- */
+/*THIS VARIABLE INDICATES WHETHER A CERTAIN NODE HAS EXECUTED THE LOG */
+/* ------------------------------------------------------------------------- */
+ Uint8 cnodeExecSrState[MAX_NDB_NODES];
+ UintR cnoOfNodes;
+
+/* ------------------------------------------------------------------------- */
+/* THIS VARIABLE CONTAINS THE DIRECTORY OF A HASH TABLE OF ALL ACTIVE */
+/* OPERATIONS IN THE BLOCK. IT IS USED TO BE ABLE TO QUICKLY ABORT AN */
+/* OPERATION WHERE THE CONNECTION WAS LOST DUE TO NODE FAILURES. IT IS */
+/* ACTUALLY USED FOR ALL ABORTS COMMANDED BY TC. */
+/* ------------------------------------------------------------------------- */
+ UintR preComputedRequestInfoMask;
+ UintR ctransidHash[1024];
+
+ Uint32 c_diskless;
+
+public:
+ /**
+ * Commit ack marker, kept until removed via REMOVE_MARKER_ORD (see
+ * below); stored in a hash table keyed on the transaction id.
+ */
+ struct CommitAckMarker {
+ Uint32 transid1;
+ Uint32 transid2;
+
+ Uint32 apiRef; // Api block ref
+ Uint32 apiOprec; // Connection Object in NDB API
+ Uint32 tcNodeId;
+ union { Uint32 nextPool; Uint32 nextHash; };
+ Uint32 prevHash;
+
+ inline bool equal(const CommitAckMarker & p) const {
+ return ((p.transid1 == transid1) && (p.transid2 == transid2));
+ }
+
+ inline Uint32 hashValue() const {
+ return transid1;
+ }
+ };
+
+ typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
+ ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
+ DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
+ typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
+ void execREMOVE_MARKER_ORD(Signal* signal);
+ void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i);
+
+ struct Counters {
+ Uint32 operations;
+
+ inline void clear(){
+ operations = 0;
+ }
+ };
+
+ Counters c_Counters;
+
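+ // Reads are allowed as long as the node's start level has not reached
+ // stop level SL_STOPPING_3.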
+ inline bool getAllowRead() const {
+ return getNodeState().startLevel < NodeState::SL_STOPPING_3;
+ }
+
+ DLHashTable<ScanRecord> c_scanTakeOverHash;
+};
+
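+// A scan batch is considered complete as soon as either the configured row
+// limit or the configured byte limit has been reached; a limit of zero
+// disables that particular check.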
+inline
+bool
+Dblqh::ScanRecord::check_scan_batch_completed() const
+{
+ Uint32 max_rows = m_max_batch_size_rows;
+ Uint32 max_bytes = m_max_batch_size_bytes;
+
+ return (max_rows > 0 && (m_curr_batch_size_rows >= max_rows)) ||
+ (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
+}
+
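+// Returns a pointer to the ACC operation pointer with the given index for a
+// scan: index 0 is stored inline in scan_acc_op_ptr[0], while indexes 1..32
+// live in the Attrbuf referenced by scan_acc_op_ptr[1], indexes 33..64 in
+// scan_acc_op_ptr[2], and so on (32 entries per Attrbuf).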
+inline
+void
+Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
+{
+ if (index == 0) {
+ acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
+ } else {
+ Uint32 attr_buf_index, attr_buf_rec;
+
+ AttrbufPtr regAttrPtr;
+ jam();
+ attr_buf_rec= (index + 31) / 32;
+ attr_buf_index= (index - 1) & 31;
+ regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
+ }
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
new file mode 100644
index 00000000000..e39d0ca68a6
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -0,0 +1,455 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <pc.hpp>
+#define DBLQH_C
+#include "Dblqh.hpp"
+#include <ndb_limits.h>
+
+#define DEBUG(x) { ndbout << "LQH::" << x << endl; }
+
+void Dblqh::initData()
+{
+ caddfragrecFileSize = ZADDFRAGREC_FILE_SIZE;
+ cattrinbufFileSize = ZATTRINBUF_FILE_SIZE;
+ c_no_attrinbuf_recs= ZATTRINBUF_FILE_SIZE;
+ cdatabufFileSize = ZDATABUF_FILE_SIZE;
+ cfragrecFileSize = 0;
+ cgcprecFileSize = ZGCPREC_FILE_SIZE;
+ chostFileSize = MAX_NDB_NODES;
+ clcpFileSize = ZNO_CONCURRENT_LCP;
+ clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
+ clfoFileSize = ZLFO_FILE_SIZE;
+ clogFileFileSize = 0;
+ clogPartFileSize = ZLOG_PART_FILE_SIZE;
+ cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
+ cscanrecFileSize = ZSCANREC_FILE_SIZE;
+ ctabrecFileSize = 0;
+ ctcConnectrecFileSize = 0;
+ ctcNodeFailrecFileSize = MAX_NDB_NODES;
+
+ addFragRecord = 0;
+ attrbuf = 0;
+ databuf = 0;
+ fragrecord = 0;
+ gcpRecord = 0;
+ hostRecord = 0;
+ lcpRecord = 0;
+ lcpLocRecord = 0;
+ logPartRecord = 0;
+ logFileRecord = 0;
+ logFileOperationRecord = 0;
+ logPageRecord = 0;
+ pageRefRecord = 0;
+ tablerec = 0;
+ tcConnectionrec = 0;
+ tcNodeFailRecord = 0;
+
+ // Records with constant sizes
+
+ cLqhTimeOutCount = 0;
+ cLqhTimeOutCheckCount = 0;
+ cbookedAccOps = 0;
+ c_redo_log_complete_frags = RNIL;
+}//Dblqh::initData()
+
+void Dblqh::initRecords()
+{
+ // Records with dynamic sizes
+ addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord",
+ sizeof(AddFragRecord),
+ caddfragrecFileSize);
+ attrbuf = (Attrbuf*)allocRecord("Attrbuf",
+ sizeof(Attrbuf),
+ cattrinbufFileSize);
+
+ databuf = (Databuf*)allocRecord("Databuf",
+ sizeof(Databuf),
+ cdatabufFileSize);
+
+ fragrecord = (Fragrecord*)allocRecord("Fragrecord",
+ sizeof(Fragrecord),
+ cfragrecFileSize);
+
+ gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
+ sizeof(GcpRecord),
+ cgcprecFileSize);
+
+ hostRecord = (HostRecord*)allocRecord("HostRecord",
+ sizeof(HostRecord),
+ chostFileSize);
+
+ lcpRecord = (LcpRecord*)allocRecord("LcpRecord",
+ sizeof(LcpRecord),
+ clcpFileSize);
+
+ for(Uint32 i = 0; i<clcpFileSize; i++){
+ new (&lcpRecord[i])LcpRecord();
+ }
+
+ lcpLocRecord = (LcpLocRecord*)allocRecord("LcpLocRecord",
+ sizeof(LcpLocRecord),
+ clcpLocrecFileSize);
+
+ logPartRecord = (LogPartRecord*)allocRecord("LogPartRecord",
+ sizeof(LogPartRecord),
+ clogPartFileSize);
+
+ logFileRecord = (LogFileRecord*)allocRecord("LogFileRecord",
+ sizeof(LogFileRecord),
+ clogFileFileSize);
+
+ logFileOperationRecord = (LogFileOperationRecord*)
+ allocRecord("LogFileOperationRecord",
+ sizeof(LogFileOperationRecord),
+ clfoFileSize);
+
+ logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
+ sizeof(LogPageRecord),
+ clogPageFileSize,
+ false);
+
+ pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
+ sizeof(PageRefRecord),
+ cpageRefFileSize);
+
+ cscanNoFreeRec = cscanrecFileSize;
+ c_scanRecordPool.setSize(cscanrecFileSize);
+ c_scanTakeOverHash.setSize(64);
+
+ tablerec = (Tablerec*)allocRecord("Tablerec",
+ sizeof(Tablerec),
+ ctabrecFileSize);
+
+ tcConnectionrec = (TcConnectionrec*)allocRecord("TcConnectionrec",
+ sizeof(TcConnectionrec),
+ ctcConnectrecFileSize);
+
+ m_commitAckMarkerPool.setSize(ctcConnectrecFileSize);
+ m_commitAckMarkerHash.setSize(1024);
+
+ tcNodeFailRecord = (TcNodeFailRecord*)allocRecord("TcNodeFailRecord",
+ sizeof(TcNodeFailRecord),
+ ctcNodeFailrecFileSize);
+
+ /*
+ ndbout << "FRAGREC SIZE = " << sizeof(Fragrecord) << endl;
+ ndbout << "TAB SIZE = " << sizeof(Tablerec) << endl;
+ ndbout << "GCP SIZE = " << sizeof(GcpRecord) << endl;
+ ndbout << "LCP SIZE = " << sizeof(LcpRecord) << endl;
+ ndbout << "LCPLOC SIZE = " << sizeof(LcpLocRecord) << endl;
+ ndbout << "LOGPART SIZE = " << sizeof(LogPartRecord) << endl;
+ ndbout << "LOGFILE SIZE = " << sizeof(LogFileRecord) << endl;
+ ndbout << "TC SIZE = " << sizeof(TcConnectionrec) << endl;
+ ndbout << "HOST SIZE = " << sizeof(HostRecord) << endl;
+ ndbout << "LFO SIZE = " << sizeof(LogFileOperationRecord) << endl;
+ ndbout << "PR SIZE = " << sizeof(PageRefRecord) << endl;
+ ndbout << "SCAN SIZE = " << sizeof(ScanRecord) << endl;
+*/
+
+ // Initialize BAT for interface to file system
+ NewVARIABLE* bat = allocateBat(2);
+ bat[1].WA = &logPageRecord->logPageWord[0];
+ bat[1].nrr = clogPageFileSize;
+ bat[1].ClusterSize = sizeof(LogPageRecord);
+ bat[1].bits.q = ZTWOLOG_PAGE_SIZE;
+ bat[1].bits.v = 5;
+}//Dblqh::initRecords()
+
+Dblqh::Dblqh(const class Configuration & conf):
+ SimulatedBlock(DBLQH, conf),
+ m_commitAckMarkerHash(m_commitAckMarkerPool),
+ c_scanTakeOverHash(c_scanRecordPool)
+{
+ Uint32 log_page_size= 0;
+ BLOCK_CONSTRUCTOR(Dblqh);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
+ &log_page_size);
+
+ /**
+ * Round the number of redo log pages up to a whole number of half-MBytes.
+ */
+ clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
+ Uint32 mega_byte_part= clogPageFileSize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ clogPageFileSize+= (16 - mega_byte_part);
+ }
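+ // Illustrative arithmetic, assuming the 32 kByte (8192-word) log page used
+ // by DBLQH: 16 pages correspond to 0.5 MByte, so rounding clogPageFileSize
+ // up to a multiple of 16 rounds the redo buffer up to the next half-MByte.
+ // E.g. a RedoBuffer of 1000 kByte gives 31 pages, which is rounded to 32.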
+
+ addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
+ addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
+ addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
+ addRecSignal(GSN_KEYINFO, &Dblqh::execKEYINFO);
+ addRecSignal(GSN_LQHKEYREQ, &Dblqh::execLQHKEYREQ);
+ addRecSignal(GSN_LQHKEYREF, &Dblqh::execLQHKEYREF);
+ addRecSignal(GSN_COMMIT, &Dblqh::execCOMMIT);
+ addRecSignal(GSN_COMPLETE, &Dblqh::execCOMPLETE);
+ addRecSignal(GSN_LQHKEYCONF, &Dblqh::execLQHKEYCONF);
+#ifdef VM_TRACE
+ addRecSignal(GSN_TESTSIG, &Dblqh::execTESTSIG);
+#endif
+ addRecSignal(GSN_LQH_RESTART_OP, &Dblqh::execLQH_RESTART_OP);
+ addRecSignal(GSN_CONTINUEB, &Dblqh::execCONTINUEB);
+ addRecSignal(GSN_START_RECREQ, &Dblqh::execSTART_RECREQ);
+ addRecSignal(GSN_START_RECCONF, &Dblqh::execSTART_RECCONF);
+ addRecSignal(GSN_EXEC_FRAGREQ, &Dblqh::execEXEC_FRAGREQ);
+ addRecSignal(GSN_EXEC_FRAGCONF, &Dblqh::execEXEC_FRAGCONF);
+ addRecSignal(GSN_EXEC_FRAGREF, &Dblqh::execEXEC_FRAGREF);
+ addRecSignal(GSN_START_EXEC_SR, &Dblqh::execSTART_EXEC_SR);
+ addRecSignal(GSN_EXEC_SRREQ, &Dblqh::execEXEC_SRREQ);
+ addRecSignal(GSN_EXEC_SRCONF, &Dblqh::execEXEC_SRCONF);
+ addRecSignal(GSN_SCAN_HBREP, &Dblqh::execSCAN_HBREP);
+
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dblqh::execALTER_TAB_REQ);
+
+ // Trigger signals, transit to from TUP
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dblqh::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Dblqh::execCREATE_TRIG_CONF);
+ addRecSignal(GSN_CREATE_TRIG_REF, &Dblqh::execCREATE_TRIG_REF);
+
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dblqh::execDROP_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Dblqh::execDROP_TRIG_CONF);
+ addRecSignal(GSN_DROP_TRIG_REF, &Dblqh::execDROP_TRIG_REF);
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dblqh::execDUMP_STATE_ORD);
+ addRecSignal(GSN_ACC_COM_BLOCK, &Dblqh::execACC_COM_BLOCK);
+ addRecSignal(GSN_ACC_COM_UNBLOCK, &Dblqh::execACC_COM_UNBLOCK);
+ addRecSignal(GSN_TUP_COM_BLOCK, &Dblqh::execTUP_COM_BLOCK);
+ addRecSignal(GSN_TUP_COM_UNBLOCK, &Dblqh::execTUP_COM_UNBLOCK);
+ addRecSignal(GSN_NODE_FAILREP, &Dblqh::execNODE_FAILREP);
+ addRecSignal(GSN_CHECK_LCP_STOP, &Dblqh::execCHECK_LCP_STOP);
+ addRecSignal(GSN_SEND_PACKED, &Dblqh::execSEND_PACKED);
+ addRecSignal(GSN_TUP_ATTRINFO, &Dblqh::execTUP_ATTRINFO);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dblqh::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_LQHFRAGREQ, &Dblqh::execLQHFRAGREQ);
+ addRecSignal(GSN_LQHADDATTREQ, &Dblqh::execLQHADDATTREQ);
+ addRecSignal(GSN_TUP_ADD_ATTCONF, &Dblqh::execTUP_ADD_ATTCONF);
+ addRecSignal(GSN_TUP_ADD_ATTRREF, &Dblqh::execTUP_ADD_ATTRREF);
+ addRecSignal(GSN_ACCFRAGCONF, &Dblqh::execACCFRAGCONF);
+ addRecSignal(GSN_ACCFRAGREF, &Dblqh::execACCFRAGREF);
+ addRecSignal(GSN_TUPFRAGCONF, &Dblqh::execTUPFRAGCONF);
+ addRecSignal(GSN_TUPFRAGREF, &Dblqh::execTUPFRAGREF);
+ addRecSignal(GSN_TAB_COMMITREQ, &Dblqh::execTAB_COMMITREQ);
+ addRecSignal(GSN_ACCSEIZECONF, &Dblqh::execACCSEIZECONF);
+ addRecSignal(GSN_ACCSEIZEREF, &Dblqh::execACCSEIZEREF);
+ addRecSignal(GSN_READ_NODESCONF, &Dblqh::execREAD_NODESCONF);
+ addRecSignal(GSN_READ_NODESREF, &Dblqh::execREAD_NODESREF);
+ addRecSignal(GSN_STTOR, &Dblqh::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Dblqh::execNDB_STTOR);
+ addRecSignal(GSN_TUPSEIZECONF, &Dblqh::execTUPSEIZECONF);
+ addRecSignal(GSN_TUPSEIZEREF, &Dblqh::execTUPSEIZEREF);
+ addRecSignal(GSN_ACCKEYCONF, &Dblqh::execACCKEYCONF);
+ addRecSignal(GSN_ACCKEYREF, &Dblqh::execACCKEYREF);
+ addRecSignal(GSN_TUPKEYCONF, &Dblqh::execTUPKEYCONF);
+ addRecSignal(GSN_TUPKEYREF, &Dblqh::execTUPKEYREF);
+ addRecSignal(GSN_ABORT, &Dblqh::execABORT);
+ addRecSignal(GSN_ABORTREQ, &Dblqh::execABORTREQ);
+ addRecSignal(GSN_COMMITREQ, &Dblqh::execCOMMITREQ);
+ addRecSignal(GSN_COMPLETEREQ, &Dblqh::execCOMPLETEREQ);
+#ifdef VM_TRACE
+ addRecSignal(GSN_MEMCHECKREQ, &Dblqh::execMEMCHECKREQ);
+#endif
+ addRecSignal(GSN_SCAN_FRAGREQ, &Dblqh::execSCAN_FRAGREQ);
+ addRecSignal(GSN_SCAN_NEXTREQ, &Dblqh::execSCAN_NEXTREQ);
+ addRecSignal(GSN_ACC_SCANCONF, &Dblqh::execACC_SCANCONF);
+ addRecSignal(GSN_ACC_SCANREF, &Dblqh::execACC_SCANREF);
+ addRecSignal(GSN_NEXT_SCANCONF, &Dblqh::execNEXT_SCANCONF);
+ addRecSignal(GSN_NEXT_SCANREF, &Dblqh::execNEXT_SCANREF);
+ addRecSignal(GSN_STORED_PROCCONF, &Dblqh::execSTORED_PROCCONF);
+ addRecSignal(GSN_STORED_PROCREF, &Dblqh::execSTORED_PROCREF);
+ addRecSignal(GSN_COPY_FRAGREQ, &Dblqh::execCOPY_FRAGREQ);
+ addRecSignal(GSN_COPY_ACTIVEREQ, &Dblqh::execCOPY_ACTIVEREQ);
+ addRecSignal(GSN_COPY_STATEREQ, &Dblqh::execCOPY_STATEREQ);
+ addRecSignal(GSN_LQH_TRANSREQ, &Dblqh::execLQH_TRANSREQ);
+ addRecSignal(GSN_TRANSID_AI, &Dblqh::execTRANSID_AI);
+ addRecSignal(GSN_INCL_NODEREQ, &Dblqh::execINCL_NODEREQ);
+ addRecSignal(GSN_ACC_LCPCONF, &Dblqh::execACC_LCPCONF);
+ addRecSignal(GSN_ACC_LCPREF, &Dblqh::execACC_LCPREF);
+ addRecSignal(GSN_ACC_LCPSTARTED, &Dblqh::execACC_LCPSTARTED);
+ addRecSignal(GSN_ACC_CONTOPCONF, &Dblqh::execACC_CONTOPCONF);
+ addRecSignal(GSN_LCP_FRAGIDCONF, &Dblqh::execLCP_FRAGIDCONF);
+ addRecSignal(GSN_LCP_FRAGIDREF, &Dblqh::execLCP_FRAGIDREF);
+ addRecSignal(GSN_LCP_HOLDOPCONF, &Dblqh::execLCP_HOLDOPCONF);
+ addRecSignal(GSN_LCP_HOLDOPREF, &Dblqh::execLCP_HOLDOPREF);
+ addRecSignal(GSN_TUP_PREPLCPCONF, &Dblqh::execTUP_PREPLCPCONF);
+ addRecSignal(GSN_TUP_PREPLCPREF, &Dblqh::execTUP_PREPLCPREF);
+ addRecSignal(GSN_TUP_LCPCONF, &Dblqh::execTUP_LCPCONF);
+ addRecSignal(GSN_TUP_LCPREF, &Dblqh::execTUP_LCPREF);
+ addRecSignal(GSN_TUP_LCPSTARTED, &Dblqh::execTUP_LCPSTARTED);
+ addRecSignal(GSN_END_LCPCONF, &Dblqh::execEND_LCPCONF);
+
+ addRecSignal(GSN_EMPTY_LCP_REQ, &Dblqh::execEMPTY_LCP_REQ);
+ addRecSignal(GSN_LCP_FRAG_ORD, &Dblqh::execLCP_FRAG_ORD);
+
+ addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ);
+ addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF);
+ addRecSignal(GSN_SR_FRAGIDCONF, &Dblqh::execSR_FRAGIDCONF);
+ addRecSignal(GSN_SR_FRAGIDREF, &Dblqh::execSR_FRAGIDREF);
+ addRecSignal(GSN_ACC_SRCONF, &Dblqh::execACC_SRCONF);
+ addRecSignal(GSN_ACC_SRREF, &Dblqh::execACC_SRREF);
+ addRecSignal(GSN_TUP_SRCONF, &Dblqh::execTUP_SRCONF);
+ addRecSignal(GSN_TUP_SRREF, &Dblqh::execTUP_SRREF);
+ addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ);
+ addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dblqh::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dblqh::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF);
+ addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
+ addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
+ addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
+ addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
+ addRecSignal(GSN_FSSYNCREF, &Dblqh::execFSSYNCREF);
+ addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
+
+ //addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dblqh::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REQ, &Dblqh::execWAIT_DROP_TAB_REQ);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dblqh::execDROP_TAB_REQ);
+
+ addRecSignal(GSN_LQH_ALLOCREQ, &Dblqh::execLQH_ALLOCREQ);
+ addRecSignal(GSN_LQH_WRITELOG_REQ, &Dblqh::execLQH_WRITELOG_REQ);
+
+ // TUX
+ addRecSignal(GSN_TUXFRAGCONF, &Dblqh::execTUXFRAGCONF);
+ addRecSignal(GSN_TUXFRAGREF, &Dblqh::execTUXFRAGREF);
+ addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
+ addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
+
+ addRecSignal(GSN_READ_PSUEDO_REQ, &Dblqh::execREAD_PSUEDO_REQ);
+
+ initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = {
+ &addfragptr,
+ &attrinbufptr,
+ &databufptr,
+ &fragptr,
+ &gcpPtr,
+ &lcpPtr,
+ &lcpLocptr,
+ &logPartPtr,
+ &logFilePtr,
+ &lfoPtr,
+ &logPagePtr,
+ &pageRefPtr,
+ &scanptr,
+ &tabptr,
+ &tcConnectptr,
+ &tcNodeFailptr,
+ };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
+
+}//Dblqh::Dblqh()
+
+Dblqh::~Dblqh()
+{
+ // Records with dynamic sizes
+ deallocRecord((void **)&addFragRecord, "AddFragRecord",
+ sizeof(AddFragRecord),
+ caddfragrecFileSize);
+
+ deallocRecord((void**)&attrbuf,
+ "Attrbuf",
+ sizeof(Attrbuf),
+ cattrinbufFileSize);
+
+ deallocRecord((void**)&databuf,
+ "Databuf",
+ sizeof(Databuf),
+ cdatabufFileSize);
+
+ deallocRecord((void**)&fragrecord,
+ "Fragrecord",
+ sizeof(Fragrecord),
+ cfragrecFileSize);
+
+ deallocRecord((void**)&gcpRecord,
+ "GcpRecord",
+ sizeof(GcpRecord),
+ cgcprecFileSize);
+
+ deallocRecord((void**)&hostRecord,
+ "HostRecord",
+ sizeof(HostRecord),
+ chostFileSize);
+
+ deallocRecord((void**)&lcpRecord,
+ "LcpRecord",
+ sizeof(LcpRecord),
+ clcpFileSize);
+
+ deallocRecord((void**)&lcpLocRecord,
+ "LcpLocRecord",
+ sizeof(LcpLocRecord),
+ clcpLocrecFileSize);
+
+ deallocRecord((void**)&logPartRecord,
+ "LogPartRecord",
+ sizeof(LogPartRecord),
+ clogPartFileSize);
+
+ deallocRecord((void**)&logFileRecord,
+ "LogFileRecord",
+ sizeof(LogFileRecord),
+ clogFileFileSize);
+
+ deallocRecord((void**)&logFileOperationRecord,
+ "LogFileOperationRecord",
+ sizeof(LogFileOperationRecord),
+ clfoFileSize);
+
+ deallocRecord((void**)&logPageRecord,
+ "LogPageRecord",
+ sizeof(LogPageRecord),
+ clogPageFileSize);
+
+ deallocRecord((void**)&pageRefRecord,
+ "PageRefRecord",
+ sizeof(PageRefRecord),
+ cpageRefFileSize);
+
+
+ deallocRecord((void**)&tablerec,
+ "Tablerec",
+ sizeof(Tablerec),
+ ctabrecFileSize);
+
+ deallocRecord((void**)&tcConnectionrec,
+ "TcConnectionrec",
+ sizeof(TcConnectionrec),
+ ctcConnectrecFileSize);
+
+ deallocRecord((void**)&tcNodeFailRecord,
+ "TcNodeFailRecord",
+ sizeof(TcNodeFailRecord),
+ ctcNodeFailrecFileSize);
+}//Dblqh::~Dblqh()
+
+BLOCK_FUNCTIONS(Dblqh)
+
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
new file mode 100644
index 00000000000..c34d4ddb566
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -0,0 +1,18661 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBLQH_C
+#include "Dblqh.hpp"
+#include <ndb_limits.h>
+#include <md5_hash.hpp>
+
+#include <ndb_version.h>
+#include <signaldata/TuxBound.hpp>
+#include <signaldata/AccScan.hpp>
+#include <signaldata/CopyActive.hpp>
+#include <signaldata/CopyFrag.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/EmptyLcp.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/ExecFragReq.hpp>
+#include <signaldata/GCPSave.hpp>
+#include <signaldata/TcKeyRef.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/NextScan.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/RelTabMem.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/SrFragidConf.hpp>
+#include <signaldata/StartFragReq.hpp>
+#include <signaldata/StartRec.hpp>
+#include <signaldata/TupKey.hpp>
+#include <signaldata/TupCommit.hpp>
+#include <signaldata/LqhFrag.hpp>
+#include <signaldata/AccFrag.hpp>
+#include <signaldata/TupFrag.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/PackedSignal.hpp>
+
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+
+#include <signaldata/AlterTab.hpp>
+
+#include <signaldata/LCP.hpp>
+
+// Use DEBUG to print messages that should be
+// seen only when we debug the product
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBLQH: "<< x << endl;
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::TransactionState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::LogWriteState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::ListState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::TcConnectionrec::AbortState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::LogFileOperationRecord::LfoState state){
+ out << (int)state;
+ return out;
+}
+
+NdbOut &
+operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){
+ out << (int)state;
+ return out;
+}
+
+#else
+#define DEBUG(x)
+#endif
+
+//#define MARKER_TRACE 1
+//#define TRACE_SCAN_TAKEOVER 1
+
+const Uint32 NR_ScanNo = 0;
+
+void Dblqh::execACC_COM_BLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// The undo log buffer in ACC is critically close to becoming full.
+/* ------------------------------------------------------------------------- */
+ cCounterAccCommitBlocked++;
+ caccCommitBlocked = true;
+ cCommitBlocked = true;
+ return;
+}//Dblqh::execACC_COM_BLOCK()
+
+void Dblqh::execACC_COM_UNBLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// Undo log buffer in ACC ok again.
+/* ------------------------------------------------------------------------- */
+ caccCommitBlocked = false;
+ if (ctupCommitBlocked == false) {
+ jam();
+ cCommitBlocked = false;
+ }//if
+ return;
+}//Dblqh::execACC_COM_UNBLOCK()
+
+void Dblqh::execTUP_COM_BLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// The undo log buffer in TUP is critically close to becoming full.
+/* ------------------------------------------------------------------------- */
+ cCounterTupCommitBlocked++;
+ ctupCommitBlocked = true;
+ cCommitBlocked = true;
+ return;
+}//Dblqh::execTUP_COM_BLOCK()
+
+void Dblqh::execTUP_COM_UNBLOCK(Signal* signal)
+{
+ jamEntry();
+/* ------------------------------------------------------------------------- */
+// Undo log buffer in TUP ok again.
+/* ------------------------------------------------------------------------- */
+ ctupCommitBlocked = false;
+ if (caccCommitBlocked == false) {
+ jam();
+ cCommitBlocked = false;
+ }//if
+ return;
+}//Dblqh::execTUP_COM_UNBLOCK()
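+
+// Taken together, the four handlers above maintain cCommitBlocked: it is set
+// as soon as either ACC or TUP reports that its undo log buffer is close to
+// full, and it is cleared again only once both blocks have reported that
+// their buffers are OK.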
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND SYSTEM ERROR ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::systemError(Signal* signal)
+{
+ progError(0, 0);
+}//Dblqh::systemError()
+
+/* *************** */
+/* ACCSEIZEREF > */
+/* *************** */
+void Dblqh::execACCSEIZEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACCSEIZEREF()
+
+/* ******************************************************>> */
+/* THIS SIGNAL IS USED TO HANDLE REAL-TIME */
+/* BREAKS THAT ARE NECESSARY TO ENSURE REAL-TIME */
+/* OPERATION OF LQH. */
+/* This signal is also used for signal loops, for example */
+/* the timeout handling for writing logs every second. */
+/* ******************************************************>> */
+void Dblqh::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcase = signal->theData[0];
+ Uint32 data0 = signal->theData[1];
+ Uint32 data1 = signal->theData[2];
+ Uint32 data2 = signal->theData[3];
+#if 0
+ if (tcase == RNIL) {
+ tcConnectptr.i = data0;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ndbout << "State = " << tcConnectptr.p->transactionState;
+ ndbout << " seqNoReplica = " << tcConnectptr.p->seqNoReplica;
+ ndbout << " tcNodeFailrec = " << tcConnectptr.p->tcNodeFailrec;
+ ndbout << " activeCreat = " << tcConnectptr.p->activeCreat;
+ ndbout << endl;
+ ndbout << "tupkeyData0 = " << tcConnectptr.p->tupkeyData[0];
+ ndbout << "tupkeyData1 = " << tcConnectptr.p->tupkeyData[1];
+ ndbout << "tupkeyData2 = " << tcConnectptr.p->tupkeyData[2];
+ ndbout << "tupkeyData3 = " << tcConnectptr.p->tupkeyData[3];
+ ndbout << endl;
+ ndbout << "abortState = " << tcConnectptr.p->abortState;
+ ndbout << "listState = " << tcConnectptr.p->listState;
+ ndbout << endl;
+ return;
+ }//if
+#endif
+ switch (tcase) {
+ case ZLOG_LQHKEYREQ:
+ if (cnoOfLogPages == 0) {
+ jam();
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ }//if
+ logPartPtr.i = data0;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+
+ tcConnectptr.i = logPartPtr.p->firstLogQueue;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if ((cCommitBlocked == true) &&
+ (fragptr.p->fragActiveStatus == ZTRUE)) {
+ jam();
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ }//if
+ logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
+ getFirstInLogQueue(signal);
+
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::LOG_QUEUED:
+ if (tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE) {
+ jam();
+ logNextStart(signal);
+ abortCommonLab(signal);
+ return;
+ } else {
+ jam();
+/*------------------------------------------------------------*/
+/* WE MUST SET THE STATE OF THE LOG PART TO IDLE TO */
+/* ENSURE THAT WE ARE NOT QUEUED AGAIN ON THE LOG PART. */
+/* WE WILL SET THE LOG PART STATE TO ACTIVE IMMEDIATELY */
+/* SO NO OTHER PROCESS WILL SEE THIS STATE. IT IS MERELY*/
+/* USED TO ENABLE REUSE OF CODE. */
+/*------------------------------------------------------------*/
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::IDLE;
+ }//if
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ jam();
+ writeAbortLog(signal);
+ removeLogTcrec(signal);
+ logNextStart(signal);
+ continueAfterLogAbortWriteLab(signal);
+ return;
+ break;
+ case TcConnectionrec::LOG_COMMIT_QUEUED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ jam();
+ writeCommitLog(signal, logPartPtr);
+ logNextStart(signal);
+ if (tcConnectptr.p->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
+ if (tcConnectptr.p->seqNoReplica != 0) {
+ jam();
+ commitReplyLab(signal);
+ } else {
+ jam();
+ localCommitLab(signal);
+ }//if
+ return;
+ } else {
+ jam();
+ tcConnectptr.p->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
+ return;
+ }//if
+ break;
+ case TcConnectionrec::COMMIT_QUEUED:
+ jam();
+ logNextStart(signal);
+ localCommitLab(signal);
+ break;
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+ logNextStart(signal);
+ abortCommonLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+ break;
+ case ZSR_GCI_LIMITS:
+ jam();
+ signal->theData[0] = data0;
+ srGciLimits(signal);
+ return;
+ break;
+ case ZSR_LOG_LIMITS:
+ jam();
+ signal->theData[0] = data0;
+ signal->theData[1] = data1;
+ signal->theData[2] = data2;
+ srLogLimits(signal);
+ return;
+ break;
+ case ZSEND_EXEC_CONF:
+ jam();
+ signal->theData[0] = data0;
+ sendExecConf(signal);
+ return;
+ break;
+ case ZEXEC_SR:
+ jam();
+ signal->theData[0] = data0;
+ execSr(signal);
+ return;
+ break;
+ case ZSR_FOURTH_COMP:
+ jam();
+ signal->theData[0] = data0;
+ srFourthComp(signal);
+ return;
+ break;
+ case ZINIT_FOURTH:
+ jam();
+ signal->theData[0] = data0;
+ initFourth(signal);
+ return;
+ break;
+ case ZTIME_SUPERVISION:
+ jam();
+ signal->theData[0] = data0;
+ timeSup(signal);
+ return;
+ break;
+ case ZSR_PHASE3_START:
+ jam();
+ signal->theData[0] = data0;
+ srPhase3Start(signal);
+ return;
+ break;
+ case ZLQH_TRANS_NEXT:
+ jam();
+ tcNodeFailptr.i = data0;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ lqhTransNextLab(signal);
+ return;
+ break;
+ case ZSCAN_TC_CONNECT:
+ jam();
+ tabptr.i = data1;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ scanTcConnectLab(signal, data0, data2);
+ return;
+ break;
+ case ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, data0, data2, signal->theData[4]);
+ return;
+ break;
+ case ZINIT_GCP_REC:
+ jam();
+ gcpPtr.i = 0;
+ ptrAss(gcpPtr, gcpRecord);
+ initGcpRecLab(signal);
+ return;
+ break;
+ case ZRESTART_OPERATIONS_AFTER_STOP:
+ jam();
+ tcConnectptr.i = data0;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
+ jam();
+ return;
+ }//if
+ releaseWaitQueue(signal);
+ linkActiveFrag(signal);
+ restartOperationsAfterStopLab(signal);
+ return;
+ break;
+ case ZCHECK_LCP_STOP_BLOCKED:
+ jam();
+ c_scanRecordPool.getPtr(scanptr, data0);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ checkLcpStopBlockedLab(signal);
+ return;
+ case ZSCAN_MARKERS:
+ jam();
+ scanMarkers(signal, data0, data1, data2);
+ return;
+ break;
+
+ case ZOPERATION_EVENT_REP:
+ jam();
+ /* --------------------------------------------------------------------- */
+ // Report information about transaction activity once every five seconds.
+ /* --------------------------------------------------------------------- */
+ if (signal->theData[1] == 0) {
+ signal->theData[0] = NDB_LE_OperationReportCounters;
+ signal->theData[1] = c_Counters.operations;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+ }//if
+ c_Counters.clear();
+ signal->theData[0] = ZOPERATION_EVENT_REP;
+ signal->theData[1] = 0;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 2);
+ break;
+ case ZPREP_DROP_TABLE:
+ jam();
+ checkDropTab(signal);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execCONTINUEB()
+
+/* *********************************************************> */
+/* Request from DBDIH to include a new node in the node list */
+/* and so forth. */
+/* *********************************************************> */
+void Dblqh::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ BlockReference retRef = signal->theData[0];
+ Uint32 nodeId = signal->theData[1];
+ cnewestGci = signal->theData[2];
+ cnewestCompletedGci = signal->theData[2] - 1;
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeData[i] == nodeId) {
+ jam();
+ cnodeStatus[i] = ZNODE_UP;
+ }//if
+ }//for
+ signal->theData[0] = cownref;
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ return;
+}//Dblqh::execINCL_NODEREQ()
+
+void Dblqh::execTUPSEIZEREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUPSEIZEREF()
+
+/* ########################################################################## */
+/* ####### START / RESTART MODULE ####### */
+/* ########################################################################## */
+/* ************************************************************************>> */
+/* This is the first signal that arrives in a start/restart. The sender is NDBCNTR_REF. */
+/* ************************************************************************>> */
+void Dblqh::execSTTOR(Signal* signal)
+{
+ UintR tstartPhase;
+
+ jamEntry();
+ /* START CASE */
+ tstartPhase = signal->theData[1];
+ /* SYSTEM RESTART RANK */
+ csignalKey = signal->theData[6];
+ switch (tstartPhase) {
+ case ZSTART_PHASE1:
+ jam();
+ cstartPhase = tstartPhase;
+ sttorStartphase1Lab(signal);
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ return;
+ break;
+ default:
+ jam();
+ /*empty*/;
+ sendsttorryLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execSTTOR()
+
+/* ***************************************> */
+/* Restart phases 1 - 6, sender is Ndbcntr */
+/* ***************************************> */
+void Dblqh::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ Uint32 ownNodeId = signal->theData[1]; /* MY NODE ID */
+ cstartPhase = signal->theData[2]; /* START PHASE */
+ cstartType = signal->theData[3]; /* START TYPE */
+
+ switch (cstartPhase) {
+ case ZSTART_PHASE1:
+ jam();
+ preComputedRequestInfoMask = 0;
+ LqhKeyReq::setKeyLen(preComputedRequestInfoMask, RI_KEYLEN_MASK);
+ LqhKeyReq::setLastReplicaNo(preComputedRequestInfoMask, RI_LAST_REPL_MASK);
+ LqhKeyReq::setLockType(preComputedRequestInfoMask, RI_LOCK_TYPE_MASK);
+ // Don't LqhKeyReq::setApplicationAddressFlag
+ LqhKeyReq::setDirtyFlag(preComputedRequestInfoMask, 1);
+ // Don't LqhKeyReq::setInterpretedFlag
+ LqhKeyReq::setSimpleFlag(preComputedRequestInfoMask, 1);
+ LqhKeyReq::setOperation(preComputedRequestInfoMask, RI_OPERATION_MASK);
+ // Don't setAIInLqhKeyReq
+ // Don't setSeqNoReplica
+ // Don't setSameClientAndTcFlag
+ // Don't setReturnedReadLenAIFlag
+ // Don't setAPIVersion
+ LqhKeyReq::setMarkerFlag(preComputedRequestInfoMask, 1);
+ //preComputedRequestInfoMask = 0x003d7fff;
+ startphase1Lab(signal, /* dummy */ ~0, ownNodeId);
+
+ signal->theData[0] = ZOPERATION_EVENT_REP;
+ signal->theData[1] = 1;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ break;
+ case ZSTART_PHASE2:
+ jam();
+ startphase2Lab(signal, /* dummy */ ~0);
+ return;
+ break;
+ case ZSTART_PHASE3:
+ jam();
+ startphase3Lab(signal);
+ return;
+ break;
+ case ZSTART_PHASE4:
+ jam();
+ startphase4Lab(signal);
+ return;
+ break;
+ case ZSTART_PHASE6:
+ jam();
+ startphase6Lab(signal);
+ return;
+ break;
+ default:
+ jam();
+ /*empty*/;
+ sendNdbSttorryLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execNDB_STTOR()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 1 +++++++ */
+/* LOAD OUR BLOCK REFERENCE AND OUR PROCESSOR ID */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::sttorStartphase1Lab(Signal* signal)
+{
+ sendsttorryLab(signal);
+ return;
+}//Dblqh::sttorStartphase1Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 2 +++++++ */
+/* */
+/* INITIATE ALL RECORDS WITHIN THE BLOCK */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase1Lab(Signal* signal, Uint32 _dummy, Uint32 ownNodeId)
+{
+ UintR Ti;
+ HostRecordPtr ThostPtr;
+
+/* ------- INITIATE ALL RECORDS ------- */
+ cownNodeid = ownNodeId;
+ caccBlockref = calcAccBlockRef (cownNodeid);
+ ctupBlockref = calcTupBlockRef (cownNodeid);
+ ctuxBlockref = calcTuxBlockRef (cownNodeid);
+ cownref = calcLqhBlockRef (cownNodeid);
+ for (Ti = 0; Ti < chostFileSize; Ti++) {
+ ThostPtr.i = Ti;
+ ptrCheckGuard(ThostPtr, chostFileSize, hostRecord);
+ ThostPtr.p->hostLqhBlockRef = calcLqhBlockRef(ThostPtr.i);
+ ThostPtr.p->hostTcBlockRef = calcTcBlockRef(ThostPtr.i);
+ ThostPtr.p->inPackedList = false;
+ ThostPtr.p->noOfPackedWordsLqh = 0;
+ ThostPtr.p->noOfPackedWordsTc = 0;
+ }//for
+ cpackedListIndex = 0;
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase1Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 2 +++++++ */
+/* */
+/* CONNECT LQH WITH ACC AND TUP. */
+/* EVERY CONNECTION RECORD IN LQH IS ASSIGNED TO ONE ACC CONNECTION RECORD */
+/* AND ONE TUP CONNECTION RECORD. */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase2Lab(Signal* signal, Uint32 _dummy)
+{
+ cmaxWordsAtNodeRec = MAX_NO_WORDS_OUTSTANDING_COPY_FRAGMENT;
+/* -- ACC AND TUP CONNECTION PROCESS -- */
+ tcConnectptr.i = 0;
+ ptrAss(tcConnectptr, tcConnectionrec);
+ moreconnectionsLab(signal);
+ return;
+}//Dblqh::startphase2Lab()
+
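+/* ------- CONNECT ONE TC CONNECT RECORD TO ACC AND TUP ------- */
+/* Store the ACC and TUX block references and request an ACC operation */
+/* record with ACCSEIZEREQ; the chain continues in execACCSEIZECONF and */
+/* execTUPSEIZECONF until all connection records are connected. */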
+void Dblqh::moreconnectionsLab(Signal* signal)
+{
+ tcConnectptr.p->tcAccBlockref = caccBlockref;
+ // set TUX block here (no operation is seized in TUX)
+ tcConnectptr.p->tcTuxBlockref = ctuxBlockref;
+/* NO STATE CHECKING IS PERFORMED, ASSUMED TO WORK */
+/* *************** */
+/* ACCSEIZEREQ < */
+/* *************** */
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_ACCSEIZEREQ, signal, 2, JBB);
+ return;
+}//Dblqh::moreconnectionsLab()
+
+/* ***************> */
+/* ACCSEIZECONF > */
+/* ***************> */
+void Dblqh::execACCSEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.p->accConnectrec = signal->theData[1];
+/* *************** */
+/* TUPSEIZEREQ < */
+/* *************** */
+ tcConnectptr.p->tcTupBlockref = ctupBlockref;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_TUPSEIZEREQ, signal, 2, JBB);
+ return;
+}//Dblqh::execACCSEIZECONF()
+
+/* ***************> */
+/* TUPSEIZECONF > */
+/* ***************> */
+void Dblqh::execTUPSEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.p->tupConnectrec = signal->theData[1];
+/* ------- CHECK IF THERE ARE MORE CONNECTIONS TO BE CONNECTED ------- */
+ tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
+ if (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ moreconnectionsLab(signal);
+ return;
+ }//if
+/* ALL LQH_CONNECT RECORDS ARE CONNECTED TO ACC AND TUP ---- */
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::execTUPSEIZECONF()
+
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* +++++++ START PHASE 4 +++++++ */
+/* */
+/* CONNECT LQH WITH LQH. */
+/* CONNECT EACH LQH WITH EVERY LQH IN THE DATABASE SYSTEM. */
+/* IF INITIAL START THEN CREATE THE FRAGMENT LOG FILES */
+/*IF SYSTEM RESTART OR NODE RESTART THEN OPEN THE FRAGMENT LOG FILES AND */
+/*FIND THE END OF THE LOG FILES. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* WAIT UNTIL ADD NODE PROCESSES ARE COMPLETED */
+/* IF INITIAL START ALSO WAIT FOR LOG FILES TO BE INITIALISED */
+/*START TIME SUPERVISION OF LOG FILES. WE HAVE TO WRITE LOG PAGES TO DISK */
+/*EVEN IF THE PAGES ARE NOT FULL TO ENSURE THAT THEY COME TO DISK ASAP. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase3Lab(Signal* signal)
+{
+ LogFileRecordPtr prevLogFilePtr;
+ LogFileRecordPtr zeroLogFilePtr;
+
+ caddNodeState = ZTRUE;
+/* ***************<< */
+/* READ_NODESREQ < */
+/* ***************<< */
+ cinitialStartOngoing = ZTRUE;
+ ndbrequire(cnoLogFiles != 0);
+
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ initLogpart(signal);
+ for (Uint32 fileNo = 0; fileNo < cnoLogFiles; fileNo++) {
+ seizeLogfile(signal);
+ if (fileNo != 0) {
+ jam();
+ prevLogFilePtr.p->nextLogFile = logFilePtr.i;
+ logFilePtr.p->prevLogFile = prevLogFilePtr.i;
+ } else {
+ jam();
+ logPartPtr.p->firstLogfile = logFilePtr.i;
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ zeroLogFilePtr.i = logFilePtr.i;
+ zeroLogFilePtr.p = logFilePtr.p;
+ }//if
+ prevLogFilePtr.i = logFilePtr.i;
+ prevLogFilePtr.p = logFilePtr.p;
+ initLogfile(signal, fileNo);
+ if ((cstartType == NodeState::ST_INITIAL_START) ||
+ (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ if (logFilePtr.i == zeroLogFilePtr.i) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IN AN INITIAL START WE START BY CREATING ALL LOG FILES AND SETTING THEIR */
+/*PROPER SIZE AND INITIALISING PAGE ZERO IN ALL FILES. */
+/*WE START BY CREATING FILE ZERO IN EACH LOG PART AND THEN PROCEED */
+/*SEQUENTIALLY THROUGH ALL LOG FILES IN THE LOG PART. */
+/* ------------------------------------------------------------------------- */
+ openLogfileInit(signal);
+ }//if
+ }//if
+ }//for
+ zeroLogFilePtr.p->prevLogFile = logFilePtr.i;
+ logFilePtr.p->nextLogFile = zeroLogFilePtr.i;
+ }//for
+ if (cstartType != NodeState::ST_INITIAL_START &&
+ cstartType != NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+ ndbrequire(cstartType == NodeState::ST_NODE_RESTART ||
+ cstartType == NodeState::ST_SYSTEM_RESTART);
+ /** --------------------------------------------------------------------
+ * THIS CODE KICKS OFF THE SYSTEM RESTART AND NODE RESTART. IT STARTS UP
+ * THE RESTART BY FINDING THE END OF THE LOG AND FROM THERE FINDING THE
+ * INFO ABOUT THE GLOBAL CHECKPOINTS IN THE FRAGMENT LOG.
+ --------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ LogFileRecordPtr locLogFilePtr;
+ ptrAss(logPartPtr, logPartRecord);
+ locLogFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FRONTPAGE;
+ openFileRw(signal, locLogFilePtr);
+ }//for
+ }//if
+
+ signal->theData[0] = cownref;
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+}//Dblqh::startphase3Lab()
+
+/* ****************** */
+/* READ_NODESCONF > */
+/* ****************** */
+void Dblqh::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+ cnoOfNodes = readNodes->noOfNodes;
+
+ unsigned ind = 0;
+ unsigned i = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ jam();
+ cnodeData[ind] = i;
+ cnodeStatus[ind] = NodeBitmask::get(readNodes->inactiveNodes, i);
+ //readNodes->getVersionId(i, readNodes->theVersionIds) not used
+ ind++;
+ }//if
+ }//for
+ ndbrequire(ind == cnoOfNodes);
+ ndbrequire(cnoOfNodes >= 1 && cnoOfNodes < MAX_NDB_NODES);
+ ndbrequire(!(cnoOfNodes == 1 && cstartType == NodeState::ST_NODE_RESTART));
+
+ caddNodeState = ZFALSE;
+ if (cstartType == NodeState::ST_SYSTEM_RESTART) {
+ jam();
+ sendNdbSttorryLab(signal);
+ return;
+ }//if
+ checkStartCompletedLab(signal);
+ return;
+}//Dblqh::execREAD_NODESCONF()
+
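+/* ------- CHECK IF THE START PHASE IS COMPLETED ------- */
+/* Send NDB_STTORRY only when the add node handling (caddNodeState) and the */
+/* initial start work (cinitialStartOngoing) are both finished. */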
+void Dblqh::checkStartCompletedLab(Signal* signal)
+{
+ if (caddNodeState == ZFALSE) {
+ if (cinitialStartOngoing == ZFALSE) {
+ jam();
+ sendNdbSttorryLab(signal);
+ return;
+ }//if
+ }//if
+ return;
+}//Dblqh::checkStartCompletedLab()
+
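+/* ------- START PHASE 4 ------- */
+/* No work is performed here; the phase is acknowledged immediately. */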
+void Dblqh::startphase4Lab(Signal* signal)
+{
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase4Lab()
+
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* SET CONCURRENCY OF LOCAL CHECKPOINTS TO BE USED AFTER SYSTEM RESTART. */
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+void Dblqh::startphase6Lab(Signal* signal)
+{
+ cstartPhase = ZNIL;
+ cstartType = ZNIL;
+ sendNdbSttorryLab(signal);
+ return;
+}//Dblqh::startphase6Lab()
+
+void Dblqh::sendNdbSttorryLab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB);
+ return;
+}//Dblqh::sendNdbSttorryLab()
+
+void Dblqh::sendsttorryLab(Signal* signal)
+{
+/* *********<< */
+/* STTORRY < */
+/* *********<< */
+ signal->theData[0] = csignalKey; /* SIGNAL KEY */
+ signal->theData[1] = 3; /* BLOCK CATEGORY */
+ signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSTART_PHASE1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Dblqh::sendsttorryLab()
+
+/* ***************>> */
+/* READ_NODESREF > */
+/* ***************>> */
+void Dblqh::execREAD_NODESREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execREAD_NODESREF()
+
+/* *************** */
+/* READ_CONFIG_REQ > */
+/* *************** */
+void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ cnoLogFiles = 8;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES,
+ &cnoLogFiles));
+ ndbrequire(cnoLogFiles > 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &cfragrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
+ &ctcConnectrecFileSize));
+ clogFileFileSize = 4 * cnoLogFiles;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
+ cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ return;
+}//Dblqh::execREAD_CONFIG_REQ()
+
+/* ########################################################################## */
+/* ####### ADD/DELETE FRAGMENT MODULE ####### */
+/* THIS MODULE IS USED BY DICTIONARY TO CREATE NEW FRAGMENTS AND DELETE */
+/* OLD FRAGMENTS. */
+/* */
+/* ########################################################################## */
+/* -------------------------------------------------------------- */
+/* FRAG REQ */
+/* -------------------------------------------------------------- */
+/* *********************************************************> */
+/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
+/* *********************************************************> */
+
+// This unbelievable mess could be replaced by one signal to LQH
+// and an execute-direct to the local DICT to get everything at once.
+
+void Dblqh::execLQHFRAGREQ(Signal* signal)
+{
+ jamEntry();
+ LqhFragReq * req = (LqhFragReq*)signal->getDataPtr();
+
+ Uint32 retPtr = req->senderData;
+ BlockReference retRef = req->senderRef;
+ Uint32 fragId = req->fragmentId;
+ Uint32 reqinfo = req->requestInfo;
+ tabptr.i = req->tableId;
+ Uint16 tlocalKeylen = req->localKeyLength;
+ Uint32 tmaxLoadFactor = req->maxLoadFactor;
+ Uint32 tminLoadFactor = req->minLoadFactor;
+ Uint8 tk = req->kValue;
+ Uint8 tlhstar = req->lh3DistrBits;
+ Uint8 tlh = req->lh3PageBits;
+ Uint32 tnoOfAttr = req->noOfAttributes;
+ Uint32 tnoOfNull = req->noOfNullAttributes;
+ Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+ Uint32 tschemaVersion = req->schemaVersion;
+ Uint32 ttupKeyLength = req->keyLength;
+ Uint32 nextLcp = req->nextLCP;
+ Uint32 noOfKeyAttr = req->noOfKeyAttr;
+ Uint32 noOfNewAttr = req->noOfNewAttr;
+ Uint32 checksumIndicator = req->checksumIndicator;
+ Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
+ Uint32 gcpIndicator = req->GCPIndicator;
+ Uint32 startGci = req->startGci;
+ Uint32 tableType = req->tableType;
+ Uint32 primaryTableId = req->primaryTableId;
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ bool tempTable = ((reqinfo & LqhFragReq::TemporaryTable) != 0);
+
+ /* Temporary tables set to defined in system restart */
+ if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ tabptr.p->tableStatus = Tablerec::ADD_TABLE_ONGOING;
+ tabptr.p->tableType = tableType;
+ tabptr.p->primaryTableId = primaryTableId;
+ tabptr.p->schemaVersion = tschemaVersion;
+ }//if
+
+ if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING){
+ jam();
+ fragrefLab(signal, retRef, retPtr, ZTAB_STATE_ERROR);
+ return;
+ }//if
+ //--------------------------------------------------------------------
+ // We could arrive here if we create the fragment as part of a take
+ // over by a hot spare node. The table is then already created
+ // and bit 31 is set, thus indicating that we are creating a fragment
+ // by copy creation. Also, since the node has already been started, we
+ // know that there is no node restart ongoing.
+ //--------------------------------------------------------------------
+
+ if (getFragmentrec(signal, fragId)) {
+ jam();
+ fragrefLab(signal, retRef, retPtr, terrorCode);
+ return;
+ }//if
+ if (!insertFragrec(signal, fragId)) {
+ jam();
+ fragrefLab(signal, retRef, retPtr, terrorCode);
+ return;
+ }//if
+ Uint32 copyType = reqinfo & 3;
+ initFragrec(signal, tabptr.i, fragId, copyType);
+ fragptr.p->startGci = startGci;
+ fragptr.p->newestGci = startGci;
+ fragptr.p->tableType = tableType;
+
+ if (DictTabInfo::isOrderedIndex(tableType)) {
+ jam();
+ // find corresponding primary table fragment
+ TablerecPtr tTablePtr;
+ tTablePtr.i = primaryTableId;
+ ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = RNIL;
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
+ if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
+ jam();
+ tFragPtr.i = tTablePtr.p->fragrec[i];
+ break;
+ }
+ }
+ ndbrequire(tFragPtr.i != RNIL);
+ // store it
+ fragptr.p->tableFragptr = tFragPtr.i;
+ } else {
+ fragptr.p->tableFragptr = fragptr.i;
+ }
+
+ if (tempTable) {
+//--------------------------------------------
+// reqinfo bit 3-4 = 2 means temporary table
+// without logging or checkpointing.
+//--------------------------------------------
+ jam();
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE;
+ }//if
+
+ fragptr.p->nextLcp = nextLcp;
+//----------------------------------------------
+// For node restarts it is not necessarily zero
+//----------------------------------------------
+ if (cfirstfreeAddfragrec == RNIL) {
+ jam();
+ deleteFragrec(fragId);
+ fragrefLab(signal, retRef, retPtr, ZNO_ADD_FRAGREC);
+ return;
+ }//if
+ seizeAddfragrec(signal);
+ addfragptr.p->addFragid = fragId;
+ addfragptr.p->fragmentPtr = fragptr.i;
+ addfragptr.p->dictBlockref = retRef;
+ addfragptr.p->dictConnectptr = retPtr;
+ addfragptr.p->m_senderAttrPtr = RNIL;
+ addfragptr.p->noOfAttr = tnoOfAttr;
+ addfragptr.p->noOfNull = tnoOfNull;
+ addfragptr.p->noOfAllocPages = noOfAlloc;
+ addfragptr.p->tabId = tabptr.i;
+ addfragptr.p->totalAttrReceived = 0;
+ addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
+ addfragptr.p->schemaVer = tschemaVersion;
+ Uint32 tmp = (reqinfo & LqhFragReq::CreateInRunning);
+ addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1);
+ addfragptr.p->addfragErrorCode = 0;
+ addfragptr.p->noOfKeyAttr = noOfKeyAttr;
+ addfragptr.p->noOfNewAttr = noOfNewAttr;
+ addfragptr.p->checksumIndicator = checksumIndicator;
+ addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
+ addfragptr.p->GCPIndicator = gcpIndicator;
+ addfragptr.p->lh3DistrBits = tlhstar;
+ addfragptr.p->tableType = tableType;
+ addfragptr.p->primaryTableId = primaryTableId;
+ //
+ addfragptr.p->tup1Connectptr = RNIL;
+ addfragptr.p->tup2Connectptr = RNIL;
+ addfragptr.p->tux1Connectptr = RNIL;
+ addfragptr.p->tux2Connectptr = RNIL;
+
+ if (DictTabInfo::isTable(tableType) ||
+ DictTabInfo::isHashIndex(tableType)) {
+ jam();
+ AccFragReq* const accreq = (AccFragReq*)signal->getDataPtrSend();
+ accreq->userPtr = addfragptr.i;
+ accreq->userRef = cownref;
+ accreq->tableId = tabptr.i;
+ accreq->reqInfo = copyType << 4;
+ accreq->fragId = fragId;
+ accreq->localKeyLen = tlocalKeylen;
+ accreq->maxLoadFactor = tmaxLoadFactor;
+ accreq->minLoadFactor = tminLoadFactor;
+ accreq->kValue = tk;
+ accreq->lhFragBits = tlhstar;
+ accreq->lhDirBits = tlh;
+ accreq->keyLength = ttupKeyLength;
+ /* ----------------------------------------------------------------------- */
+ /* Send ACCFRAGREQ, when confirmation is received send 2 * TUPFRAGREQ to */
+ /* create 2 tuple fragments on this node. */
+ /* ----------------------------------------------------------------------- */
+ addfragptr.p->addfragStatus = AddFragRecord::ACC_ADDFRAG;
+ sendSignal(fragptr.p->accBlockref, GSN_ACCFRAGREQ,
+ signal, AccFragReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(tableType)) {
+ jam();
+ // NOTE: next 2 lines stolen from ACC
+ addfragptr.p->fragid1 = (fragId << 1) | 0;
+ addfragptr.p->fragid2 = (fragId << 1) | 1;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
+ sendAddFragReq(signal);
+ return;
+ }
+ ndbrequire(false);
+}//Dblqh::execLQHFRAGREQ()
+
+/* *************** */
+/* ACCFRAGCONF > */
+/* *************** */
+void Dblqh::execACCFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ Uint32 taccConnectptr = signal->theData[1];
+ Uint32 fragId1 = signal->theData[2];
+ Uint32 fragId2 = signal->theData[3];
+ Uint32 accFragPtr1 = signal->theData[4];
+ Uint32 accFragPtr2 = signal->theData[5];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
+
+ addfragptr.p->accConnectptr = taccConnectptr;
+ addfragptr.p->fragid1 = fragId1;
+ addfragptr.p->fragid2 = fragId2;
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->accFragptr[0] = accFragPtr1;
+ fragptr.p->accFragptr[1] = accFragPtr2;
+
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
+ sendAddFragReq(signal);
+}//Dblqh::execACCFRAGCONF()
+
+/* *************** */
+/* TUPFRAGCONF > */
+/* *************** */
+void Dblqh::execTUPFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ Uint32 tupConnectptr = signal->theData[1];
+ Uint32 tupFragPtr = signal->theData[2]; /* TUP FRAGMENT POINTER */
+ Uint32 localFragId = signal->theData[3]; /* LOCAL FRAGMENT ID */
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (localFragId == addfragptr.p->fragid1) {
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ } else if (localFragId == addfragptr.p->fragid2) {
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ } else {
+ ndbrequire(false);
+ return;
+ }//if
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::WAIT_TWO_TUP:
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ addfragptr.p->tup1Connectptr = tupConnectptr;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUP;
+ sendAddFragReq(signal);
+ break;
+ case AddFragRecord::WAIT_ONE_TUP:
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ addfragptr.p->tup2Connectptr = tupConnectptr;
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUX;
+ sendAddFragReq(signal);
+ break;
+ }
+ goto done_with_frag;
+ break;
+ case AddFragRecord::WAIT_TWO_TUX:
+ jam();
+ fragptr.p->tuxFragptr[0] = tupFragPtr;
+ addfragptr.p->tux1Connectptr = tupConnectptr;
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ONE_TUX;
+ sendAddFragReq(signal);
+ break;
+ case AddFragRecord::WAIT_ONE_TUX:
+ jam();
+ fragptr.p->tuxFragptr[1] = tupFragPtr;
+ addfragptr.p->tux2Connectptr = tupConnectptr;
+ goto done_with_frag;
+ break;
+ done_with_frag:
+ /* ---------------------------------------------------------------- */
+ /* Finished create of fragments. Now ready for creating attributes. */
+ /* ---------------------------------------------------------------- */
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
+ {
+ LqhFragConf* conf = (LqhFragConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->lqhFragPtr = addfragptr.i;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHFRAGCONF,
+ signal, LqhFragConf::SignalLength, JBB);
+ }
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}//Dblqh::execTUPFRAGCONF()
+
+/* *************** */
+/* TUXFRAGCONF > */
+/* *************** */
+void Dblqh::execTUXFRAGCONF(Signal* signal)
+{
+ jamEntry();
+ execTUPFRAGCONF(signal);
+}//Dblqh::execTUXFRAGCONF
+
+/*
+ * Add fragment in TUP or TUX. Called up to 4 times.
+ */
+void
+Dblqh::sendAddFragReq(Signal* signal)
+{
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
+ if (DictTabInfo::isTable(addfragptr.p->tableType) ||
+ DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
+ jam();
+ signal->theData[0] = addfragptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = 0; /* ADD TABLE */
+ signal->theData[3] = addfragptr.p->tabId;
+ signal->theData[4] = addfragptr.p->noOfAttr;
+ signal->theData[5] =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
+ ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
+ signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
+ signal->theData[7] = addfragptr.p->noOfNull;
+ signal->theData[8] = addfragptr.p->schemaVer;
+ signal->theData[9] = addfragptr.p->noOfKeyAttr;
+ signal->theData[10] = addfragptr.p->noOfNewAttr;
+ signal->theData[11] = addfragptr.p->checksumIndicator;
+ signal->theData[12] = addfragptr.p->noOfAttributeGroups;
+ signal->theData[13] = addfragptr.p->GCPIndicator;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
+ signal, TupFragReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ jam();
+ signal->theData[0] = addfragptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = 0; /* ADD TABLE */
+ signal->theData[3] = addfragptr.p->tabId;
+ signal->theData[4] = 1; /* ordered index: one array attr */
+ signal->theData[5] =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
+ ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
+ signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
+ signal->theData[7] = 0; /* ordered index: no nullable */
+ signal->theData[8] = addfragptr.p->schemaVer;
+ signal->theData[9] = 1; /* ordered index: one key */
+ signal->theData[10] = addfragptr.p->noOfNewAttr;
+ signal->theData[11] = addfragptr.p->checksumIndicator;
+ signal->theData[12] = addfragptr.p->noOfAttributeGroups;
+ signal->theData[13] = addfragptr.p->GCPIndicator;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
+ signal, TupFragReq::SignalLength, JBB);
+ return;
+ }
+ }
+ if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ||
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUX) {
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ jam();
+ TuxFragReq* const tuxreq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxreq->userPtr = addfragptr.i;
+ tuxreq->userRef = cownref;
+ tuxreq->reqInfo = 0; /* ADD TABLE */
+ tuxreq->tableId = addfragptr.p->tabId;
+ ndbrequire(addfragptr.p->noOfAttr >= 2);
+ tuxreq->noOfAttr = addfragptr.p->noOfAttr - 1; /* skip NDB$TNODE */
+ tuxreq->fragId =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX
+ ? addfragptr.p->fragid1: addfragptr.p->fragid2;
+ tuxreq->fragOff = addfragptr.p->lh3DistrBits;
+ tuxreq->tableType = addfragptr.p->tableType;
+ tuxreq->primaryTableId = addfragptr.p->primaryTableId;
+ // pointer to index fragment in TUP
+ tuxreq->tupIndexFragPtrI =
+ addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX ?
+ fragptr.p->tupFragptr[0] : fragptr.p->tupFragptr[1];
+ // pointers to table fragments in TUP and ACC
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tuxreq->tupTableFragPtrI[0] = tFragPtr.p->tupFragptr[0];
+ tuxreq->tupTableFragPtrI[1] = tFragPtr.p->tupFragptr[1];
+ tuxreq->accTableFragPtrI[0] = tFragPtr.p->accFragptr[0];
+ tuxreq->accTableFragPtrI[1] = tFragPtr.p->accFragptr[1];
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ,
+ signal, TuxFragReq::SignalLength, JBB);
+ return;
+ }
+ }
+ ndbrequire(false);
+}//Dblqh::sendAddFragReq
+
+/* ************************************************************************> */
+/* LQHADDATTRREQ: Request from DICT to create attributes for the new table. */
+/* ************************************************************************> */
+void Dblqh::execLQHADDATTREQ(Signal* signal)
+{
+ jamEntry();
+ LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtr();
+
+ addfragptr.i = req->lqhFragPtr;
+ const Uint32 tnoOfAttr = req->noOfAttributes;
+ const Uint32 senderData = req->senderData;
+ const Uint32 senderAttrPtr = req->senderAttrPtr;
+
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::WAIT_ADD_ATTR);
+ ndbrequire((tnoOfAttr != 0) && (tnoOfAttr <= LqhAddAttrReq::MAX_ATTRIBUTES));
+ addfragptr.p->totalAttrReceived += tnoOfAttr;
+ ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
+
+ addfragptr.p->attrReceived = tnoOfAttr;
+ for (Uint32 i = 0; i < tnoOfAttr; i++) {
+ addfragptr.p->attributes[i] = req->attributes[i];
+ }//for
+ addfragptr.p->attrSentToTup = 0;
+ ndbrequire(addfragptr.p->dictConnectptr == senderData);
+ addfragptr.p->m_senderAttrPtr = senderAttrPtr;
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+}//Dblqh::execLQHADDATTREQ()
+
+/* *********************>> */
+/* TUP_ADD_ATTCONF > */
+/* *********************>> */
+void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ // implies that operation was released on the other side
+ const bool lastAttr = signal->theData[1];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::TUP_ATTR_WAIT1:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tup1Connectptr = RNIL;
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
+ sendAddAttrReq(signal);
+ break;
+ case AddFragRecord::TUP_ATTR_WAIT2:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tup2Connectptr = RNIL;
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
+ addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+ break;
+ }
+ goto done_with_attr;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT1:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tux1Connectptr = RNIL;
+ addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
+ sendAddAttrReq(signal);
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT2:
+ jam();
+ if (lastAttr)
+ addfragptr.p->tux2Connectptr = RNIL;
+ goto done_with_attr;
+ break;
+ done_with_attr:
+ addfragptr.p->attrSentToTup = addfragptr.p->attrSentToTup + 1;
+ ndbrequire(addfragptr.p->attrSentToTup <= addfragptr.p->attrReceived);
+ ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr);
+ if (addfragptr.p->attrSentToTup < addfragptr.p->attrReceived) {
+ // more in this batch
+ jam();
+ addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT1;
+ sendAddAttrReq(signal);
+ } else if (addfragptr.p->totalAttrReceived < addfragptr.p->noOfAttr) {
+ // more batches to receive
+ jam();
+ addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR;
+ LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
+ conf->fragId = addfragptr.p->addFragid;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF,
+ signal, LqhAddAttrConf::SignalLength, JBB);
+ } else {
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ /* ------------------------------------------------------------------
+ * WE HAVE NOW COMPLETED ADDING THIS FRAGMENT. WE NOW NEED TO SET THE
+ * PROPER STATE IN FRAG_STATUS DEPENDENT ON IF WE ARE CREATING A NEW
+ * REPLICA OR IF WE ARE CREATING A TABLE. FOR FRAGMENTS IN COPY
+ * PROCESS WE DO NOT WANT LOGGING ACTIVATED.
+ * ----------------------------------------------------------------- */
+ if (addfragptr.p->fragCopyCreation == 1) {
+ jam();
+ if (! DictTabInfo::isOrderedIndex(addfragptr.p->tableType))
+ fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
+ else
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ } else {
+ jam();
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ }//if
+ LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend();
+ conf->senderData = addfragptr.p->dictConnectptr;
+ conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr;
+ conf->fragId = addfragptr.p->addFragid;
+ sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF, signal,
+ LqhAddAttrConf::SignalLength, JBB);
+ releaseAddfragrec(signal);
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}
+
+/* **********************>> */
+/* TUX_ADD_ATTRCONF > */
+/* **********************>> */
+void Dblqh::execTUX_ADD_ATTRCONF(Signal* signal)
+{
+ jamEntry();
+ execTUP_ADD_ATTCONF(signal);
+}//Dblqh::execTUX_ADD_ATTRCONF
+
+/*
+ * Add attribute in TUP or TUX. Called up to 4 times.
+ */
+void
+Dblqh::sendAddAttrReq(Signal* signal)
+{
+ arrGuard(addfragptr.p->attrSentToTup, LqhAddAttrReq::MAX_ATTRIBUTES);
+ LqhAddAttrReq::Entry& entry =
+ addfragptr.p->attributes[addfragptr.p->attrSentToTup];
+ const Uint32 attrId = entry.attrId & 0xffff;
+ const Uint32 primaryAttrId = entry.attrId >> 16;
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1 ||
+ addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT2) {
+ if (DictTabInfo::isTable(addfragptr.p->tableType) ||
+ DictTabInfo::isHashIndex(addfragptr.p->tableType) ||
+ (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId == ZNIL)) {
+ jam();
+ TupAddAttrReq* const tupreq = (TupAddAttrReq*)signal->getDataPtrSend();
+ tupreq->tupConnectPtr =
+ addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT1
+ ? addfragptr.p->tup1Connectptr : addfragptr.p->tup2Connectptr;
+ tupreq->notused1 = 0;
+ tupreq->attrId = attrId;
+ tupreq->attrDescriptor = entry.attrDescriptor;
+ tupreq->extTypeInfo = entry.extTypeInfo;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
+ signal, TupAddAttrReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId != ZNIL) {
+ // this attribute is not for TUP
+ jam();
+ TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
+ tupconf->userPtr = addfragptr.i;
+ tupconf->lastAttr = false;
+ sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
+ signal, TupAddAttrConf::SignalLength, JBB);
+ return;
+ }
+ }
+ if (addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1 ||
+ addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT2) {
+ jam();
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId != ZNIL) {
+ jam();
+ TuxAddAttrReq* const tuxreq = (TuxAddAttrReq*)signal->getDataPtrSend();
+ tuxreq->tuxConnectPtr =
+ addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT1
+ ? addfragptr.p->tux1Connectptr : addfragptr.p->tux2Connectptr;
+ tuxreq->notused1 = 0;
+ tuxreq->attrId = attrId;
+ tuxreq->attrDescriptor = entry.attrDescriptor;
+ tuxreq->extTypeInfo = entry.extTypeInfo;
+ tuxreq->primaryAttrId = primaryAttrId;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUX_ADD_ATTRREQ,
+ signal, TuxAddAttrReq::SignalLength, JBB);
+ return;
+ }
+ if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) &&
+ primaryAttrId == ZNIL) {
+ // this attribute is not for TUX
+ jam();
+ TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
+ tuxconf->userPtr = addfragptr.i;
+ tuxconf->lastAttr = false;
+ sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
+ signal, TuxAddAttrConf::SignalLength, JBB);
+ return;
+ }
+ }
+ ndbrequire(false);
+}//Dblqh::sendAddAttrReq
+
+/* ************************************************************************>> */
+/* TAB_COMMITREQ: Commit the new table for use in transactions. Sender DICT. */
+/* ************************************************************************>> */
+void Dblqh::execTAB_COMMITREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 dihPtr = signal->theData[0];
+ BlockReference dihBlockref = signal->theData[1];
+ tabptr.i = signal->theData[2];
+
+ if (tabptr.i >= ctabrecFileSize) {
+ jam();
+ terrorCode = ZTAB_FILE_SIZE;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = terrorCode;
+ sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 4, JBB);
+ return;
+ }//if
+ ptrAss(tabptr, tablerec);
+ if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING) {
+ jam();
+ terrorCode = ZTAB_STATE_ERROR;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = terrorCode;
+ signal->theData[4] = tabptr.p->tableStatus;
+ sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 5, JBB);
+ ndbrequire(false);
+ return;
+ }//if
+ tabptr.p->usageCount = 0;
+ tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
+ signal->theData[0] = dihPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = tabptr.i;
+ sendSignal(dihBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB);
+ return;
+}//Dblqh::execTAB_COMMITREQ()
+
+
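+/* Send LQHFRAGREF with the given error code back to the requesting block. */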
+void Dblqh::fragrefLab(Signal* signal,
+ BlockReference fragBlockRef,
+ Uint32 fragConPtr,
+ Uint32 errorCode)
+{
+ LqhFragRef * ref = (LqhFragRef*)signal->getDataPtrSend();
+ ref->senderData = fragConPtr;
+ ref->errorCode = errorCode;
+ sendSignal(fragBlockRef, GSN_LQHFRAGREF, signal,
+ LqhFragRef::SignalLength, JBB);
+ return;
+}//Dblqh::fragrefLab()
+
+/*
+ * Abort on-going add fragment ops: send TUPFRAGREQ/TUXFRAGREQ with connect
+ * pointer -1 to each TUP/TUX connection that is still outstanding.
+ */
+void Dblqh::abortAddFragOps(Signal* signal)
+{
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = (Uint32)-1;
+ if (addfragptr.p->tup1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup1Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tup2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup2Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup2Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux1Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux2Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux2Connectptr = RNIL;
+ }
+}
+
+/* ************>> */
+/* ACCFRAGREF > */
+/* ************>> */
+void Dblqh::execACCFRAGREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ const Uint32 ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+ fragrefLab(signal, ref, senderData, errorCode);
+
+ return;
+}//Dblqh::execACCFRAGREF()
+
+/* ************>> */
+/* TUPFRAGREF > */
+/* ************>> */
+void Dblqh::execTUPFRAGREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ // no operation to release, just add some jams
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::WAIT_TWO_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_TWO_TUX:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUX:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
+
+ const Uint32 ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+ fragrefLab(signal, ref, senderData, errorCode);
+
+}//Dblqh::execTUPFRAGREF()
+
+/* ************>> */
+/* TUXFRAGREF > */
+/* ************>> */
+void Dblqh::execTUXFRAGREF(Signal* signal)
+{
+ jamEntry();
+ execTUPFRAGREF(signal);
+}//Dblqh::execTUXFRAGREF
+
+/* *********************> */
+/* TUP_ADD_ATTRREF > */
+/* *********************> */
+void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
+{
+ jamEntry();
+ addfragptr.i = signal->theData[0];
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ terrorCode = signal->theData[1];
+ addfragptr.p->addfragErrorCode = terrorCode;
+
+ // operation was released on the other side
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::TUP_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
+ addfragptr.p->tup1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUP_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
+ addfragptr.p->tup2Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
+ addfragptr.p->tux1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
+ addfragptr.p->tux2Connectptr = RNIL;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
+
+ const Uint32 Ref = addfragptr.p->dictBlockref;
+ const Uint32 senderData = addfragptr.p->dictConnectptr;
+ const Uint32 errorCode = addfragptr.p->addfragErrorCode;
+ releaseAddfragrec(signal);
+
+ LqhAddAttrRef *const ref = (LqhAddAttrRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->errorCode = errorCode;
+ sendSignal(Ref, GSN_LQHADDATTREF, signal,
+ LqhAddAttrRef::SignalLength, JBB);
+
+}//Dblqh::execTUP_ADD_ATTRREF()
+
+/* **********************> */
+/* TUX_ADD_ATTRREF > */
+/* **********************> */
+void Dblqh::execTUX_ADD_ATTRREF(Signal* signal)
+{
+ jamEntry();
+ execTUP_ADD_ATTRREF(signal);
+}//Dblqh::execTUX_ADD_ATTRREF
+
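+/* ------------------------------------------------------------------------- */
+/* PREP_DROP_TAB_REQ: Prepare dropping a table. Check the table state, mark */
+/* the table PREP_DROP_TABLE_ONGOING, clear the lists of waiting TC and DIH */
+/* blocks, confirm to the sender and start the delayed checkDropTab polling. */
+/* ------------------------------------------------------------------------- */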
+void
+Dblqh::execPREP_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ Uint32 errCode = 0;
+ errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_PREP_DROP_TAB_REQ);
+ if(errCode != 0){
+ jam();
+
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = errCode;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_ONGOING;
+ tabPtr.p->waitingTC.clear();
+ tabPtr.p->waitingDIH.clear();
+
+ PrepDropTabConf * conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+
+ signal->theData[0] = ZPREP_DROP_TABLE;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = senderRef;
+ signal->theData[3] = senderData;
+ checkDropTab(signal);
+}
+
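+/* ------------------------------------------------------------------------- */
+/* Poll (via delayed CONTINUEB) until the table usage count is zero and no */
+/* ongoing or queued LCP fragment refers to the table, then mark the table */
+/* PREP_DROP_TABLE_DONE and send WAIT_DROP_TAB_CONF to all waiting TC and */
+/* DIH blocks. */
+/* ------------------------------------------------------------------------- */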
+void
+Dblqh::checkDropTab(Signal* signal){
+
+ TablerecPtr tabPtr;
+ tabPtr.i = signal->theData[1];
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ ndbrequire(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING);
+
+ if(tabPtr.p->usageCount > 0){
+ jam();
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ bool lcpDone = true;
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if(lcpPtr.p->lcpState != LcpRecord::LCP_IDLE){
+ jam();
+
+ if(lcpPtr.p->currentFragment.lcpFragOrd.tableId == tabPtr.i){
+ jam();
+ lcpDone = false;
+ }
+
+ if(lcpPtr.p->lcpQueued &&
+ lcpPtr.p->queuedFragment.lcpFragOrd.tableId == tabPtr.i){
+ jam();
+ lcpDone = false;
+ }
+ }
+
+ if(!lcpDone){
+ jam();
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_DONE;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
+ if(tabPtr.p->waitingTC.get(i)){
+ tabPtr.p->waitingTC.clear(i);
+ sendSignal(calcTcBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ }
+ if(tabPtr.p->waitingDIH.get(i)){
+ tabPtr.p->waitingDIH.clear(i);
+ sendSignal(calcDihBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ }
+ }
+}
+
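+/* ------------------------------------------------------------------------- */
+/* WAIT_DROP_TAB_REQ: While the prepare drop is ongoing, register the */
+/* sending TC or DIH node as waiting. If the prepare drop is already done, */
+/* confirm immediately; otherwise reply with an error describing the state. */
+/* ------------------------------------------------------------------------- */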
+void
+Dblqh::execWAIT_DROP_TAB_REQ(Signal* signal){
+ jamEntry();
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtr();
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 nodeId = refToNode(senderRef);
+ Uint32 blockNo = refToBlock(senderRef);
+
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING){
+ jam();
+ switch(blockNo){
+ case DBTC:
+ tabPtr.p->waitingTC.set(nodeId);
+ break;
+ case DBDIH:
+ tabPtr.p->waitingDIH.set(nodeId);
+ break;
+ default:
+ ndbrequire(false);
+ }
+ return;
+ }
+
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ sendSignal(senderRef, GSN_WAIT_DROP_TAB_CONF, signal,
+ WaitDropTabConf::SignalLength, JBB);
+ return;
+ }
+
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtrSend();
+ ref->tableId = tabPtr.i;
+ ref->senderRef = reference();
+
+ bool ok = false;
+ switch(tabPtr.p->tableStatus){
+ case Tablerec::TABLE_DEFINED:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::IllegalTableState;
+ break;
+ case Tablerec::NOT_DEFINED:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::NoSuchTable;
+ break;
+ case Tablerec::ADD_TABLE_ONGOING:
+ ok = true;
+ ref->errorCode = WaitDropTabRef::IllegalTableState;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ // Should have been taken care of above
+ ndbrequire(false);
+ }
+ ndbrequire(ok);
+ ref->tableStatus = tabPtr.p->tableStatus;
+ sendSignal(senderRef, GSN_WAIT_DROP_TAB_REF, signal,
+ WaitDropTabRef::SignalLength, JBB);
+ return;
+}
+
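+/* ------------------------------------------------------------------------- */
+/* DROP_TAB_REQ: Drop the table. For an online drop the table state is */
+/* checked first; the fragment records are then removed (except for a */
+/* restart drop), the table is marked NOT_DEFINED and DROP_TAB_CONF is sent. */
+/* ------------------------------------------------------------------------- */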
+void
+Dblqh::execDROP_TAB_REQ(Signal* signal){
+ jamEntry();
+
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ do {
+ if(req->requestType == DropTabReq::RestartDropTab){
+ jam();
+ break;
+ }
+
+ if(req->requestType == DropTabReq::OnlineDropTab){
+ jam();
+ Uint32 errCode = 0;
+ errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_DROP_TAB_REQ);
+ if(errCode != 0){
+ jam();
+
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = errCode;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+ }
+
+ removeTable(tabPtr.i);
+
+ } while(false);
+
+ ndbrequire(tabPtr.p->usageCount == 0);
+ tabPtr.p->tableStatus = Tablerec::NOT_DEFINED;
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = senderData;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+}
+
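+/* Map the current table status to the error code to return for a */
+/* PREP_DROP_TAB_REQ or DROP_TAB_REQ; a return value of 0 means the request */
+/* is allowed in the current state. */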
+Uint32
+Dblqh::checkDropTabState(Tablerec::TableStatus status, Uint32 gsn) const{
+
+ if(gsn == GSN_PREP_DROP_TAB_REQ){
+ switch(status){
+ case Tablerec::NOT_DEFINED:
+ jam();
+ // Fall through
+ case Tablerec::ADD_TABLE_ONGOING:
+ jam();
+ return PrepDropTabRef::NoSuchTable;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ jam();
+ return PrepDropTabRef::PrepDropInProgress;
+ break;
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ jam();
+ return PrepDropTabRef::DropInProgress;
+ break;
+ case Tablerec::TABLE_DEFINED:
+ jam();
+ return 0;
+ break;
+ }
+ ndbrequire(0);
+ }
+
+ if(gsn == GSN_DROP_TAB_REQ){
+ switch(status){
+ case Tablerec::NOT_DEFINED:
+ jam();
+ // Fall through
+ case Tablerec::ADD_TABLE_ONGOING:
+ jam();
+ return DropTabRef::NoSuchTable;
+ break;
+ case Tablerec::PREP_DROP_TABLE_ONGOING:
+ jam();
+ return DropTabRef::PrepDropInProgress;
+ break;
+ case Tablerec::PREP_DROP_TABLE_DONE:
+ jam();
+ return 0;
+ break;
+ case Tablerec::TABLE_DEFINED:
+ jam();
+ return DropTabRef::DropWoPrep;
+ }
+ ndbrequire(0);
+ }
+ ndbrequire(0);
+ return RNIL;
+}
+
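+/* Delete every fragment record that is still registered for the table. */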
+void Dblqh::removeTable(Uint32 tableId)
+{
+ tabptr.i = tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] != ZNIL) {
+ jam();
+ deleteFragrec(tabptr.p->fragid[i]);
+ }//if
+ }//for
+}//Dblqh::removeTable()
+
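+/* ------------------------------------------------------------------------- */
+/* ALTER_TAB_REQ: Only the schema version of the table is updated in LQH; */
+/* the request is then confirmed unconditionally with ALTER_TAB_CONF. */
+/* ------------------------------------------------------------------------- */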
+void
+Dblqh::execALTER_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TablerecPtr tablePtr;
+ tablePtr.i = tableId;
+ ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec);
+ tablePtr.p->schemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/* ************************************************************************>>
+ * TIME_SIGNAL: Handles time-outs of local operations. This is a clean-up
+ * handler. If no other measure has succeeded in cleaning up after a time-out,
+ * this routine will remove the transaction after 120 seconds of
+ * inactivity. The check is performed once every 10 seconds. Sender is QMGR.
+ * ************************************************************************>> */
+void Dblqh::execTIME_SIGNAL(Signal* signal)
+{
+ jamEntry();
+ cLqhTimeOutCount++;
+ cLqhTimeOutCheckCount++;
+ if ((cCounterAccCommitBlocked > 0) ||
+ (cCounterTupCommitBlocked > 0)) {
+ jam();
+ signal->theData[0] = NDB_LE_UndoLogBlocked;
+ signal->theData[1] = cCounterTupCommitBlocked;
+ signal->theData[2] = cCounterAccCommitBlocked;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+ cCounterTupCommitBlocked = 0;
+ cCounterAccCommitBlocked = 0;
+ }//if
+ if (cLqhTimeOutCheckCount < 10) {
+ jam();
+ return;
+ }//if
+ cLqhTimeOutCheckCount = 0;
+#ifdef VM_TRACE
+ TcConnectionrecPtr tTcConptr;
+
+ for (tTcConptr.i = 0; tTcConptr.i < ctcConnectrecFileSize;
+ tTcConptr.i++) {
+ jam();
+ ptrAss(tTcConptr, tcConnectionrec);
+ if ((tTcConptr.p->tcTimer != 0) &&
+ ((tTcConptr.p->tcTimer + 120) < cLqhTimeOutCount)) {
+ ndbout << "Dblqh::execTIME_SIGNAL"<<endl
+ << "Timeout found in tcConnectRecord " <<tTcConptr.i<<endl
+ << " cLqhTimeOutCount = " << cLqhTimeOutCount << endl
+ << " tcTimer="<<tTcConptr.p->tcTimer<<endl
+ << " tcTimer+120="<<tTcConptr.p->tcTimer + 120<<endl;
+
+ ndbout << " transactionState = " << tTcConptr.p->transactionState<<endl;
+ ndbout << " operation = " << tTcConptr.p->operation<<endl;
+ ndbout << " tcNodeFailrec = " << tTcConptr.p->tcNodeFailrec
+ << " seqNoReplica = " << tTcConptr.p->seqNoReplica
+ << " simpleRead = " << tTcConptr.p->simpleRead
+ << endl;
+ ndbout << " replicaType = " << tTcConptr.p->replicaType
+ << " reclenAiLqhkey = " << tTcConptr.p->reclenAiLqhkey
+ << " opExec = " << tTcConptr.p->opExec
+ << endl;
+ ndbout << " opSimple = " << tTcConptr.p->opSimple
+ << " nextSeqNoReplica = " << tTcConptr.p->nextSeqNoReplica
+ << " lockType = " << tTcConptr.p->lockType
+ << " localFragptr = " << tTcConptr.p->localFragptr
+ << endl;
+ ndbout << " lastReplicaNo = " << tTcConptr.p->lastReplicaNo
+ << " indTakeOver = " << tTcConptr.p->indTakeOver
+ << " dirtyOp = " << tTcConptr.p->dirtyOp
+ << endl;
+ ndbout << " activeCreat = " << tTcConptr.p->activeCreat
+ << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
+ << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
+ << " primKeyLen = " << tTcConptr.p->primKeyLen
+ << endl;
+ ndbout << " nextReplica = " << tTcConptr.p->nextReplica
+ << " tcBlockref = " << hex << tTcConptr.p->tcBlockref
+ << " reqBlockref = " << hex << tTcConptr.p->reqBlockref
+ << " primKeyLen = " << tTcConptr.p->primKeyLen
+ << endl;
+ ndbout << " logStopPageNo = " << tTcConptr.p->logStopPageNo
+ << " logStartPageNo = " << tTcConptr.p->logStartPageNo
+ << " logStartPageIndex = " << tTcConptr.p->logStartPageIndex
+ << endl;
+ ndbout << " errorCode = " << tTcConptr.p->errorCode
+ << " clientBlockref = " << hex << tTcConptr.p->clientBlockref
+ << " applRef = " << hex << tTcConptr.p->applRef
+ << " totSendlenAi = " << tTcConptr.p->totSendlenAi
+ << endl;
+ ndbout << " totReclenAi = " << tTcConptr.p->totReclenAi
+ << " tcScanRec = " << tTcConptr.p->tcScanRec
+ << " tcScanInfo = " << tTcConptr.p->tcScanInfo
+ << " tcOprec = " << hex << tTcConptr.p->tcOprec
+ << endl;
+ ndbout << " tableref = " << tTcConptr.p->tableref
+ << " simpleTcConnect = " << tTcConptr.p->simpleTcConnect
+ << " storedProcId = " << tTcConptr.p->storedProcId
+ << " schemaVersion = " << tTcConptr.p->schemaVersion
+ << endl;
+ ndbout << " reqinfo = " << tTcConptr.p->reqinfo
+ << " reqRef = " << tTcConptr.p->reqRef
+ << " readlenAi = " << tTcConptr.p->readlenAi
+ << " prevTc = " << tTcConptr.p->prevTc
+ << endl;
+ ndbout << " prevLogTcrec = " << tTcConptr.p->prevLogTcrec
+ << " prevHashRec = " << tTcConptr.p->prevHashRec
+ << " nodeAfterNext0 = " << tTcConptr.p->nodeAfterNext[0]
+ << " nodeAfterNext1 = " << tTcConptr.p->nodeAfterNext[1]
+ << endl;
+ ndbout << " nextTcConnectrec = " << tTcConptr.p->nextTcConnectrec
+ << " nextTc = " << tTcConptr.p->nextTc
+ << " nextTcLogQueue = " << tTcConptr.p->nextTcLogQueue
+ << " nextLogTcrec = " << tTcConptr.p->nextLogTcrec
+ << endl;
+ ndbout << " nextHashRec = " << tTcConptr.p->nextHashRec
+ << " logWriteState = " << tTcConptr.p->logWriteState
+ << " logStartFileNo = " << tTcConptr.p->logStartFileNo
+ << " listState = " << tTcConptr.p->listState
+ << endl;
+ ndbout << " lastAttrinbuf = " << tTcConptr.p->lastAttrinbuf
+ << " lastTupkeybuf = " << tTcConptr.p->lastTupkeybuf
+ << " hashValue = " << tTcConptr.p->hashValue
+ << endl;
+ ndbout << " gci = " << tTcConptr.p->gci
+ << " fragmentptr = " << tTcConptr.p->fragmentptr
+ << " fragmentid = " << tTcConptr.p->fragmentid
+ << " firstTupkeybuf = " << tTcConptr.p->firstTupkeybuf
+ << endl;
+ ndbout << " firstAttrinbuf = " << tTcConptr.p->firstAttrinbuf
+ << " currTupAiLen = " << tTcConptr.p->currTupAiLen
+ << " currReclenAi = " << tTcConptr.p->currReclenAi
+ << endl;
+ ndbout << " tcTimer = " << tTcConptr.p->tcTimer
+ << " clientConnectrec = " << tTcConptr.p->clientConnectrec
+ << " applOprec = " << hex << tTcConptr.p->applOprec
+ << " abortState = " << tTcConptr.p->abortState
+ << endl;
+ ndbout << " transid0 = " << hex << tTcConptr.p->transid[0]
+ << " transid1 = " << hex << tTcConptr.p->transid[1]
+ << " tupkeyData0 = " << tTcConptr.p->tupkeyData[0]
+ << " tupkeyData1 = " << tTcConptr.p->tupkeyData[1]
+ << endl;
+ ndbout << " tupkeyData2 = " << tTcConptr.p->tupkeyData[2]
+ << " tupkeyData3 = " << tTcConptr.p->tupkeyData[3]
+ << endl;
+ switch (tTcConptr.p->transactionState) {
+
+ case TcConnectionrec::SCAN_STATE_USED:
+ if (tTcConptr.p->tcScanRec < cscanrecFileSize){
+ ScanRecordPtr TscanPtr;
+ c_scanRecordPool.getPtr(TscanPtr, tTcConptr.p->tcScanRec);
+ ndbout << " scanState = " << TscanPtr.p->scanState << endl;
+ //TscanPtr.p->scanLocalref[2];
+ ndbout << " copyPtr="<<TscanPtr.p->copyPtr
+ << " scanAccPtr="<<TscanPtr.p->scanAccPtr
+ << " scanAiLength="<<TscanPtr.p->scanAiLength
+ << endl;
+ ndbout << " m_curr_batch_size_rows="<<
+ TscanPtr.p->m_curr_batch_size_rows
+ << " m_max_batch_size_rows="<<
+ TscanPtr.p->m_max_batch_size_rows
+ << " scanErrorCounter="<<TscanPtr.p->scanErrorCounter
+ << " scanLocalFragid="<<TscanPtr.p->scanLocalFragid
+ << endl;
+ ndbout << " scanSchemaVersion="<<TscanPtr.p->scanSchemaVersion
+ << " scanStoredProcId="<<TscanPtr.p->scanStoredProcId
+ << " scanTcrec="<<TscanPtr.p->scanTcrec
+ << endl;
+ ndbout << " scanType="<<TscanPtr.p->scanType
+ << " scanApiBlockref="<<TscanPtr.p->scanApiBlockref
+ << " scanNodeId="<<TscanPtr.p->scanNodeId
+ << " scanCompletedStatus="<<TscanPtr.p->scanCompletedStatus
+ << endl;
+ ndbout << " scanFlag="<<TscanPtr.p->scanFlag
+ << " scanLockHold="<<TscanPtr.p->scanLockHold
+ << " scanLockMode="<<TscanPtr.p->scanLockMode
+ << " scanNumber="<<TscanPtr.p->scanNumber
+ << endl;
+ ndbout << " scanReleaseCounter="<<TscanPtr.p->scanReleaseCounter
+ << " scanTcWaiting="<<TscanPtr.p->scanTcWaiting
+ << " scanKeyinfoFlag="<<TscanPtr.p->scanKeyinfoFlag
+ << endl;
+ }else{
+ ndbout << "No connected scan record found" << endl;
+ }
+ break;
+ default:
+ break;
+ }//switch
+
+ // Reset the timer
+ tTcConptr.p->tcTimer = 0;
+ }//if
+ }//for
+#endif
+#ifdef VM_TRACE
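+/* Debug sweep of the log file operation records: an LFO record whose timer */
+/* lags the LQH time-out counter by more than 120 ticks is reported as lost */
+/* and its timer is reset to the current counter value. */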
+ for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
+ ptrAss(lfoPtr, logFileOperationRecord);
+ if ((lfoPtr.p->lfoTimer != 0) &&
+ ((lfoPtr.p->lfoTimer + 120) < cLqhTimeOutCount)) {
+ ndbout << "We have lost LFO record" << endl;
+ ndbout << "index = " << lfoPtr.i;
+ ndbout << " State = " << lfoPtr.p->lfoState;
+ ndbout << " Page No = " << lfoPtr.p->lfoPageNo;
+ ndbout << " noPagesRw = " << lfoPtr.p->noPagesRw;
+ ndbout << " lfoWordWritten = " << lfoPtr.p->lfoWordWritten << endl;
+ lfoPtr.p->lfoTimer = cLqhTimeOutCount;
+ }//if
+ }//for
+
+#endif
+
+#if 0
+ LcpRecordPtr TlcpPtr;
+ // Print information about the current local checkpoint
+ TlcpPtr.i = 0;
+ ptrAss(TlcpPtr, lcpRecord);
+ ndbout << "Information about LCP in this LQH" << endl
+ << " lcpState="<<TlcpPtr.p->lcpState<<endl
+ << " firstLcpLocAcc="<<TlcpPtr.p->firstLcpLocAcc<<endl
+ << " firstLcpLocTup="<<TlcpPtr.p->firstLcpLocTup<<endl
+ << " lcpAccptr="<<TlcpPtr.p->lcpAccptr<<endl
+ << " lastFragmentFlag="<<TlcpPtr.p->lastFragmentFlag<<endl
+ << " lcpQueued="<<TlcpPtr.p->lcpQueued<<endl
+ << " reportEmptyref="<< TlcpPtr.p->reportEmptyRef<<endl
+ << " reportEmpty="<<TlcpPtr.p->reportEmpty<<endl;
+#endif
+}//Dblqh::execTIME_SIGNAL()
+
+/* ######################################################################### */
+/* ####### EXECUTION MODULE ####### */
+/* THIS MODULE HANDLES THE RECEPTION OF LQHKEYREQ AND ALL PROCESSING */
+/* OF OPERATIONS ON BEHALF OF THIS REQUEST. THIS ALSO INVOLVES */
+/* RECEPTION OF VARIOUS TYPES OF ATTRINFO AND KEYINFO, AS WELL AS */
+/* COMMUNICATION WITH ACC AND TUP. */
+/* ######################################################################### */
+
+void Dblqh::noFreeRecordLab(Signal* signal,
+ const LqhKeyReq * lqhKeyReq,
+ Uint32 errCode)
+{
+ jamEntry();
+ const Uint32 transid1 = lqhKeyReq->transId1;
+ const Uint32 transid2 = lqhKeyReq->transId2;
+ const Uint32 reqInfo = lqhKeyReq->requestInfo;
+
+ if(errCode == ZNO_FREE_MARKER_RECORDS_ERROR ||
+ errCode == ZNODE_SHUTDOWN_IN_PROGESS){
+ releaseTcrec(signal, tcConnectptr);
+ }
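+/* Two reply paths: a simple read is refused directly towards the API with */
+/* TCKEYREF, while all other operations are refused back to the requesting */
+/* block with LQHKEYREF. */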
+
+ if (LqhKeyReq::getSimpleFlag(reqInfo) &&
+ LqhKeyReq::getOperation(reqInfo) == ZREAD){
+ jam();
+ ndbrequire(LqhKeyReq::getApplicationAddressFlag(reqInfo));
+ const Uint32 apiRef = lqhKeyReq->variableData[0];
+ const Uint32 apiOpRec = lqhKeyReq->variableData[1];
+
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+
+ tcKeyRef->connectPtr = apiOpRec;
+ tcKeyRef->transId[0] = transid1;
+ tcKeyRef->transId[1] = transid2;
+ tcKeyRef->errorCode = errCode;
+ sendSignal(apiRef, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ } else {
+ jam();
+
+ const Uint32 clientPtr = lqhKeyReq->clientConnectPtr;
+ Uint32 TcOprec = clientPtr;
+ if(LqhKeyReq::getSameClientAndTcFlag(reqInfo) == 1){
+ if(LqhKeyReq::getApplicationAddressFlag(reqInfo))
+ TcOprec = lqhKeyReq->variableData[2];
+ else
+ TcOprec = lqhKeyReq->variableData[0];
+ }
+
+ LqhKeyRef * const ref = (LqhKeyRef*)signal->getDataPtrSend();
+ ref->userRef = clientPtr;
+ ref->connectPtr = TcOprec;
+ ref->errorCode = errCode;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ sendSignal(signal->senderBlockRef(), GSN_LQHKEYREF, signal,
+ LqhKeyRef::SignalLength, JBB);
+ }//if
+ return;
+}//Dblqh::noFreeRecordLab()
+
+void Dblqh::LQHKEY_abort(Signal* signal, int errortype)
+{
+ switch (errortype) {
+ case 0:
+ jam();
+ terrorCode = ZCOPY_NODE_ERROR;
+ break;
+ case 1:
+ jam();
+ terrorCode = ZNO_FREE_LQH_CONNECTION;
+ break;
+ case 2:
+ jam();
+ terrorCode = signal->theData[1];
+ break;
+ case 3:
+ jam();
+ ndbrequire((tcConnectptr.p->transactionState == TcConnectionrec::WAIT_ACC_ABORT) ||
+ (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_STOPPED) ||
+ (tcConnectptr.p->transactionState == TcConnectionrec::ABORT_QUEUED));
+ return;
+ break;
+ case 4:
+ jam();
+ if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ jam();
+ terrorCode = ZTABLE_NOT_DEFINED;
+ } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ terrorCode = ZDROP_TABLE_IN_PROGRESS;
+ } else {
+ ndbrequire(0);
+ }
+ break;
+ case 5:
+ jam();
+ terrorCode = ZINVALID_SCHEMA_VERSION;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ abortErrorLab(signal);
+}//Dblqh::LQHKEY_abort()
+
+void Dblqh::LQHKEY_error(Signal* signal, int errortype)
+{
+ switch (errortype) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ case 4:
+ jam();
+ break;
+ case 5:
+ jam();
+ break;
+ case 6:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ ndbrequire(false);
+}//Dblqh::LQHKEY_error()
+
+void Dblqh::execLQHKEYREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ terrorCode = signal->theData[2];
+ Uint32 transid1 = signal->theData[3];
+ Uint32 transid2 = signal->theData[4];
+ if (tcConnectptr.i >= ctcConnectrecFileSize) {
+ errorReport(signal, 3);
+ return;
+ }//if
+/*------------------------------------------------------------------*/
+/* WE HAVE TO CHECK THAT THE SIGNAL DOES NOT BELONG TO SOMETHING */
+/* REMOVED DUE TO A TIME-OUT. */
+/*------------------------------------------------------------------*/
+ ptrAss(tcConnectptr, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->connectState) {
+ case TcConnectionrec::CONNECTED:
+ jam();
+ if ((regTcPtr->transid[0] != transid1) ||
+ (regTcPtr->transid[1] != transid2)) {
+ warningReport(signal, 14);
+ return;
+ }//if
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+ warningReport(signal, 15);
+ return;
+ }//if
+ abortErrorLab(signal);
+ return;
+ break;
+ case TcConnectionrec::LOG_CONNECTED:
+ jam();
+ logLqhkeyrefLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CONNECTED:
+ jam();
+ copyLqhKeyRefLab(signal);
+ return;
+ break;
+ default:
+ warningReport(signal, 16);
+ return;
+ break;
+ }//switch
+}//Dblqh::execLQHKEYREF()
+
+/* -------------------------------------------------------------------------- */
+/* ------- ENTER PACKED_SIGNAL ------- */
+/* Execution of a packed signal. A packed signal can contain COMMIT, COMPLETE, */
+/* LQHKEYCONF or REMOVE_MARKER signals. Each is executed by its respective */
+/* exec function. */
+/* -------------------------------------------------------------------------- */
+void Dblqh::execPACKED_SIGNAL(Signal* signal)
+{
+ Uint32 Tstep = 0;
+ Uint32 Tlength;
+ Uint32 TpackedData[28];
+ Uint32 sig0, sig1, sig2, sig3 ,sig4, sig5, sig6;
+
+ jamEntry();
+ Tlength = signal->length();
+ ndbrequire(Tlength <= 25);
+ MEMCOPY_NO_WORDS(&TpackedData[0], &signal->theData[0], Tlength);
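+/* Each packed sub-signal starts with a word whose top four bits select the */
+/* signal type (ZCOMMIT, ZCOMPLETE, ZLQHKEYCONF or ZREMOVE_MARKER). The */
+/* unpacked words are copied into the signal buffer and the matching exec */
+/* function is called directly, then Tstep advances to the next sub-signal. */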
+ while (Tlength > Tstep) {
+ switch (TpackedData[Tstep] >> 28) {
+ case ZCOMMIT:
+ jam();
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ sig3 = TpackedData[Tstep + 3];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ signal->theData[3] = sig3;
+ signal->header.theLength = 4;
+ execCOMMIT(signal);
+ Tstep += 4;
+ break;
+ case ZCOMPLETE:
+ jam();
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ signal->header.theLength = 3;
+ execCOMPLETE(signal);
+ Tstep += 3;
+ break;
+ case ZLQHKEYCONF: {
+ jam();
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ sig0 = TpackedData[Tstep + 0] & 0x0FFFFFFF;
+ sig1 = TpackedData[Tstep + 1];
+ sig2 = TpackedData[Tstep + 2];
+ sig3 = TpackedData[Tstep + 3];
+ sig4 = TpackedData[Tstep + 4];
+ sig5 = TpackedData[Tstep + 5];
+ sig6 = TpackedData[Tstep + 6];
+ lqhKeyConf->connectPtr = sig0;
+ lqhKeyConf->opPtr = sig1;
+ lqhKeyConf->userRef = sig2;
+ lqhKeyConf->readLen = sig3;
+ lqhKeyConf->transId1 = sig4;
+ lqhKeyConf->transId2 = sig5;
+ lqhKeyConf->noFiredTriggers = sig6;
+ execLQHKEYCONF(signal);
+ Tstep += LqhKeyConf::SignalLength;
+ break;
+ }
+ case ZREMOVE_MARKER:
+ jam();
+ sig0 = TpackedData[Tstep + 1];
+ sig1 = TpackedData[Tstep + 2];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->header.theLength = 2;
+ execREMOVE_MARKER_ORD(signal);
+ Tstep += 3;
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ }//switch
+ }//while
+ ndbrequire(Tlength == Tstep);
+ return;
+}//Dblqh::execPACKED_SIGNAL()
+
+void
+Dblqh::execREMOVE_MARKER_ORD(Signal* signal)
+{
+ CommitAckMarker key;
+ key.transid1 = signal->theData[0];
+ key.transid2 = signal->theData[1];
+ jamEntry();
+
+ CommitAckMarkerPtr removedPtr;
+ m_commitAckMarkerHash.release(removedPtr, key);
+ ndbrequire(removedPtr.i != RNIL);
+#ifdef MARKER_TRACE
+ ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2);
+#endif
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* ------- ENTER SEND_PACKED ------- */
+/* Used to force a packed signal to be sent if local signal buffer is not */
+/* empty. */
+/* -------------------------------------------------------------------------- */
+void Dblqh::execSEND_PACKED(Signal* signal)
+{
+ HostRecordPtr Thostptr;
+ UintR i;
+ UintR TpackedListIndex = cpackedListIndex;
+ jamEntry();
+ for (i = 0; i < TpackedListIndex; i++) {
+ Thostptr.i = cpackedList[i];
+ ptrAss(Thostptr, hostRecord);
+ jam();
+ ndbrequire(Thostptr.i - 1 < MAX_NDB_NODES - 1);
+ if (Thostptr.p->noOfPackedWordsLqh > 0) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ }//if
+ if (Thostptr.p->noOfPackedWordsTc > 0) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ }//if
+ Thostptr.p->inPackedList = false;
+ }//for
+ cpackedListIndex = 0;
+ return;
+}//Dblqh::execSEND_PACKED()
+
+void
+Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
+{
+ Uint32 TpackedListIndex = cpackedListIndex;
+ if (ahostptr->inPackedList == false) {
+ jam();
+ ahostptr->inPackedList = true;
+ cpackedList[TpackedListIndex] = hostId;
+ cpackedListIndex = TpackedListIndex + 1;
+ }//if
+}//Dblqh::updatePackedList()
+
+void
+Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
+ jamEntry();
+ TcConnectionrecPtr regTcPtr;
+ regTcPtr.i = signal->theData[0];
+ ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
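+/* A RANGE_NO request is answered directly from the scan's current range */
+/* number; any other pseudo-column read is forwarded to DBACC together with */
+/* the fragment's ACC fragment pointer. */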
+
+ if(signal->theData[1] != AttributeHeader::RANGE_NO)
+ {
+ jam();
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
+ EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
+ }
+ else
+ {
+ signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ }
+}
+
+/* ************>> */
+/* TUPKEYCONF > */
+/* ************>> */
+void Dblqh::execTUPKEYCONF(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
+ Uint32 tcIndex = tupKeyConf->userPtr;
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+ if (tcConnectptr.p->seqNoReplica == 0) // Primary replica
+ tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers;
+ tupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::COPY_TUPKEY:
+ jam();
+ copyTupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ jam();
+ scanTupkeyConfLab(signal);
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+// Abort was not ready to start until this signal came back. Now we are ready
+// to start the abort.
+/* ------------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ abortCommonLab(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* -------------------------------------------------------------------------- */
+/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* -------------------------------------------------------------------------- */
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execTUPKEYCONF()
+
+/* ************> */
+/* TUPKEYREF > */
+/* ************> */
+void Dblqh::execTUPKEYREF(Signal* signal)
+{
+ const TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtr();
+
+ jamEntry();
+ tcConnectptr.i = tupKeyRef->userRef;
+ terrorCode = tupKeyRef->errorCode;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+ releaseActiveFrag(signal);
+ abortErrorLab(signal);
+ break;
+ case TcConnectionrec::COPY_TUPKEY:
+ ndbrequire(false);
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ jam();
+ scanTupkeyRefLab(signal);
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+// Abort was not ready to start until this signal came back. Now we are ready
+// to start the abort.
+/* ------------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ abortCommonLab(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* ------------------------------------------------------------------------- */
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::execTUPKEYREF()
+
+void Dblqh::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
+{
+ Uint32 noOfWords = ahostptr->noOfPackedWordsLqh;
+ BlockReference hostRef = ahostptr->hostLqhBlockRef;
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &ahostptr->packedWordsLqh[0],
+ noOfWords);
+ sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
+ ahostptr->noOfPackedWordsLqh = 0;
+}//Dblqh::sendPackedSignalLqh()
+
+void Dblqh::sendPackedSignalTc(Signal* signal, HostRecord * ahostptr)
+{
+ Uint32 noOfWords = ahostptr->noOfPackedWordsTc;
+ BlockReference hostRef = ahostptr->hostTcBlockRef;
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &ahostptr->packedWordsTc[0],
+ noOfWords);
+ sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB);
+ ahostptr->noOfPackedWordsTc = 0;
+}//Dblqh::sendPackedSignalTc()
+
+void Dblqh::sendCommitLqh(Signal* signal, BlockReference alqhBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(alqhBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 21) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
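+/* Append a four-word COMMIT entry: [clientConnectrec | ZCOMMIT << 28, gci, */
+/* transid1, transid2]. The flush above keeps the buffer within the 25-word */
+/* packed signal limit checked in execPACKED_SIGNAL. */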
+ Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMIT << 28);
+ Uint32 gci = tcConnectptr.p->gci;
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsLqh[pos] = ptrAndType;
+ Thostptr.p->packedWordsLqh[pos + 1] = gci;
+ Thostptr.p->packedWordsLqh[pos + 2] = transid1;
+ Thostptr.p->packedWordsLqh[pos + 3] = transid2;
+ Thostptr.p->noOfPackedWordsLqh = pos + 4;
+}//Dblqh::sendCommitLqh()
+
+void Dblqh::sendCompleteLqh(Signal* signal, BlockReference alqhBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(alqhBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 22) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsLqh;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETE << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsLqh[pos] = ptrAndType;
+ Thostptr.p->packedWordsLqh[pos + 1] = transid1;
+ Thostptr.p->packedWordsLqh[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsLqh = pos + 3;
+}//Dblqh::sendCompleteLqh()
+
+void Dblqh::sendCommittedTc(Signal* signal, BlockReference atcBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsTc > 22) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsTc;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMITTED << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsTc[pos] = ptrAndType;
+ Thostptr.p->packedWordsTc[pos + 1] = transid1;
+ Thostptr.p->packedWordsTc[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsTc = pos + 3;
+}//Dblqh::sendCommittedTc()
+
+void Dblqh::sendCompletedTc(Signal* signal, BlockReference atcBlockref)
+{
+ HostRecordPtr Thostptr;
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsTc > 22) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ Uint32 pos = Thostptr.p->noOfPackedWordsTc;
+ Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETED << 28);
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Thostptr.p->packedWordsTc[pos] = ptrAndType;
+ Thostptr.p->packedWordsTc[pos + 1] = transid1;
+ Thostptr.p->packedWordsTc[pos + 2] = transid2;
+ Thostptr.p->noOfPackedWordsTc = pos + 3;
+}//Dblqh::sendCompletedTc()
+
+void Dblqh::sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref)
+{
+ LqhKeyConf* lqhKeyConf;
+ HostRecordPtr Thostptr;
+
+ Thostptr.i = refToNode(atcBlockref);
+ ptrCheckGuard(Thostptr, chostFileSize, hostRecord);
+ if (refToBlock(atcBlockref) == DBTC) {
+ jam();
+/*******************************************************************
+// This signal was intended for DBTC as part of the normal transaction
+// execution.
+********************************************************************/
+ if (Thostptr.p->noOfPackedWordsTc > (25 - LqhKeyConf::SignalLength)) {
+ jam();
+ sendPackedSignalTc(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ lqhKeyConf = (LqhKeyConf *)
+ &Thostptr.p->packedWordsTc[Thostptr.p->noOfPackedWordsTc];
+ Thostptr.p->noOfPackedWordsTc += LqhKeyConf::SignalLength;
+ } else {
+ jam();
+/*******************************************************************
+// This signal was intended for DBLQH as part of log execution or
+// node recovery.
+********************************************************************/
+ if (Thostptr.p->noOfPackedWordsLqh > (25 - LqhKeyConf::SignalLength)) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ lqhKeyConf = (LqhKeyConf *)
+ &Thostptr.p->packedWordsLqh[Thostptr.p->noOfPackedWordsLqh];
+ Thostptr.p->noOfPackedWordsLqh += LqhKeyConf::SignalLength;
+ }//if
+ Uint32 ptrAndType = tcConnectptr.i | (ZLQHKEYCONF << 28);
+ Uint32 tcOprec = tcConnectptr.p->tcOprec;
+ Uint32 ownRef = cownref;
+ Uint32 readlenAi = tcConnectptr.p->readlenAi;
+ Uint32 transid1 = tcConnectptr.p->transid[0];
+ Uint32 transid2 = tcConnectptr.p->transid[1];
+ Uint32 noFiredTriggers = tcConnectptr.p->noFiredTriggers;
+ lqhKeyConf->connectPtr = ptrAndType;
+ lqhKeyConf->opPtr = tcOprec;
+ lqhKeyConf->userRef = ownRef;
+ lqhKeyConf->readLen = readlenAi;
+ lqhKeyConf->transId1 = transid1;
+ lqhKeyConf->transId2 = transid2;
+ lqhKeyConf->noFiredTriggers = noFiredTriggers;
+}//Dblqh::sendLqhkeyconfTc()
+
+/* ************************************************************************>>
+ * KEYINFO: More key data from DBTC. Once all key/attrinfo has been received
+ * the next step is to contact DBACC to map the key to a tuple; otherwise we
+ * wait for further KEYINFO/ATTRINFO signals.
+ * ************************************************************************>> */
+void Dblqh::execKEYINFO(Signal* signal)
+{
+ Uint32 tcOprec = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (findTransaction(transid1, transid2, tcOprec) != ZOK) {
+ jam();
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::TransactionState state = regTcPtr->transactionState;
+ if (state != TcConnectionrec::WAIT_TUPKEYINFO &&
+ state != TcConnectionrec::WAIT_SCAN_AI)
+ {
+ jam();
+/*****************************************************************************/
+/* TRANSACTION WAS ABORTED, THIS IS MOST LIKELY A SIGNAL BELONGING TO THE */
+/* ABORTED TRANSACTION. THUS IGNORE THE SIGNAL. */
+/*****************************************************************************/
+ return;
+ }//if
+ Uint32 errorCode = handleLongTupKey(signal,
+ (Uint32)regTcPtr->save1,
+ (Uint32)regTcPtr->primKeyLen,
+ &signal->theData[3]);
+ if (errorCode != 0) {
+ if (errorCode == 1) {
+ jam();
+ return;
+ }//if
+ jam();
+ terrorCode = errorCode;
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ abortErrorLab(signal);
+ else
+ abort_scan(signal, regTcPtr->tcScanRec, errorCode);
+ return;
+ }//if
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ {
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ fragptr = regFragptr;
+ endgettupkeyLab(signal);
+ }
+ return;
+}//Dblqh::execKEYINFO()
+
+/* ------------------------------------------------------------------------- */
+/* FILL IN KEY DATA INTO DATA BUFFERS. */
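+/* THE KEY ARRIVES FOUR WORDS AT A TIME; EACH CHUNK IS STORED IN A DATABUF */
+/* LINKED FROM THE OPERATION. RETURNS 0 WHEN THE WHOLE KEY HAS BEEN */
+/* RECEIVED, 1 WHEN MORE KEYINFO IS NEEDED (POSITION SAVED IN SAVE1) AND */
+/* ZGET_DATAREC_ERROR WHEN NO FREE DATA BUFFER IS AVAILABLE. */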
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::handleLongTupKey(Signal* signal,
+ Uint32 keyLength,
+ Uint32 primKeyLength,
+ Uint32* dataPtr)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 dataPos = 0;
+ while (true) {
+ keyLength += 4;
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ return ZGET_DATAREC_ERROR;
+ }//if
+ seizeTupkeybuf(signal);
+ Databuf * const regDataPtr = databufptr.p;
+ Uint32 data0 = dataPtr[dataPos];
+ Uint32 data1 = dataPtr[dataPos + 1];
+ Uint32 data2 = dataPtr[dataPos + 2];
+ Uint32 data3 = dataPtr[dataPos + 3];
+ regDataPtr->data[0] = data0;
+ regDataPtr->data[1] = data1;
+ regDataPtr->data[2] = data2;
+ regDataPtr->data[3] = data3;
+ dataPos += 4;
+ if (keyLength < primKeyLength) {
+ if (dataPos > 16) {
+ jam();
+/* SAVE STATE AND WAIT FOR KEYINFO */
+ regTcPtr->save1 = keyLength;
+ return 1;
+ }//if
+ } else {
+ jam();
+ return 0;
+ }//if
+ }//while
+}//Dblqh::handleLongTupKey()
+
+/* ------------------------------------------------------------------------- */
+/* ------- HANDLE ATTRINFO SIGNALS ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ************************************************************************>> */
+/* ATTRINFO: Continuation of KEYINFO signal (except for scans that do not use*/
+/* any KEYINFO). When all key and attribute info is received we contact DBACC*/
+/* for index handling. */
+/* ************************************************************************>> */
+void Dblqh::execATTRINFO(Signal* signal)
+{
+ Uint32 tcOprec = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 length = signal->length() - 3;
+ Uint32 totReclenAi = regTcPtr->totReclenAi;
+ Uint32 currReclenAi = regTcPtr->currReclenAi + length;
+ Uint32* dataPtr = &signal->theData[3];
+ regTcPtr->currReclenAi = currReclenAi;
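+/* Three cases below: all announced ATTRINFO has now arrived, more is still */
+/* expected, or more has arrived than announced. The last case is only */
+/* tolerated while collecting scan ATTRINFO; anything else is a fatal error. */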
+ if (totReclenAi == currReclenAi) {
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ATTR:
+ {
+ Fragrecord *regFragrecord = fragrecord;
+ Uint32 fragIndex = regTcPtr->fragmentptr;
+ Uint32 tfragrecFileSize = cfragrecFileSize;
+ jam();
+ fragptr.i = fragIndex;
+ ptrCheckGuard(fragptr, tfragrecFileSize, regFragrecord);
+ lqhAttrinfoLab(signal, dataPtr, length);
+ endgettupkeyLab(signal);
+ return;
+ break;
+ }
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::ABORT_QUEUED:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+ aiStateErrorCheckLab(signal, dataPtr,length);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
+ break;
+ }//switch
+ } else if (currReclenAi < totReclenAi) {
+ jam();
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ATTR:
+ jam();
+ lqhAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::ABORT_QUEUED:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+ aiStateErrorCheckLab(signal, dataPtr, length);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE);
+ break;
+ }//switch
+ } else {
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ scanAttrinfoLab(signal, dataPtr, length);
+ return;
+ break;
+ default:
+ ndbout_c("%d", regTcPtr->transactionState);
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ return;
+}//Dblqh::execATTRINFO()
+
+/* ************************************************************************>> */
+/* TUP_ATTRINFO: Interpreted execution in DBTUP generates redo-log info */
+/* which is sent back to DBLQH for logging. This is because the decision */
+/* to execute or not is made in DBTUP and thus we cannot start logging until */
+/* DBTUP part has been run. */
+/* ************************************************************************>> */
+void Dblqh::execTUP_ATTRINFO(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 length = signal->length() - 3;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_TUP);
+ if (saveTupattrbuf(signal, &signal->theData[3], length) == ZOK) {
+ return;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE ARE WAITING FOR RESPONSE FROM TUP HERE. THUS WE NEED TO */
+/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
+/* ------------------------------------------------------------------------- */
+ localAbortStateHandlerLab(signal);
+ }//if
+}//Dblqh::execTUP_ATTRINFO()
+
+/* ------------------------------------------------------------------------- */
+/* ------- HANDLE ATTRINFO FROM LQH ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->operation != ZREAD) {
+ if (regTcPtr->opExec != 1) {
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ ;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */
+/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */
+/* ------------------------------------------------------------------------- */
+ localAbortStateHandlerLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ Uint32 sig0 = regTcPtr->tupConnectrec;
+ Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
+ signal->theData[0] = sig0;
+ EXECUTE_DIRECT(blockNo, GSN_ATTRINFO, signal, length + 3);
+ jamEntry();
+}//Dblqh::lqhAttrinfoLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------ FIND TRANSACTION BY USING HASH TABLE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+int Dblqh::findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ TcConnectionrecPtr locTcConnectptr;
+
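+/* The bucket is chosen as (transid1 XOR tcOprec) masked to 1024 entries; */
+/* the chain within a bucket is followed through nextHashRec. */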
+ Uint32 ThashIndex = (Transid1 ^ TcOprec) & 1023;
+ locTcConnectptr.i = ctransidHash[ThashIndex];
+ while (locTcConnectptr.i != RNIL) {
+ ptrCheckGuard(locTcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ if ((locTcConnectptr.p->transid[0] == Transid1) &&
+ (locTcConnectptr.p->transid[1] == Transid2) &&
+ (locTcConnectptr.p->tcOprec == TcOprec)) {
+/* FIRST PART OF TRANSACTION CORRECT */
+/* SECOND PART ALSO CORRECT */
+/* THE OPERATION RECORD POINTER IN TC WAS ALSO CORRECT */
+ jam();
+ tcConnectptr.i = locTcConnectptr.i;
+ tcConnectptr.p = locTcConnectptr.p;
+ return (int)ZOK;
+ }//if
+ jam();
+/* THIS WAS NOT THE TRANSACTION WHICH WAS SOUGHT */
+ locTcConnectptr.i = locTcConnectptr.p->nextHashRec;
+ }//while
+/* WE DID NOT FIND THE TRANSACTION, REPORT NOT FOUND */
+ return (int)ZNOT_FOUND;
+}//Dblqh::findTransaction()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SAVE ATTRINFO FROM TUP IN ATTRINBUF ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+int Dblqh::saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ Uint32 tfirstfreeAttrinbuf = cfirstfreeAttrinbuf;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 currTupAiLen = regTcPtr->currTupAiLen;
+ if (tfirstfreeAttrinbuf == RNIL) {
+ jam();
+ terrorCode = ZGET_ATTRINBUF_ERROR;
+ return ZGET_ATTRINBUF_ERROR;
+ }//if
+ seizeAttrinbuf(signal);
+ Attrbuf * const regAttrPtr = attrinbufptr.p;
+ MEMCOPY_NO_WORDS(&regAttrPtr->attrbuf[0], dataPtr, length);
+ regTcPtr->currTupAiLen = currTupAiLen + length;
+ regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = length;
+ return ZOK;
+}//Dblqh::saveTupattrbuf()
+
+/* ==========================================================================
+ * ======= SEIZE ATTRIBUTE IN BUFFER =======
+ *
+ * GETS A NEW ATTRINBUF AND SETS ATTRINBUFPTR.
+ * ========================================================================= */
+void Dblqh::seizeAttrinbuf(Signal* signal)
+{
+ AttrbufPtr tmpAttrinbufptr;
+ AttrbufPtr regAttrinbufptr;
+ Attrbuf *regAttrbuf = attrbuf;
+ Uint32 tattrinbufFileSize = cattrinbufFileSize;
+
+ regAttrinbufptr.i = seize_attrinbuf();
+ tmpAttrinbufptr.i = tcConnectptr.p->lastAttrinbuf;
+ ptrCheckGuard(regAttrinbufptr, tattrinbufFileSize, regAttrbuf);
+ tcConnectptr.p->lastAttrinbuf = regAttrinbufptr.i;
+ regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = 0;
+ if (tmpAttrinbufptr.i == RNIL) {
+ jam();
+ tcConnectptr.p->firstAttrinbuf = regAttrinbufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpAttrinbufptr, tattrinbufFileSize, regAttrbuf);
+ tmpAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = regAttrinbufptr.i;
+ }//if
+ regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL;
+ attrinbufptr = regAttrinbufptr;
+}//Dblqh::seizeAttrinbuf()
+
+/* ==========================================================================
+ * ======= SEIZE TC CONNECT RECORD =======
+ *
+ * GETS A NEW TC CONNECT RECORD FROM FREELIST.
+ * ========================================================================= */
+void Dblqh::seizeTcrec()
+{
+ TcConnectionrecPtr locTcConnectptr;
+
+ locTcConnectptr.i = cfirstfreeTcConrec;
+ ptrCheckGuard(locTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ Uint32 nextTc = locTcConnectptr.p->nextTcConnectrec;
+ locTcConnectptr.p->nextTcConnectrec = RNIL;
+ locTcConnectptr.p->clientConnectrec = RNIL;
+ locTcConnectptr.p->clientBlockref = RNIL;
+ locTcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ locTcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ locTcConnectptr.p->tableref = RNIL;
+ locTcConnectptr.p->savePointId = 0;
+ cfirstfreeTcConrec = nextTc;
+ tcConnectptr = locTcConnectptr;
+ locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
+}//Dblqh::seizeTcrec()
+
+/* ==========================================================================
+ * ======= SEIZE DATA BUFFER =======
+ * ========================================================================= */
+void Dblqh::seizeTupkeybuf(Signal* signal)
+{
+ Databuf *regDatabuf = databuf;
+ DatabufPtr tmpDatabufptr;
+ DatabufPtr regDatabufptr;
+ Uint32 tdatabufFileSize = cdatabufFileSize;
+
+/* ------- GET A DATABUF. ------- */
+ regDatabufptr.i = cfirstfreeDatabuf;
+ tmpDatabufptr.i = tcConnectptr.p->lastTupkeybuf;
+ ptrCheckGuard(regDatabufptr, tdatabufFileSize, regDatabuf);
+ Uint32 nextFirst = regDatabufptr.p->nextDatabuf;
+ tcConnectptr.p->lastTupkeybuf = regDatabufptr.i;
+ if (tmpDatabufptr.i == RNIL) {
+ jam();
+ tcConnectptr.p->firstTupkeybuf = regDatabufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpDatabufptr, tdatabufFileSize, regDatabuf);
+ tmpDatabufptr.p->nextDatabuf = regDatabufptr.i;
+ }//if
+ cfirstfreeDatabuf = nextFirst;
+ regDatabufptr.p->nextDatabuf = RNIL;
+ databufptr = regDatabufptr;
+}//Dblqh::seizeTupkeybuf()
+
+/* ------------------------------------------------------------------------- */
+/* ------- TAKE CARE OF LQHKEYREQ ------- */
+/* LQHKEYREQ IS THE SIGNAL THAT STARTS ALL OPERATIONS IN THE LQH BLOCK */
+/* THIS SIGNAL CONTAINS A LOT OF INFORMATION ABOUT WHAT TYPE OF OPERATION, */
+/* KEY INFORMATION, ATTRIBUTE INFORMATION, NODE INFORMATION AND A LOT MORE */
+/* ------------------------------------------------------------------------- */
+void Dblqh::execLQHKEYREQ(Signal* signal)
+{
+ UintR sig0, sig1, sig2, sig3, sig4, sig5;
+ Uint8 tfragDistKey;
+
+ const LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtr();
+
+ sig0 = lqhKeyReq->clientConnectPtr;
+ if (cfirstfreeTcConrec != RNIL && !ERROR_INSERTED(5031)) {
+ jamEntry();
+ seizeTcrec();
+ } else {
+/* ------------------------------------------------------------------------- */
+/* NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST. */
+/* ------------------------------------------------------------------------- */
+ if (ERROR_INSERTED(5031)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+ noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
+ return;
+ }//if
+
+ if(ERROR_INSERTED(5038) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5039);
+ return;
+ }
+
+ c_Counters.operations++;
+
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ regTcPtr->clientBlockref = signal->senderBlockRef();
+ regTcPtr->clientConnectrec = sig0;
+ regTcPtr->tcOprec = sig0;
+ regTcPtr->storedProcId = ZNIL;
+
+ UintR TtotReclenAi = lqhKeyReq->attrLen;
+ sig1 = lqhKeyReq->savePointId;
+ sig2 = lqhKeyReq->hashValue;
+ UintR Treqinfo = lqhKeyReq->requestInfo;
+ sig4 = lqhKeyReq->tableSchemaVersion;
+ sig5 = lqhKeyReq->tcBlockref;
+
+ regTcPtr->savePointId = sig1;
+ regTcPtr->hashValue = sig2;
+ const Uint32 schemaVersion = regTcPtr->schemaVersion = LqhKeyReq::getSchemaVersion(sig4);
+ tabptr.i = LqhKeyReq::getTableId(sig4);
+ regTcPtr->tcBlockref = sig5;
+
+ const Uint8 op = LqhKeyReq::getOperation(Treqinfo);
+ if (op == ZREAD && !getAllowRead()){
+ noFreeRecordLab(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS);
+ return;
+ }
+
+ regTcPtr->totReclenAi = LqhKeyReq::getAttrLen(TtotReclenAi);
+ regTcPtr->tcScanInfo = lqhKeyReq->scanInfo;
+ regTcPtr->indTakeOver = LqhKeyReq::getScanTakeOverFlag(TtotReclenAi);
+
+ regTcPtr->readlenAi = 0;
+ regTcPtr->currTupAiLen = 0;
+ regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ regTcPtr->fragmentptr = RNIL;
+
+ sig0 = lqhKeyReq->fragmentData;
+ sig1 = lqhKeyReq->transId1;
+ sig2 = lqhKeyReq->transId2;
+ sig3 = lqhKeyReq->variableData[0];
+ sig4 = lqhKeyReq->variableData[1];
+
+ regTcPtr->fragmentid = LqhKeyReq::getFragmentId(sig0);
+ regTcPtr->nextReplica = LqhKeyReq::getNextReplicaNodeId(sig0);
+ regTcPtr->transid[0] = sig1;
+ regTcPtr->transid[1] = sig2;
+ regTcPtr->applRef = sig3;
+ regTcPtr->applOprec = sig4;
+
+ regTcPtr->commitAckMarker = RNIL;
+ if(LqhKeyReq::getMarkerFlag(Treqinfo)){
+ jam();
+
+ CommitAckMarkerPtr markerPtr;
+ m_commitAckMarkerHash.seize(markerPtr);
+ if(markerPtr.i == RNIL){
+ noFreeRecordLab(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR);
+ return;
+ }
+ markerPtr.p->transid1 = sig1;
+ markerPtr.p->transid2 = sig2;
+ markerPtr.p->apiRef = sig3;
+ markerPtr.p->apiOprec = sig4;
+ const NodeId tcNodeId = refToNode(sig5);
+ markerPtr.p->tcNodeId = tcNodeId;
+
+ CommitAckMarkerPtr tmp;
+#ifdef VM_TRACE
+#ifdef MARKER_TRACE
+ ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2);
+#endif
+ ndbrequire(!m_commitAckMarkerHash.find(tmp, * markerPtr.p));
+#endif
+ m_commitAckMarkerHash.add(markerPtr);
+ regTcPtr->commitAckMarker = markerPtr.i;
+ }
+
+ regTcPtr->reqinfo = Treqinfo;
+ regTcPtr->lastReplicaNo = LqhKeyReq::getLastReplicaNo(Treqinfo);
+ regTcPtr->lockType = LqhKeyReq::getLockType(Treqinfo);
+ regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
+ regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
+ regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
+ regTcPtr->operation = LqhKeyReq::getOperation(Treqinfo);
+ regTcPtr->simpleRead = regTcPtr->operation == ZREAD && regTcPtr->opSimple;
+ regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
+ UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
+ regTcPtr->apiVersionNo = 0;
+
+ CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
+ refToNode(signal->senderBlockRef()) != cownNodeid);
+
+ regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
+ regTcPtr->currReclenAi = TreclenAiLqhkey;
+ UintR TitcKeyLen = LqhKeyReq::getKeyLen(Treqinfo);
+ regTcPtr->primKeyLen = TitcKeyLen;
+ regTcPtr->noFiredTriggers = lqhKeyReq->noFiredTriggers;
+
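+/* Optional fields follow in variableData[]; nextPos is advanced past each */
+/* field that the request-info flags mark as present. */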
+ UintR TapplAddressInd = LqhKeyReq::getApplicationAddressFlag(Treqinfo);
+ UintR nextPos = (TapplAddressInd << 1);
+ UintR TsameClientAndTcOprec = LqhKeyReq::getSameClientAndTcFlag(Treqinfo);
+ if (TsameClientAndTcOprec == 1) {
+ regTcPtr->tcOprec = lqhKeyReq->variableData[nextPos];
+ nextPos++;
+ }//if
+ UintR TnextReplicasIndicator = regTcPtr->lastReplicaNo -
+ regTcPtr->seqNoReplica;
+ if (TnextReplicasIndicator > 1) {
+ regTcPtr->nodeAfterNext[0] = lqhKeyReq->variableData[nextPos] & 0xFFFF;
+ regTcPtr->nodeAfterNext[1] = lqhKeyReq->variableData[nextPos] >> 16;
+ nextPos++;
+ }//if
+ UintR TstoredProcIndicator = LqhKeyReq::getStoredProcFlag(TtotReclenAi);
+ if (TstoredProcIndicator == 1) {
+ regTcPtr->storedProcId = lqhKeyReq->variableData[nextPos] & ZNIL;
+ nextPos++;
+ }//if
+ UintR TreadLenAiIndicator = LqhKeyReq::getReturnedReadLenAIFlag(Treqinfo);
+ if (TreadLenAiIndicator == 1) {
+ regTcPtr->readlenAi = lqhKeyReq->variableData[nextPos] & ZNIL;
+ nextPos++;
+ }//if
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+
+ regTcPtr->tupkeyData[0] = sig0;
+ regTcPtr->tupkeyData[1] = sig1;
+ regTcPtr->tupkeyData[2] = sig2;
+ regTcPtr->tupkeyData[3] = sig3;
+
+ if (TitcKeyLen > 0) {
+ if (TitcKeyLen < 4) {
+ nextPos += TitcKeyLen;
+ } else {
+ nextPos += 4;
+ }//if
+ } else {
+ LQHKEY_error(signal, 3);
+ return;
+ }//if
+
+ if ((LqhKeyReq::FixedSignalLength + nextPos + TreclenAiLqhkey) !=
+ signal->length()) {
+ LQHKEY_error(signal, 2);
+ return;
+ }//if
+ UintR TseqNoReplica = regTcPtr->seqNoReplica;
+ UintR TlastReplicaNo = regTcPtr->lastReplicaNo;
+ if (TseqNoReplica == TlastReplicaNo) {
+ jam();
+ regTcPtr->nextReplica = ZNIL;
+ } else {
+ if (TseqNoReplica < TlastReplicaNo) {
+ jam();
+ regTcPtr->nextSeqNoReplica = TseqNoReplica + 1;
+ if ((regTcPtr->nextReplica == 0) ||
+ (regTcPtr->nextReplica == cownNodeid)) {
+ LQHKEY_error(signal, 0);
+ }//if
+ } else {
+ LQHKEY_error(signal, 4);
+ return;
+ }//if
+ }//if
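+/* Insert the operation first in its transaction hash bucket, keyed on */
+/* (transid[0] XOR tcOprec) & 1023, so that later KEYINFO and ATTRINFO */
+/* signals can locate it through findTransaction. */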
+ TcConnectionrecPtr localNextTcConnectptr;
+ Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
+ localNextTcConnectptr.i = ctransidHash[hashIndex];
+ ctransidHash[hashIndex] = tcConnectptr.i;
+ regTcPtr->prevHashRec = RNIL;
+ regTcPtr->nextHashRec = localNextTcConnectptr.i;
+ if (localNextTcConnectptr.i != RNIL) {
+/* -------------------------------------------------------------------------- */
+/* ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD IF IT EXISTS */
+/* -------------------------------------------------------------------------- */
+ ptrCheckGuard(localNextTcConnectptr,
+ ctcConnectrecFileSize, tcConnectionrec);
+ jam();
+ localNextTcConnectptr.p->prevHashRec = tcConnectptr.i;
+ }//if
+ if (tabptr.i >= ctabrecFileSize) {
+ LQHKEY_error(signal, 5);
+ return;
+ }//if
+ ptrAss(tabptr, tablerec);
+ if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
+ LQHKEY_abort(signal, 4);
+ return;
+ }
+ if(tabptr.p->schemaVersion != schemaVersion){
+ LQHKEY_abort(signal, 5);
+ return;
+ }
+
+ regTcPtr->tableref = tabptr.i;
+ tabptr.p->usageCount++;
+
+ if (!getFragmentrec(signal, regTcPtr->fragmentid)) {
+ LQHKEY_error(signal, 6);
+ return;
+ }//if
+ regTcPtr->localFragptr = regTcPtr->hashValue & 1;
+ Uint8 TcopyType = fragptr.p->fragCopy;
+ tfragDistKey = fragptr.p->fragDistributionKey;
+ if (fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION) {
+ jam();
+ regTcPtr->activeCreat = ZTRUE;
+ CRASH_INSERTION(5002);
+ } else {
+ regTcPtr->activeCreat = ZFALSE;
+ }//if
+ regTcPtr->replicaType = TcopyType;
+ regTcPtr->fragmentptr = fragptr.i;
+ Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi);
+ if ((tfragDistKey != TdistKey) &&
+ (regTcPtr->seqNoReplica == 0) &&
+ (regTcPtr->dirtyOp == ZFALSE) &&
+ (regTcPtr->simpleRead == ZFALSE)) {
+ /* ----------------------------------------------------------------------
+ * WE HAVE A DIFFERENT OPINION FROM THE DIH THAT STARTED THE TRANSACTION.
+ * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
+ * VALID TO USE. THIS MUST BE CHECKED.
+ * ONE IS ADDED TO THE DISTRIBUTION KEY EVERY TIME WE ADD A NEW REPLICA.
+ * FAILED REPLICAS DO NOT AFFECT THE DISTRIBUTION KEY. THIS MEANS THAT THE
+ * MAXIMUM DEVIATION CAN BE ONE BETWEEN THOSE TWO VALUES.
+ * --------------------------------------------------------------------- */
+ Int32 tmp = TdistKey - tfragDistKey;
+ tmp = (tmp < 0 ? - tmp : tmp);
+ if ((tmp <= 1) || (tfragDistKey == 0)) {
+ LQHKEY_abort(signal, 0);
+ return;
+ }//if
+ LQHKEY_error(signal, 1);
+ }//if
+ if (TreclenAiLqhkey != 0) {
+ if (regTcPtr->operation != ZREAD) {
+ if (regTcPtr->operation != ZDELETE) {
+ if (regTcPtr->opExec != 1) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* */
+/* UPDATES, WRITES AND INSERTS THAT ARE NOT INTERPRETED WILL USE THE */
+/* SAME ATTRINFO IN ALL REPLICAS. THUS WE SAVE THE ATTRINFO ALREADY */
+/* TO SAVE A SIGNAL FROM TUP TO LQH. INTERPRETED EXECUTION IN TUP */
+/* WILL CREATE NEW ATTRINFO FOR THE OTHER REPLICAS AND IT IS THUS NOT */
+/* A GOOD IDEA TO SAVE THE INFORMATION HERE. READS WILL ALSO BE */
+/* UNNECESSARY TO SAVE SINCE THAT ATTRINFO WILL NEVER BE SENT TO ANY */
+/* MORE REPLICAS. */
+/*---------------------------------------------------------------------------*/
+/* READS AND DELETES CAN ONLY HAVE INFORMATION ABOUT WHAT IS TO BE READ. */
+/* NO INFORMATION THAT NEEDS LOGGING. */
+/*---------------------------------------------------------------------------*/
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+ sig4 = lqhKeyReq->variableData[nextPos + 4];
+
+ regTcPtr->firstAttrinfo[0] = sig0;
+ regTcPtr->firstAttrinfo[1] = sig1;
+ regTcPtr->firstAttrinfo[2] = sig2;
+ regTcPtr->firstAttrinfo[3] = sig3;
+ regTcPtr->firstAttrinfo[4] = sig4;
+ regTcPtr->currTupAiLen = TreclenAiLqhkey;
+ } else {
+ jam();
+ regTcPtr->reclenAiLqhkey = 0;
+ }//if
+ } else {
+ jam();
+ regTcPtr->reclenAiLqhkey = 0;
+ }//if
+ }//if
+ sig0 = lqhKeyReq->variableData[nextPos + 0];
+ sig1 = lqhKeyReq->variableData[nextPos + 1];
+ sig2 = lqhKeyReq->variableData[nextPos + 2];
+ sig3 = lqhKeyReq->variableData[nextPos + 3];
+ sig4 = lqhKeyReq->variableData[nextPos + 4];
+
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ signal->theData[3] = sig0;
+ signal->theData[4] = sig1;
+ signal->theData[5] = sig2;
+ signal->theData[6] = sig3;
+ signal->theData[7] = sig4;
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcTupBlockref), GSN_ATTRINFO,
+ signal, TreclenAiLqhkey + 3);
+ jamEntry();
+ if (signal->theData[0] == (UintR)-1) {
+ LQHKEY_abort(signal, 2);
+ return;
+ }//if
+ }//if
+/* ------- TAKE CARE OF PRIM KEY DATA ------- */
+ if (regTcPtr->primKeyLen <= 4) {
+ endgettupkeyLab(signal);
+ return;
+ } else {
+ jam();
+/*--------------------------------------------------------------------*/
+/* KEY LENGTH WAS MORE THAN 4 WORDS (1 WORD = 4 BYTES). THUS WE */
+/* HAVE TO ALLOCATE A DATA BUFFER TO STORE THE KEY DATA AND */
+/* WAIT FOR THE KEYINFO SIGNAL. */
+/*--------------------------------------------------------------------*/
+ regTcPtr->save1 = 4;
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUPKEYINFO;
+ return;
+ }//if
+ return;
+}//Dblqh::execLQHKEYREQ()
+
+void Dblqh::endgettupkeyLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->totReclenAi == regTcPtr->currReclenAi) {
+ ;
+ } else {
+ jam();
+ ndbrequire(regTcPtr->currReclenAi < regTcPtr->totReclenAi);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ATTR;
+ return;
+ }//if
+/* ---------------------------------------------------------------------- */
+/* NOW THAT RECEPTION OF LQHKEYREQ IS COMPLETED, THE NEXT STEP IS TO START */
+/* PROCESSING THE MESSAGE. IF THE MESSAGE IS TO A STAND-BY NODE */
+/* WITHOUT NETWORK REDUNDANCY OR PREPARE-TO-COMMIT ACTIVATED THE */
+/* PREPARATION TO SEND TO THE NEXT NODE WILL START IMMEDIATELY. */
+/* */
+/* OTHERWISE THE PROCESSING WILL START AFTER SETTING THE PROPER */
+/* STATE. HOWEVER BEFORE PROCESSING THE MESSAGE */
+/* IT IS NECESSARY TO CHECK THAT THE FRAGMENT IS NOT PERFORMING */
+/* A CHECKPOINT. THE OPERATION SHALL ALSO BE LINKED INTO THE */
+/* FRAGMENT QUEUE OR LIST OF ACTIVE OPERATIONS. */
+/* */
+/* THE FIRST STEP IN PROCESSING THE MESSAGE IS TO CONTACT DBACC. */
+/*------------------------------------------------------------------------*/
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ linkActiveFrag(signal);
+ prepareContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ regTcPtr->transactionState = TcConnectionrec::STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::endgettupkeyLab()
+
+void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
+{
+ UintR ttcScanOp;
+ UintR taccreq;
+
+/* -------------------------------------------------------------------------- */
+/* INPUT: TC_CONNECTPTR ACTIVE CONNECTION RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+/* CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
+/* -------------------------------------------------------------------------- */
+/* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE */
+/* -------------------------------------------------------------------------- */
+ Uint32 tc_ptr_i = tcConnectptr.i;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->indTakeOver == ZTRUE) {
+ jam();
+ ttcScanOp = KeyInfo20::getScanOp(regTcPtr->tcScanInfo);
+ scanptr.i = RNIL;
+ {
+ ScanRecord key;
+ key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo);
+ key.fragPtrI = fragptr.i;
+ c_scanTakeOverHash.find(scanptr, key);
+#ifdef TRACE_SCAN_TAKEOVER
+ if(scanptr.i == RNIL)
+ ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI);
+#endif
+ }
+ if (scanptr.i == RNIL) {
+ jam();
+ releaseActiveFrag(signal);
+ takeOverErrorLab(signal);
+ return;
+ }//if
+ Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ ttcScanOp,
+ true);
+ if (accOpPtr == RNIL) {
+ jam();
+ releaseActiveFrag(signal);
+ takeOverErrorLab(signal);
+ return;
+ }//if
+ signal->theData[1] = accOpPtr;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACC_TO_REQ,
+ signal, 4);
+ if (signal->theData[0] == (UintR)-1) {
+ execACC_TO_REF(signal);
+ return;
+ }//if
+ jamEntry();
+ }//if
+/*-------------------------------------------------------------------*/
+/* IT IS NOW TIME TO CONTACT ACC. THE TUPLE KEY WILL BE SENT */
+/* AND THIS WILL BE TRANSLATED INTO A LOCAL KEY BY USING THE */
+/* LOCAL PART OF THE LH3-ALGORITHM. ALSO PROPER LOCKS ON THE */
+/* TUPLE WILL BE SET. FOR INSERTS AND DELETES THE MESSAGE WILL */
+/* START AN INSERT/DELETE INTO THE HASH TABLE. */
+/* */
+/* BEFORE SENDING THE MESSAGE THE REQUEST INFORMATION IS SET */
+/* PROPERLY. */
+/* ----------------------------------------------------------------- */
+#if 0
+ if (regTcPtr->tableref != 0) {
+ switch (regTcPtr->operation) {
+ case ZREAD: ndbout << "Read "; break;
+ case ZUPDATE: ndbout << "Update "; break;
+ case ZWRITE: ndbout << "Write "; break;
+ case ZINSERT: ndbout << "Insert "; break;
+ case ZDELETE: ndbout << "Delete "; break;
+ default: ndbout << "????"; break;
+ }
+ ndbout << "with key = " << regTcPtr->tupkeyData[0] << endl;
+ }
+#endif
+
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ACC;
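+/* Build the ACC request-info word: the operation type occupies the low */
+/* bits, with the simple, lock type, dirty, replica type and API version */
+/* fields packed in at the bit positions shifted in below. */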
+ taccreq = regTcPtr->operation;
+ taccreq = taccreq + (regTcPtr->opSimple << 3);
+ taccreq = taccreq + (regTcPtr->lockType << 4);
+ taccreq = taccreq + (regTcPtr->dirtyOp << 6);
+ taccreq = taccreq + (regTcPtr->replicaType << 7);
+ taccreq = taccreq + (regTcPtr->apiVersionNo << 9);
+/* ************ */
+/* ACCKEYREQ < */
+/* ************ */
+ ndbrequire(regTcPtr->localFragptr < 2);
+ Uint32 sig0, sig1, sig2, sig3, sig4;
+ sig0 = regTcPtr->accConnectrec;
+ sig1 = fragptr.p->accFragptr[regTcPtr->localFragptr];
+ sig2 = regTcPtr->hashValue;
+ sig3 = regTcPtr->primKeyLen;
+ sig4 = regTcPtr->transid[0];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = taccreq;
+ signal->theData[3] = sig2;
+ signal->theData[4] = sig3;
+ signal->theData[5] = sig4;
+
+ sig0 = regTcPtr->transid[1];
+ sig1 = regTcPtr->tupkeyData[0];
+ sig2 = regTcPtr->tupkeyData[1];
+ sig3 = regTcPtr->tupkeyData[2];
+ sig4 = regTcPtr->tupkeyData[3];
+ signal->theData[6] = sig0;
+ signal->theData[7] = sig1;
+ signal->theData[8] = sig2;
+ signal->theData[9] = sig3;
+ signal->theData[10] = sig4;
+ if (regTcPtr->primKeyLen > 4) {
+ sendKeyinfoAcc(signal, 11);
+ }//if
+ EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
+ signal, 7 + regTcPtr->primKeyLen);
+ if (signal->theData[0] < RNIL) {
+ signal->theData[0] = tc_ptr_i;
+ execACCKEYCONF(signal);
+ return;
+ } else if (signal->theData[0] == RNIL) {
+ ;
+ } else {
+ ndbrequire(signal->theData[0] == (UintR)-1);
+ signal->theData[0] = tc_ptr_i;
+ execACCKEYREF(signal);
+ }//if
+ return;
+}//Dblqh::prepareContinueAfterBlockedLab()
+
+/* ========================================================================== */
+/* ======= SEND KEYINFO TO ACC ======= */
+/* */
+/* ========================================================================== */
+void Dblqh::sendKeyinfoAcc(Signal* signal, Uint32 Ti)
+{
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
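+/* Walk the Databuf chain of the operation and copy four key words from */
+/* each buffer into the signal, starting at index Ti. */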
+
+ do {
+ jam();
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Uint32 sig0 = regDatabufptr.p->data[0];
+ Uint32 sig1 = regDatabufptr.p->data[1];
+ Uint32 sig2 = regDatabufptr.p->data[2];
+ Uint32 sig3 = regDatabufptr.p->data[3];
+ signal->theData[Ti] = sig0;
+ signal->theData[Ti + 1] = sig1;
+ signal->theData[Ti + 2] = sig2;
+ signal->theData[Ti + 3] = sig3;
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+ Ti += 4;
+ } while (regDatabufptr.i != RNIL);
+}//Dblqh::sendKeyinfoAcc()
+
+void Dblqh::execLQH_ALLOCREQ(Signal* signal)
+{
+ TcConnectionrecPtr regTcPtr;
+ FragrecordPtr regFragptr;
+
+ jamEntry();
+ regTcPtr.i = signal->theData[0];
+ ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ ndbrequire(regTcPtr.p->localFragptr < 2);
+ signal->theData[0] = regTcPtr.p->tupConnectrec;
+ signal->theData[1] = regFragptr.p->tupFragptr[regTcPtr.p->localFragptr];
+ signal->theData[2] = regTcPtr.p->tableref;
+ Uint32 tup = refToBlock(regTcPtr.p->tcTupBlockref);
+ EXECUTE_DIRECT(tup, GSN_TUP_ALLOCREQ, signal, 3);
+}//Dblqh::execLQH_ALLOCREQ()
+
+/* ************>> */
+/* ACCKEYCONF > */
+/* ************>> */
+void Dblqh::execACCKEYCONF(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 Tfragid = signal->theData[2];
+ Uint32 localKey1 = signal->theData[3];
+ Uint32 localKey2 = signal->theData[4];
+ Uint32 localKeyFlag = signal->theData[5];
+ jamEntry();
+ tcConnectptr.i = tcIndex;
+ ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->transactionState != TcConnectionrec::WAIT_ACC) {
+ LQHKEY_abort(signal, 3);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * Set transaction state and also reset the activeCreat since that is only
+ * valid in cases where the record was not present.
+ * ------------------------------------------------------------------------ */
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUP;
+ regTcPtr->activeCreat = ZFALSE;
+ /* ------------------------------------------------------------------------
+ * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE
+ * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO
+ * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA
+ * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION
+ * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A
+ * TABLE.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->operation == ZWRITE) {
+ if (signal->theData[1] > 0) {
+ /* --------------------------------------------------------------------
+ * ACC did perform an insert and thus we should indicate that the WRITE
+ * is an INSERT otherwise it is an UPDATE.
+ * -------------------------------------------------------------------- */
+ jam();
+ regTcPtr->operation = ZINSERT;
+ } else {
+ jam();
+ tcConnectptr.p->operation = ZUPDATE;
+ }//if
+ }//if
+ ndbrequire(localKeyFlag == 1);
+ localKey2 = localKey1 & MAX_TUPLES_PER_PAGE;
+ localKey1 = localKey1 >> MAX_TUPLES_BITS;
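+  /* The local key word from ACC is split into two parts here (presumably a page part and an in-page index part, given the MAX_TUPLES_* constants) and passed on to TUP as keyRef1/keyRef2 below. */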
+ Uint32 Ttupreq = regTcPtr->dirtyOp;
+ Ttupreq = Ttupreq + (regTcPtr->opSimple << 1);
+ Ttupreq = Ttupreq + (regTcPtr->operation << 6);
+ Ttupreq = Ttupreq + (regTcPtr->opExec << 10);
+ Ttupreq = Ttupreq + (regTcPtr->apiVersionNo << 11);
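+  /* Layout of the TUP request word assembled above: bit 0 = dirtyOp,
+   * bit 1 = opSimple, operation code from bit 6, opExec (interpreted
+   * execution flag) at bit 10 and apiVersionNo from bit 11. */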
+
+ /* ---------------------------------------------------------------------
+ * Clear interpreted mode bit since we do not want the next replica to
+ * use interpreted mode. The next replica will receive a normal write.
+ * --------------------------------------------------------------------- */
+ regTcPtr->opExec = 0;
+ /* ************< */
+ /* TUPKEYREQ < */
+ /* ************< */
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+ Uint32 sig0, sig1, sig2, sig3;
+
+ sig0 = regTcPtr->tupConnectrec;
+ sig1 = regTcPtr->tableref;
+ tupKeyReq->connectPtr = sig0;
+ tupKeyReq->request = Ttupreq;
+ tupKeyReq->tableRef = sig1;
+ tupKeyReq->fragId = Tfragid;
+ tupKeyReq->keyRef1 = localKey1;
+ tupKeyReq->keyRef2 = localKey2;
+
+ sig0 = regTcPtr->totReclenAi;
+ sig1 = regTcPtr->applOprec;
+ sig2 = regTcPtr->applRef;
+ sig3 = regTcPtr->schemaVersion;
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ tupKeyReq->attrBufLen = sig0;
+ tupKeyReq->opRef = sig1;
+ tupKeyReq->applRef = sig2;
+ tupKeyReq->schemaVersion = sig3;
+
+ ndbrequire(regTcPtr->localFragptr < 2);
+ sig0 = regTcPtr->storedProcId;
+ sig1 = regTcPtr->transid[0];
+ sig2 = regTcPtr->transid[1];
+ sig3 = regFragptr.p->tupFragptr[regTcPtr->localFragptr];
+ Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
+
+ tupKeyReq->storedProcedure = sig0;
+ tupKeyReq->transId1 = sig1;
+ tupKeyReq->transId2 = sig2;
+ tupKeyReq->fragPtr = sig3;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+
+ EXECUTE_DIRECT(tup, GSN_TUPKEYREQ, signal, TupKeyReq::SignalLength);
+}//Dblqh::execACCKEYCONF()
+
+/* --------------------------------------------------------------------------
+ * ------- ENTER TUP... -------
+ * ENTER TUPKEYCONF WITH
+ * TC_CONNECTPTR,
+ * TDATA2, LOCAL KEY REFERENCE 1, ONLY INTERESTING AFTER INSERT
+ * TDATA3, LOCAL KEY REFERENCE 2, ONLY INTERESTING AFTER INSERT
+ * TDATA4, TOTAL LENGTH OF READ DATA SENT TO TC/APPLICATION
+ * TDATA5 TOTAL LENGTH OF UPDATE DATA SENT TO/FROM TUP
+ * GOTO TUPKEY_CONF
+ *
+ * TAKE CARE OF RESPONSES FROM TUPLE MANAGER.
+ * -------------------------------------------------------------------------- */
+void Dblqh::tupkeyConfLab(Signal* signal)
+{
+/* ---- GET OPERATION TYPE AND CHECK WHAT KIND OF OPERATION IS REQUESTED ---- */
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)&signal->theData[0];
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->simpleRead) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE OPERATION.
+ * SINCE WE HAVE NOT RELEASED THE FRAGMENT LOCK (FOR LOCAL CHECKPOINTS) YET
+ * WE CAN GO IMMEDIATELY TO COMMIT_CONTINUE_AFTER_BLOCKED.
+ * WE HAVE ALREADY SENT THE RESPONSE SO WE ARE NOT INTERESTED IN READ LENGTH
+ * ---------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ }//if
+ if (tupKeyConf->readLength != 0) {
+ jam();
+
+ /* SET BIT 15 IN REQINFO */
+ LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
+
+ regTcPtr->readlenAi = tupKeyConf->readLength;
+ }//if
+ regTcPtr->totSendlenAi = tupKeyConf->writeLength;
+ ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen);
+ rwConcludedLab(signal);
+ return;
+}//Dblqh::tupkeyConfLab()
+
+/* --------------------------------------------------------------------------
+ * THE CODE IS FOUND IN THE SIGNAL RECEPTION PART OF LQH
+ * -------------------------------------------------------------------------- */
+void Dblqh::rwConcludedLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
+ * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
+ * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->operation == ZREAD) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL THE
+ * COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
+ * ---------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ packLqhkeyreqLab(signal);
+ return;
+ } else {
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ if (regFragptr.p->logFlag == Fragrecord::STATE_FALSE){
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+ /* ------------------------------------------------------------------
+       * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
+       * THAT CAN BE COMMITTED IMMEDIATELY.
+ * ------------------------------------------------------------------ */
+ regTcPtr->gci = cnewestGci;
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+       * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
+ * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
+ * ------------------------------------------------------------------ */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
+ releaseActiveFrag(signal);
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+       * A WRITE OPERATION, DIRTY OR NORMAL, ON A FRAGMENT WHICH NEEDS LOGGING.
+       * WE RELEASE THE FRAGMENT LOCK FIRST AND THEN START BY LOGGING THE
+       * REQUEST. A NORMAL WRITE WILL NOT BE PREMATURELY COMMITTED.
+ * -------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ }//if
+}//Dblqh::rwConcludedLab()
+
+void Dblqh::rwConcludedAiLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ fragptr.i = regTcPtr->fragmentptr;
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION.
+ * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND
+ * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION.
+ * IN THIS CASE WE HAVE ALREADY RELEASED THE FRAGMENT LOCK.
+ * ERROR CASES AT FRAGMENT CREATION AND STAND-BY NODES ARE THE REASONS FOR
+ * COMING HERE.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->operation == ZREAD) {
+ if (regTcPtr->opSimple == 1) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE
+ * OPERATION.
+ * -------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL
+ * THE COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE.
+ * -------------------------------------------------------------------- */
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->logFlag == Fragrecord::STATE_FALSE) {
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ /* ------------------------------------------------------------------
+         * THIS OPERATION WAS A WRITE OPERATION THAT DOES NOT NEED LOGGING AND
+         * THAT CAN BE COMMITTED IMMEDIATELY.
+ * ------------------------------------------------------------------ */
+ jam();
+ /* ----------------------------------------------------------------
+ * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
+ * ---------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ return;
+ } else {
+ /* ------------------------------------------------------------------
+         * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DOES NOT NEED LOGGING.
+ * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC.
+ * ------------------------------------------------------------------ */
+ jam();
+ /* ---------------------------------------------------------------
+ * IT MUST BE ACTIVE CREATION OF A FRAGMENT.
+ * NOT A DIRTY OPERATION THUS PACK REQUEST/RESPONSE.
+ * ---------------------------------------------------------------- */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN;
+ packLqhkeyreqLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+       * A WRITE OPERATION, DIRTY OR NORMAL, ON A FRAGMENT WHICH NEEDS LOGGING.
+       * WE START BY LOGGING THE REQUEST. IN THIS CASE THE FRAGMENT LOCK HAS
+       * ALREADY BEEN RELEASED. A NORMAL WRITE WILL NOT BE PREMATURELY COMMITTED.
+ * -------------------------------------------------------------------- */
+ logLqhkeyreqLab(signal);
+ return;
+ }//if
+ }//if
+}//Dblqh::rwConcludedAiLab()
+
+/* ##########################################################################
+ * ####### LOG MODULE #######
+ *
+ * ##########################################################################
+ * --------------------------------------------------------------------------
+ * THE LOG MODULE HANDLES THE READING AND WRITING OF THE LOG
+ * IT IS ALSO RESPONSIBLE FOR HANDLING THE SYSTEM RESTART.
+ * IT CONTROLS THE SYSTEM RESTART IN TUP AND ACC AS WELL.
+ * -------------------------------------------------------------------------- */
+void Dblqh::logLqhkeyreqLab(Signal* signal)
+{
+ UintR tcurrentFilepage;
+ TcConnectionrecPtr tmpTcConnectptr;
+
+ if (cnoOfLogPages < ZMIN_LOG_PAGES_OPERATION || ERROR_INSERTED(5032)) {
+ jam();
+ if(ERROR_INSERTED(5032)){
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+/*---------------------------------------------------------------------------*/
+// The log disk is having problems keeping up with the speed of execution.
+// We must hold off writing the log for this operation to ensure we do not
+// overload the log.
+/*---------------------------------------------------------------------------*/
+ terrorCode = ZTEMPORARY_REDO_LOG_FAILURE;
+ abortErrorLab(signal);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
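+  /* The redo log is divided into four log parts; the part is selected by the two least significant bits of the operation's hash value. */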
+/* -------------------------------------------------- */
+/* THIS PART IS USED TO WRITE THE LOG */
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* CHECK IF A LOG OPERATION IS ONGOING ALREADY. */
+/* IF SO THEN QUEUE THE OPERATION FOR LATER */
+/* RESTART WHEN THE LOG PART IS FREE AGAIN. */
+/* -------------------------------------------------- */
+ LogPartRecord * const regLogPartPtr = logPartPtr.p;
+
+ if(ERROR_INSERTED(5033)){
+ jam();
+ CLEAR_ERROR_INSERT_VALUE;
+
+ if ((regLogPartPtr->firstLogQueue != RNIL) &&
+ (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
+ /* -------------------------------------------------- */
+ /* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
+ /* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
+ /* -------------------------------------------------- */
+ /* -------------------------------------------------- */
+ /* WE MUST STILL RESTART QUEUED OPERATIONS SO */
+ /* THEY ALSO CAN BE ABORTED. */
+ /* -------------------------------------------------- */
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+
+ terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
+ abortErrorLab(signal);
+ return;
+ }
+
+ if (regLogPartPtr->logPartState == LogPartRecord::IDLE) {
+ ;
+ } else if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_QUEUED;
+ return;
+ } else {
+ if ((regLogPartPtr->firstLogQueue != RNIL) &&
+ (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) {
+/* -------------------------------------------------- */
+/* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */
+/* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* WE MUST STILL RESTART QUEUED OPERATIONS SO */
+/* THEY ALSO CAN BE ABORTED. */
+/* -------------------------------------------------- */
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ if (regLogPartPtr->logPartState == LogPartRecord::TAIL_PROBLEM) {
+ jam();
+ terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR;
+ } else {
+ ndbrequire(regLogPartPtr->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM);
+ jam();
+ terrorCode = ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR;
+ }//if
+ abortErrorLab(signal);
+ return;
+ }//if
+ regLogPartPtr->logPartState = LogPartRecord::ACTIVE;
+ logFilePtr.i = regLogPartPtr->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+/* -------------------------------------------------- */
+/* CHECK IF A NEW MBYTE IS TO BE STARTED. IF */
+/* SO INSERT A NEXT LOG RECORD, WRITE THE LOG */
+/*       AND PLACE THE LOG POINTER AT THE NEW      */
+/*       POSITION. IF A NEW FILE IS TO BE USED,    */
+/*       CHANGE FILE AND                           */
+/* ALSO START OPENING THE NEXT LOG FILE. IF A */
+/* LAP HAS BEEN COMPLETED THEN ADD ONE TO LAP */
+/* COUNTER. */
+/* -------------------------------------------------- */
+ checkNewMbyte(signal);
+/* -------------------------------------------------- */
+/* INSERT THE OPERATION RECORD LAST IN THE LIST */
+/* OF NOT COMPLETED OPERATIONS. ALSO RECORD THE */
+/* FILE NO, PAGE NO AND PAGE INDEX OF THE START */
+/* OF THIS LOG RECORD. */
+/* IT IS NOT ALLOWED TO INSERT IT INTO THE LIST */
+/* BEFORE CHECKING THE NEW MBYTE SINCE THAT WILL*/
+/* CAUSE THE OLD VALUES OF TC_CONNECTPTR TO BE */
+/* USED IN WRITE_FILE_DESCRIPTOR. */
+/* -------------------------------------------------- */
+ Uint32 tcIndex = tcConnectptr.i;
+ tmpTcConnectptr.i = regLogPartPtr->lastLogTcrec;
+ regLogPartPtr->lastLogTcrec = tcIndex;
+ if (tmpTcConnectptr.i == RNIL) {
+ jam();
+ regLogPartPtr->firstLogTcrec = tcIndex;
+ } else {
+ ptrCheckGuard(tmpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tmpTcConnectptr.p->nextLogTcrec = tcIndex;
+ }//if
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ tcurrentFilepage = logFilePtr.p->currentFilepage;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ regTcPtr->nextLogTcrec = RNIL;
+ regTcPtr->prevLogTcrec = tmpTcConnectptr.i;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ Uint32 pageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ regTcPtr->logStartFileNo = fileNo;
+ regTcPtr->logStartPageNo = tcurrentFilepage;
+ regTcPtr->logStartPageIndex = pageIndex;
+/* -------------------------------------------------- */
+/* WRITE THE LOG HEADER OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeLogHeader(signal);
+/* -------------------------------------------------- */
+/* WRITE THE TUPLE KEY OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeKey(signal);
+/* -------------------------------------------------- */
+/* WRITE THE ATTRIBUTE INFO OF THIS OPERATION. */
+/* -------------------------------------------------- */
+ writeAttrinfoLab(signal);
+
+ logNextStart(signal);
+/* -------------------------------------------------- */
+/* RESET THE STATE OF THE LOG PART. IF ANY */
+/* OPERATIONS HAVE QUEUED THEN START THE FIRST */
+/* OF THESE. */
+/* -------------------------------------------------- */
+/* -------------------------------------------------- */
+/* CONTINUE WITH PACKING OF LQHKEYREQ */
+/* -------------------------------------------------- */
+ tcurrentFilepage = logFilePtr.p->currentFilepage;
+ if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
+ jam();
+ tcurrentFilepage--;
+ }//if
+ regTcPtr->logStopPageNo = tcurrentFilepage;
+ regTcPtr->logWriteState = TcConnectionrec::WRITTEN;
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+/* -------------------------------------------------- */
+/*       AN ABORT HAS BEEN ORDERED. THE ABORT WAITED */
+/* FOR THE LOG WRITE TO BE COMPLETED. NOW WE */
+/* CAN PROCEED WITH THE NORMAL ABORT HANDLING. */
+/* -------------------------------------------------- */
+ abortCommonLab(signal);
+ return;
+ }//if
+ if (regTcPtr->dirtyOp != ZTRUE) {
+ packLqhkeyreqLab(signal);
+ } else {
+ /* ----------------------------------------------------------------------
+ * I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS
+ * TRANSACTION. SINCE WE RELEASED THE LOG LOCK JUST NOW NO ONE ELSE CAN BE
+ * ACTIVE IN WRITING THE LOG. WE THUS WRITE THE LOG WITHOUT GETTING A LOCK
+ * SINCE WE ARE ONLY WRITING A COMMIT LOG RECORD.
+ * ---------------------------------------------------------------------- */
+ writeCommitLog(signal, logPartPtr);
+ /* ----------------------------------------------------------------------
+ * DIRTY OPERATIONS SHOULD COMMIT BEFORE THEY PACK THE REQUEST/RESPONSE.
+ * ---------------------------------------------------------------------- */
+ regTcPtr->gci = cnewestGci;
+ localCommitLab(signal);
+ }//if
+}//Dblqh::logLqhkeyreqLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYREQ */
+/* */
+/* NO STATE CHECKING SINCE THE SIGNAL IS A LOCAL SIGNAL. THE EXECUTION OF */
+/* THE OPERATION IS COMPLETED. IT IS NOW TIME TO SEND THE OPERATION TO THE */
+/* NEXT REPLICA OR TO TC. */
+/* ------------------------------------------------------------------------- */
+void Dblqh::packLqhkeyreqLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->nextReplica == ZNIL) {
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYCONF ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+ sendLqhkeyconfTc(signal, regTcPtr->tcBlockref);
+ if (regTcPtr->dirtyOp != ZTRUE) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::PREPARED;
+ releaseOprec(signal);
+ } else {
+ jam();
+/*************************************************************>*/
+/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
+/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
+/*       OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO    */
+/*       CONSISTENCY. THE SECOND SITUATION IS BY        */
+/*       OPERATIONS THAT ARE SENT AS PART OF A COPY     */
+/*       FRAGMENT PROCESS.                              */
+/* */
+/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
+/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
+/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
+/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
+/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
+/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
+/* THOSE OPERATIONS ARE NOT INTERESTING. */
+/* */
+/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
+/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
+/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
+/* THIS NODE. */
+/*************************************************************>*/
+ cleanUp(signal);
+ }//if
+ return;
+ }//if
+/* ------------------------------------------------------------------------- */
+/* ------- SEND LQHKEYREQ ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* THERE ARE MORE REPLICAS TO SEND THE OPERATION TO. A NEW LQHKEYREQ WILL BE */
+/* PREPARED FOR THE NEXT REPLICA. */
+/* ------------------------------------------------------------------------- */
+/* CLEAR REPLICA TYPE, ATTRINFO INDICATOR (IN LQHKEYREQ), */
+/* INTERPRETED EXECUTION, SEQUENTIAL NUMBER OF REPLICA. */
+// Set bit indicating Client and TC record not the same.
+// Set readlenAi indicator if readlenAi != 0
+// Stored Procedure Indicator not set.
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)&signal->theData[0];
+
+ UintR Treqinfo;
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
+ Treqinfo = preComputedRequestInfoMask & regTcPtr->reqinfo;
+
+ UintR TapplAddressIndicator = (regTcPtr->nextSeqNoReplica == 0 ? 0 : 1);
+ LqhKeyReq::setApplicationAddressFlag(Treqinfo, TapplAddressIndicator);
+ LqhKeyReq::setInterpretedFlag(Treqinfo, regTcPtr->opExec);
+ LqhKeyReq::setSeqNoReplica(Treqinfo, regTcPtr->nextSeqNoReplica);
+ LqhKeyReq::setAIInLqhKeyReq(Treqinfo, regTcPtr->reclenAiLqhkey);
+ UintR TreadLenAiInd = (regTcPtr->readlenAi == 0 ? 0 : 1);
+ UintR TsameLqhAndClient = (tcConnectptr.i ==
+ regTcPtr->tcOprec ? 0 : 1);
+ LqhKeyReq::setSameClientAndTcFlag(Treqinfo, TsameLqhAndClient);
+ LqhKeyReq::setReturnedReadLenAIFlag(Treqinfo, TreadLenAiInd);
+
+ UintR TotReclenAi = regTcPtr->totSendlenAi;
+/* ------------------------------------------------------------------------- */
+/* WE ARE NOW PREPARED TO SEND THE LQHKEYREQ. WE HAVE TO DECIDE IF ATTRINFO */
+/* IS INCLUDED IN THE LQHKEYREQ SIGNAL AND THEN SEND IT. */
+/* TAKE OVER SCAN OPERATION IS NEVER USED ON BACKUPS, LOG RECORDS AND START-UP*/
+/* OF NEW REPLICA AND THUS ONLY TOT_SENDLEN_AI IS USED; THE UPPER 16 BITS ARE */
+/* ZERO.                                                                      */
+/* ------------------------------------------------------------------------- */
+ sig0 = tcConnectptr.i;
+ sig1 = regTcPtr->savePointId;
+ sig2 = regTcPtr->hashValue;
+ sig4 = regTcPtr->tcBlockref;
+
+ lqhKeyReq->clientConnectPtr = sig0;
+ lqhKeyReq->attrLen = TotReclenAi;
+ lqhKeyReq->savePointId = sig1;
+ lqhKeyReq->hashValue = sig2;
+ lqhKeyReq->requestInfo = Treqinfo;
+ lqhKeyReq->tcBlockref = sig4;
+
+ sig0 = regTcPtr->tableref + (regTcPtr->schemaVersion << 16);
+ sig1 = regTcPtr->fragmentid + (regTcPtr->nodeAfterNext[0] << 16);
+ sig2 = regTcPtr->transid[0];
+ sig3 = regTcPtr->transid[1];
+ sig4 = regTcPtr->applRef;
+ sig5 = regTcPtr->applOprec;
+ sig6 = regTcPtr->tcOprec;
+ UintR nextPos = (TapplAddressIndicator << 1);
+
+ lqhKeyReq->tableSchemaVersion = sig0;
+ lqhKeyReq->fragmentData = sig1;
+ lqhKeyReq->transId1 = sig2;
+ lqhKeyReq->transId2 = sig3;
+ lqhKeyReq->noFiredTriggers = regTcPtr->noFiredTriggers;
+ lqhKeyReq->variableData[0] = sig4;
+ lqhKeyReq->variableData[1] = sig5;
+ lqhKeyReq->variableData[2] = sig6;
+
+ nextPos += TsameLqhAndClient;
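+  /* nextPos now counts the optional words already placed in the variable part: two application address words when present and one tcOprec word when the client and TC records differ. */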
+
+ if ((regTcPtr->lastReplicaNo - regTcPtr->nextSeqNoReplica) > 1) {
+ sig0 = (UintR)regTcPtr->nodeAfterNext[1] +
+ (UintR)(regTcPtr->nodeAfterNext[2] << 16);
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos++;
+ }//if
+ sig0 = regTcPtr->readlenAi;
+ sig1 = regTcPtr->tupkeyData[0];
+ sig2 = regTcPtr->tupkeyData[1];
+ sig3 = regTcPtr->tupkeyData[2];
+ sig4 = regTcPtr->tupkeyData[3];
+
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos += TreadLenAiInd;
+ lqhKeyReq->variableData[nextPos] = sig1;
+ lqhKeyReq->variableData[nextPos + 1] = sig2;
+ lqhKeyReq->variableData[nextPos + 2] = sig3;
+ lqhKeyReq->variableData[nextPos + 3] = sig4;
+ UintR TkeyLen = LqhKeyReq::getKeyLen(Treqinfo);
+ if (TkeyLen < 4) {
+ nextPos += TkeyLen;
+ } else {
+ nextPos += 4;
+ }//if
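+  /* At most four key words are carried inline in the LQHKEYREQ signal; any remaining key words are sent in separate KEYINFO signals (see sendTupkey below). */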
+
+ sig0 = regTcPtr->firstAttrinfo[0];
+ sig1 = regTcPtr->firstAttrinfo[1];
+ sig2 = regTcPtr->firstAttrinfo[2];
+ sig3 = regTcPtr->firstAttrinfo[3];
+ sig4 = regTcPtr->firstAttrinfo[4];
+ UintR TAiLen = regTcPtr->reclenAiLqhkey;
+ BlockReference lqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
+
+ lqhKeyReq->variableData[nextPos] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+ lqhKeyReq->variableData[nextPos + 4] = sig4;
+
+ nextPos += TAiLen;
+
+ sendSignal(lqhRef, GSN_LQHKEYREQ, signal,
+ nextPos + LqhKeyReq::FixedSignalLength, JBB);
+ if (regTcPtr->primKeyLen > 4) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* MORE THAN 4 WORDS OF KEY DATA IS IN THE OPERATION. THEREFORE WE NEED TO */
+/* PREPARE A KEYINFO SIGNAL. MORE THAN ONE KEYINFO SIGNAL CAN BE SENT. */
+/* ------------------------------------------------------------------------- */
+ sendTupkey(signal);
+ }//if
+/* ------------------------------------------------------------------------- */
+/* NOW I AM PREPARED TO SEND ALL THE ATTRINFO SIGNALS. AT THE MOMENT A LOOP */
+/* SENDS ALL AT ONCE. LATER WE HAVE TO ADDRESS THE PROBLEM THAT THESE COULD */
+/* LEAD TO BUFFER EXPLOSION => NODE CRASH. */
+/* ------------------------------------------------------------------------- */
+/* NEW CODE TO SEND ATTRINFO IN PACK_LQHKEYREQ */
+/* THIS CODE USES A REAL-TIME BREAK AFTER */
+/* SENDING 16 SIGNALS. */
+/* -------------------------------------------------- */
+ sig0 = regTcPtr->tcOprec;
+ sig1 = regTcPtr->transid[0];
+ sig2 = regTcPtr->transid[1];
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ while (regAttrinbufptr.i != RNIL) {
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ jam();
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(dataLen != 0);
+ MEMCOPY_NO_WORDS(&signal->theData[3], &regAttrinbufptr.p->attrbuf[0], dataLen);
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ sendSignal(lqhRef, GSN_ATTRINFO, signal, dataLen + 3, JBB);
+ }//while
+ regTcPtr->transactionState = TcConnectionrec::PREPARED;
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+/*************************************************************>*/
+/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
+/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
+/*       OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO    */
+/*       CONSISTENCY. THE SECOND SITUATION IS BY        */
+/*       OPERATIONS THAT ARE SENT AS PART OF A COPY     */
+/*       FRAGMENT PROCESS.                              */
+/* */
+/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */
+/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */
+/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */
+/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */
+/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */
+/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */
+/* THOSE OPERATIONS ARE NOT INTERESTING. */
+/* */
+/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */
+/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */
+/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */
+/* THIS NODE. */
+/*************************************************************>*/
+ cleanUp(signal);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * ALL INFORMATION NEEDED BY THE COMMIT PHASE AND COMPLETE PHASE IS
+ * KEPT IN THE TC_CONNECT RECORD. TO ENSURE PROPER USE OF MEMORY
+ * RESOURCES WE DEALLOCATE THE ATTRINFO RECORD AND KEY RECORDS
+ * AS SOON AS POSSIBLE.
+ * ------------------------------------------------------------------------ */
+ releaseOprec(signal);
+}//Dblqh::packLqhkeyreqLab()
+
+/* ========================================================================= */
+/* ==== CHECK IF THE LOG RECORD FITS INTO THE CURRENT MBYTE, ======= */
+/* OTHERWISE SWITCH TO NEXT MBYTE. */
+/* */
+/* ========================================================================= */
+void Dblqh::checkNewMbyte(Signal* signal)
+{
+ UintR tcnmTmp;
+ UintR ttotalLogSize;
+
+/* -------------------------------------------------- */
+/* CHECK IF A NEW MBYTE OF LOG RECORD IS TO BE */
+/* OPENED BEFORE WRITING THE LOG RECORD. NO LOG */
+/* RECORDS ARE ALLOWED TO SPAN A MBYTE BOUNDARY */
+/* */
+/* INPUT: TC_CONNECTPTR THE OPERATION */
+/* LOG_FILE_PTR THE LOG FILE */
+/* OUTPUT: LOG_FILE_PTR THE NEW LOG FILE */
+/* -------------------------------------------------- */
+ ttotalLogSize = ZLOG_HEAD_SIZE + tcConnectptr.p->currTupAiLen;
+ ttotalLogSize = ttotalLogSize + tcConnectptr.p->primKeyLen;
+ tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
+ if ((ttotalLogSize + ZNEXT_LOG_SIZE) <= tcnmTmp) {
+ ndbrequire(tcnmTmp >= ttotalLogSize);
+ logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
+ return;
+ } else {
+ jam();
+/* -------------------------------------------------- */
+/* IT WAS NOT ENOUGH SPACE IN THIS MBYTE FOR */
+/* THIS LOG RECORD. MOVE TO NEXT MBYTE */
+/* THIS MIGHT INCLUDE CHANGING LOG FILE */
+/* -------------------------------------------------- */
+/* WE HAVE TO INSERT A NEXT LOG RECORD FIRST */
+/* -------------------------------------------------- */
+/* THEN CONTINUE BY WRITING THE FILE DESCRIPTORS*/
+/* -------------------------------------------------- */
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ changeMbyte(signal);
+ tcnmTmp = logFilePtr.p->remainingWordsInMbyte;
+ }//if
+ ndbrequire(tcnmTmp >= ttotalLogSize);
+ logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize;
+}//Dblqh::checkNewMbyte()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE OPERATION HEADER TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WLH
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeLogHeader(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 hashValue = tcConnectptr.p->hashValue;
+ Uint32 operation = tcConnectptr.p->operation;
+ Uint32 keyLen = tcConnectptr.p->primKeyLen;
+ Uint32 aiLen = tcConnectptr.p->currTupAiLen;
+ Uint32 totLogLen = aiLen + keyLen + ZLOG_HEAD_SIZE;
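+  /* The operation log header written below is six words: record type (ZPREP_OP_TYPE), total log length, hash value, operation type, attrinfo length and key length. */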
+ if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
+ Uint32* dataPtr = &logPagePtr.p->logPageWord[logPos];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
+ dataPtr[0] = ZPREP_OP_TYPE;
+ dataPtr[1] = totLogLen;
+ dataPtr[2] = hashValue;
+ dataPtr[3] = operation;
+ dataPtr[4] = aiLen;
+ dataPtr[5] = keyLen;
+ } else {
+ writeLogWord(signal, ZPREP_OP_TYPE);
+ writeLogWord(signal, totLogLen);
+ writeLogWord(signal, hashValue);
+ writeLogWord(signal, operation);
+ writeLogWord(signal, aiLen);
+ writeLogWord(signal, keyLen);
+ }//if
+}//Dblqh::writeLogHeader()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE TUPLE KEY TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WK
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeKey(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 logPos, endPos, dataLen;
+ Int32 remainingLen;
+ logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ remainingLen = regTcPtr->primKeyLen;
+ dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ remainingLen -= dataLen;
+ endPos = logPos + dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regTcPtr->tupkeyData[0],
+ dataLen);
+ } else {
+ jam();
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regTcPtr->tupkeyData[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
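+  /* Any key words beyond the first four (already written from tupkeyData above) are taken from the chained data buffer records below. */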
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = regTcPtr->firstTupkeybuf;
+ while (remainingLen > 0) {
+ logPos = endPos;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ remainingLen -= dataLen;
+ endPos += dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regDatabufptr.p->data[0],
+ dataLen);
+ } else {
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regDatabufptr.p->data[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
+ ndbrequire(regDatabufptr.i == RNIL);
+}//Dblqh::writeKey()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE ATTRINFO TO LOG -------
+ *
+ * SUBROUTINE SHORT NAME: WA
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeAttrinfoLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 totLen = regTcPtr->currTupAiLen;
+ if (totLen == 0)
+ return;
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 lqhLen = regTcPtr->reclenAiLqhkey;
+ ndbrequire(totLen >= lqhLen);
+ Uint32 endPos = logPos + lqhLen;
+ totLen -= lqhLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regTcPtr->firstAttrinfo[0],
+ lqhLen);
+ } else {
+ for (Uint32 i = 0; i < lqhLen; i++)
+ writeLogWord(signal, regTcPtr->firstAttrinfo[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
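+  /* Any attrinfo beyond the reclenAiLqhkey words already written from firstAttrinfo above is taken from the chained attrinfo buffer records below. */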
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ while (totLen > 0) {
+ logPos = endPos;
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(totLen >= dataLen);
+ ndbrequire(dataLen > 0);
+ totLen -= dataLen;
+ endPos += dataLen;
+ if (endPos < ZPAGE_SIZE) {
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos],
+ &regAttrinbufptr.p->attrbuf[0],
+ dataLen);
+ } else {
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ for (Uint32 i = 0; i < dataLen; i++)
+ writeLogWord(signal, regAttrinbufptr.p->attrbuf[i]);
+ endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos;
+ ndbrequire(regAttrinbufptr.i == RNIL);
+}//Dblqh::writeAttrinfoLab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND TUPLE KEY IN KEYINFO SIGNAL(S) ------- */
+/* */
+/* SUBROUTINE SHORT NAME: STU */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendTupkey(Signal* signal)
+{
+ UintR TdataPos = 3;
+ BlockReference lqhRef = calcLqhBlockRef(tcConnectptr.p->nextReplica);
+ signal->theData[0] = tcConnectptr.p->tcOprec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ databufptr.i = tcConnectptr.p->firstTupkeybuf;
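+  /* Each KEYINFO signal carries the three header words set above plus up to twenty key words; a new signal is started once 23 words have been filled. */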
+ do {
+ ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
+ signal->theData[TdataPos] = databufptr.p->data[0];
+ signal->theData[TdataPos + 1] = databufptr.p->data[1];
+ signal->theData[TdataPos + 2] = databufptr.p->data[2];
+ signal->theData[TdataPos + 3] = databufptr.p->data[3];
+
+ databufptr.i = databufptr.p->nextDatabuf;
+ TdataPos += 4;
+ if (databufptr.i == RNIL) {
+ jam();
+ sendSignal(lqhRef, GSN_KEYINFO, signal, TdataPos, JBB);
+ return;
+ } else if (TdataPos == 23) {
+ jam();
+ sendSignal(lqhRef, GSN_KEYINFO, signal, 23, JBB);
+ TdataPos = 3;
+ }
+ } while (1);
+}//Dblqh::sendTupkey()
+
+void Dblqh::cleanUp(Signal* signal)
+{
+ releaseOprec(signal);
+ deleteTransidHash(signal);
+ releaseTcrec(signal, tcConnectptr);
+}//Dblqh::cleanUp()
+
+/* --------------------------------------------------------------------------
+ * ---- RELEASE ALL RECORDS CONNECTED TO THE OPERATION RECORD AND THE ----
+ * OPERATION RECORD ITSELF
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseOprec(Signal* signal)
+{
+ UintR Tmpbuf;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+/* ---- RELEASE DATA BUFFERS ------------------- */
+ DatabufPtr regDatabufptr;
+ regDatabufptr.i = regTcPtr->firstTupkeybuf;
+/* --------------------------------------------------------------------------
+ * ------- RELEASE DATA BUFFERS -------
+ *
+ * ------------------------------------------------------------------------- */
+
+ while (regDatabufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Tmpbuf = regDatabufptr.p->nextDatabuf;
+ regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = regDatabufptr.i;
+ regDatabufptr.i = Tmpbuf;
+ }//while
+/* ---- RELEASE ATTRINFO BUFFERS ------------------- */
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
+ /* ########################################################################
+ * ####### RELEASE_ATTRINBUF #######
+ *
+ * ####################################################################### */
+ while (regAttrinbufptr.i != RNIL) {
+ jam();
+ regAttrinbufptr.i= release_attrinbuf(regAttrinbufptr.i);
+ }//while
+ regTcPtr->firstAttrinbuf = RNIL;
+ regTcPtr->lastAttrinbuf = RNIL;
+ regTcPtr->firstTupkeybuf = RNIL;
+ regTcPtr->lastTupkeybuf = RNIL;
+}//Dblqh::releaseOprec()
+
+/* ------------------------------------------------------------------------- */
+/* ------ DELETE TRANSACTION ID FROM HASH TABLE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::deleteTransidHash(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrecPtr prevHashptr;
+ TcConnectionrecPtr nextHashptr;
+
+ prevHashptr.i = regTcPtr->prevHashRec;
+ nextHashptr.i = regTcPtr->nextHashRec;
+ if (prevHashptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ prevHashptr.p->nextHashRec = nextHashptr.i;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THE OPERATION WAS PLACED FIRST IN THE LIST OF THE HASH TABLE. NEED TO SET */
+/* A NEW LEADER OF THE LIST. */
+/* ------------------------------------------------------------------------- */
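+    /* The hash bucket is selected by (transid[0] XOR tcOprec) modulo 1024. */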
+ Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
+ ctransidHash[hashIndex] = nextHashptr.i;
+ }//if
+ if (nextHashptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ nextHashptr.p->prevHashRec = prevHashptr.i;
+ }//if
+}//Dblqh::deleteTransidHash()
+
+/* --------------------------------------------------------------------------
+ * ------- LINK OPERATION IN ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME: LAF
+// Input Pointers:
+// tcConnectptr
+// fragptr
+ * ------------------------------------------------------------------------- */
+void Dblqh::linkActiveFrag(Signal* signal)
+{
+ TcConnectionrecPtr lafTcConnectptr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragPtr = fragptr.p;
+ Uint32 tcIndex = tcConnectptr.i;
+ lafTcConnectptr.i = regFragPtr->activeList;
+ regTcPtr->prevTc = RNIL;
+ regFragPtr->activeList = tcIndex;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
+ regTcPtr->nextTc = lafTcConnectptr.i;
+ regTcPtr->listState = TcConnectionrec::IN_ACTIVE_LIST;
+ if (lafTcConnectptr.i == RNIL) {
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(lafTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lafTcConnectptr.p->prevTc = tcIndex;
+ }//if
+ return;
+}//Dblqh::linkActiveFrag()
+
+/* -------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = RAF
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseActiveFrag(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrecPtr ralTcNextConnectptr;
+ TcConnectionrecPtr ralTcPrevConnectptr;
+ fragptr.i = regTcPtr->fragmentptr;
+ ralTcPrevConnectptr.i = regTcPtr->prevTc;
+ ralTcNextConnectptr.i = regTcPtr->nextTc;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ Fragrecord * const regFragPtr = fragptr.p;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::IN_ACTIVE_LIST);
+ regTcPtr->listState = TcConnectionrec::NOT_IN_LIST;
+
+ if (ralTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
+ }//if
+ if (ralTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcPrevConnectptr.p->nextTc = regTcPtr->nextTc;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN ACTIVE LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ regFragPtr->activeList = ralTcNextConnectptr.i;
+ }//if
+ if (regFragPtr->lcpRef != RNIL) {
+ jam();
+ lcpPtr.i = regFragPtr->lcpRef;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH);
+
+ /* --------------------------------------------------------------------
+ * IF A FRAGMENT IS CURRENTLY STARTING A LOCAL CHECKPOINT AND IT
+     * IS WAITING FOR ACTIVE OPERATIONS TO BE COMPLETED IN THE
+     * CURRENT PHASE, THEN WE CHECK WHETHER THE
+     * LAST ACTIVE OPERATION HAS NOW COMPLETED.
+ * ------------------------------------------------------------------- */
+ if (regFragPtr->activeList == RNIL) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ACTIVE LIST ON FRAGMENT IS EMPTY AND WE ARE WAITING FOR
+ * THIS TO HAPPEN.
+ * WE WILL NOW START THE CHECKPOINT IN TUP AND ACC.
+ * ----------------------------------------------------------------- */
+ /* SEND START LOCAL CHECKPOINT TO ACC AND TUP */
+ /* ----------------------------------------------------------------- */
+ fragptr.p->lcpRef = RNIL;
+ lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
+ sendStartLcp(signal);
+ }//if
+ }//if
+}//Dblqh::releaseActiveFrag()
+
+/* ######################################################################### */
+/* ####### TRANSACTION MODULE ####### */
+/* THIS MODULE HANDLES THE COMMIT AND THE COMPLETE PHASE. */
+/* ######################################################################### */
+void Dblqh::warningReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 1:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 2:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 3:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 4:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMITREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 5:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMITREQ with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 6:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETEREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 7:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETEREQ with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 8:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORT with non-existing transid in Dblqh" << endl;
+#endif
+ break;
+ case 9:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORTREQ with non-existing transid in Dblqh" << endl;
+#endif
+ break;
+ case 10:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received ABORTREQ in wrong state in Dblqh" << endl;
+#endif
+ break;
+ case 11:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMMIT when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 12:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received COMPLETE when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 13:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF when tc-rec released in Dblqh" << endl;
+#endif
+ break;
+ case 14:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF with wrong transid in Dblqh" << endl;
+#endif
+ break;
+ case 15:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF when already aborting in Dblqh" << endl;
+#endif
+ break;
+ case 16:
+ jam();
+ ndbrequire(cstartPhase == ZNIL);
+#ifdef ABORT_TRACE
+ ndbout << "W: Received LQHKEYREF in wrong state in Dblqh" << endl;
+#endif
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ return;
+}//Dblqh::warningReport()
+
+void Dblqh::errorReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ systemErrorLab(signal);
+ return;
+}//Dblqh::errorReport()
+
+/* ************************************************************************>>
+ * COMMIT: Start commit request from TC. This signal is originally sent as a
+ * packed signal and this function is called from execPACKED_SIGNAL.
+ * This is the normal commit protocol where TC first sends this signal to the
+ * backup node, which then sends COMMIT to the primary node. If
+ * everything is ok the primary node sends COMMITTED back to TC.
+ * ************************************************************************>> */
+void Dblqh::execCOMMIT(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 0);
+ return;
+ }//if
+ if (ERROR_INSERTED(5011)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
+ return;
+ }//if
+ if (ERROR_INSERTED(5012)) {
+ SET_ERROR_INSERT_VALUE(5017);
+ sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ if ((tcConnectptr.p->transid[0] == transid1) &&
+ (tcConnectptr.p->transid[1] == transid2)) {
+ commitReqLab(signal, gci);
+ return;
+ }//if
+ warningReport(signal, 1);
+ return;
+}//Dblqh::execCOMMIT()
+
+/* ************************************************************************>>
+ * COMMITREQ: Commit request from TC. This is the commit protocol used if
+ * one of the nodes is not behaving correctly. TC explicitly sends COMMITREQ
+ * to both the backup and primary node and gets a COMMITCONF back if the
+ * COMMIT was ok.
+ * ************************************************************************>> */
+void Dblqh::execCOMMITREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 gci = signal->theData[2];
+ Uint32 transid1 = signal->theData[3];
+ Uint32 transid2 = signal->theData[4];
+ Uint32 tcOprec = signal->theData[6];
+ if (ERROR_INSERTED(5004)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5017)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITREQ, signal, 2000, 7);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ warningReport(signal, 5);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::PREPARED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
+ jam();
+/*-------------------------------------------------------*/
+/* THE NORMAL CASE. */
+/*-------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ commitReqLab(signal, gci);
+ return;
+ break;
+ case TcConnectionrec::COMMITTED:
+ jam();
+/*---------------------------------------------------------*/
+/*       FOR SOME REASON THE COMMIT PHASE HAS BEEN */
+/* FINISHED AFTER A TIME OUT. WE NEED ONLY SEND A */
+/* COMMITCONF SIGNAL. */
+/*---------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ /*empty*/;
+ break;
+ default:
+ jam();
+ warningReport(signal, 4);
+ return;
+ break;
+ }//switch
+ return;
+}//Dblqh::execCOMMITREQ()
+
+/* ************************************************************************>>
+ * COMPLETE : Complete the transaction. Sent as a packed signal from TC.
+ * Works the same way as COMMIT protocol. This is the normal case with both
+ * primary and backup working (See COMMIT).
+ * ************************************************************************>> */
+void Dblqh::execCOMPLETE(Signal* signal)
+{
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ Uint32 tcIndex = signal->theData[0];
+ Uint32 transid1 = signal->theData[1];
+ Uint32 transid2 = signal->theData[2];
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 1);
+ return;
+ }//if
+ if (ERROR_INSERTED(5013)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(5014)) {
+ SET_ERROR_INSERT_VALUE(5018);
+ sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ if ((tcConnectptr.p->transactionState == TcConnectionrec::COMMITTED) &&
+ (tcConnectptr.p->transid[0] == transid1) &&
+ (tcConnectptr.p->transid[1] == transid2)) {
+ if (tcConnectptr.p->seqNoReplica != 0) {
+ jam();
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ completeTransLastLab(signal);
+ return;
+ }//if
+ }//if
+ if (tcConnectptr.p->transactionState != TcConnectionrec::COMMITTED) {
+ warningReport(signal, 2);
+ } else {
+ warningReport(signal, 3);
+ }//if
+}//Dblqh::execCOMPLETE()
+
+/* ************************************************************************>>
+ * COMPLETEREQ: Complete request from TC. Same as COMPLETE but used if one
+ * node is not working ok (See COMMIT).
+ * ************************************************************************>> */
+void Dblqh::execCOMPLETEREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ Uint32 tcOprec = signal->theData[5];
+ if (ERROR_INSERTED(5005)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5018)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETEREQ, signal, 2000, 6);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+/*---------------------------------------------------------*/
+/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
+/* A TIME OUT. THE TRANSACTION IS GONE. WE NEED TO */
+/* REPORT COMPLETION ANYWAY. */
+/*---------------------------------------------------------*/
+ signal->theData[0] = reqPtr;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = transid1;
+ signal->theData[3] = transid2;
+ sendSignal(reqBlockref, GSN_COMPLETECONF, signal, 4, JBB);
+ warningReport(signal, 7);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::COMMITTED:
+ jam();
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ /*empty*/;
+ break;
+/*---------------------------------------------------------*/
+/* THE NORMAL CASE. */
+/*---------------------------------------------------------*/
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+/*---------------------------------------------------------*/
+/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */
+/* A TIME OUT. WE HAVE SET THE PROPER VARIABLES SUCH */
+/* THAT A COMPLETECONF WILL BE SENT WHEN COMPLETE IS */
+/* FINISHED. */
+/*---------------------------------------------------------*/
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ return;
+ break;
+ default:
+ jam();
+ warningReport(signal, 6);
+ return;
+ break;
+ }//switch
+ if (regTcPtr->seqNoReplica != 0) {
+ jam();
+ localCommitLab(signal);
+ return;
+ } else {
+ jam();
+ completeTransLastLab(signal);
+ return;
+ }//if
+}//Dblqh::execCOMPLETEREQ()
+
+/* ************> */
+/* COMPLETED > */
+/* ************> */
+void Dblqh::execLQHKEYCONF(Signal* signal)
+{
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ Uint32 tcIndex = lqhKeyConf->opPtr;
+ Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
+ TcConnectionrec *regTcConnectionrec = tcConnectionrec;
+ jamEntry();
+ if (tcIndex >= ttcConnectrecFileSize) {
+ errorReport(signal, 2);
+ return;
+ }//if
+ tcConnectptr.i = tcIndex;
+ ptrAss(tcConnectptr, regTcConnectionrec);
+ switch (tcConnectptr.p->connectState) {
+ case TcConnectionrec::LOG_CONNECTED:
+ jam();
+ completedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CONNECTED:
+ jam();
+ copyCompletedLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::execLQHKEYCONF()
+
+/* ------------------------------------------------------------------------- */
+/* ------- COMMIT PHASE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::commitReqLab(Signal* signal, Uint32 gci)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
+ TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ regTcPtr->gci = gci;
+ if (transState == TcConnectionrec::PREPARED) {
+ if (logWriteState == TcConnectionrec::WRITTEN) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::PREPARED_RECEIVED_COMMIT;
+ TcConnectionrecPtr saveTcPtr = tcConnectptr;
+ Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref);
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ signal->theData[1] = gci;
+ EXECUTE_DIRECT(blockNo, GSN_TUP_WRITELOG_REQ, signal, 2);
+ jamEntry();
+ if (regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) {
+ jam();
+ return;
+ }//if
+ ndbrequire(regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_WRITTEN);
+ tcConnectptr = saveTcPtr;
+ } else if (logWriteState == TcConnectionrec::NOT_STARTED) {
+ jam();
+ } else if (logWriteState == TcConnectionrec::NOT_WRITTEN) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IT IS A READ OPERATION OR OTHER OPERATION THAT DOES NOT USE THE LOG.       */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE. THIS CAN OCCUR */
+/* WHEN WE ARE STARTING A NEW FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ } else {
+ ndbrequire(logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER A SCAN OF ALL */
+/* OPERATION RECORDS CHANGED IT INTO NOT_WRITTEN_WAIT. THIS INDICATES THAT WE */
+/* ARE WAITING FOR THIS OPERATION TO COMMIT OR ABORT SO THAT WE CAN FIND THE */
+/* STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ checkScanTcCompleted(signal);
+ }//if
+ } else if (transState == TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
+ return;
+ } else if (transState == TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL) {
+ jam();
+ } else {
+ warningReport(signal, 0);
+ return;
+ }//if
+ if (regTcPtr->seqNoReplica != 0) {
+ jam();
+ commitReplyLab(signal);
+ return;
+ }//if
+ localCommitLab(signal);
+ return;
+}//Dblqh::commitReqLab()
+
+void Dblqh::execLQH_WRITELOG_REQ(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Uint32 gci = signal->theData[1];
+ Uint32 newestGci = cnewestGci;
+ TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
+ TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ regTcPtr->gci = gci;
+ if (gci > newestGci) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* KEEP TRACK OF NEWEST GLOBAL CHECKPOINT THAT LQH HAS HEARD OF. */
+/* ------------------------------------------------------------------------- */
+ cnewestGci = gci;
+ }//if
+ if (logWriteState == TcConnectionrec::WRITTEN) {
+/*---------------------------------------------------------------------------*/
+/* I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS */
+/* TRANSACTION. */
+/*---------------------------------------------------------------------------*/
+ jam();
+ LogPartRecordPtr regLogPartPtr;
+ Uint32 noOfLogPages = cnoOfLogPages;
+ jam();
+ regLogPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(regLogPartPtr, clogPartFileSize, logPartRecord);
+ if ((regLogPartPtr.p->logPartState == LogPartRecord::ACTIVE) ||
+ (noOfLogPages == 0)) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS LOG PART IS CURRENTLY ACTIVE WRITING ANOTHER LOG RECORD. WE MUST */
+/* WAIT UNTIL THIS PART HAS COMPLETED ITS OPERATION. */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safeguard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ linkWaitLog(signal, regLogPartPtr);
+ if (transState == TcConnectionrec::PREPARED) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL;
+ } else {
+ jam();
+ ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED;
+ }//if
+ if (regLogPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ regLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ writeCommitLog(signal, regLogPartPtr);
+ if (transState == TcConnectionrec::PREPARED) {
+ jam();
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL;
+ } else {
+ jam();
+ ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT);
+ regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN;
+ }//if
+ }//if
+}//Dblqh::execLQH_WRITELOG_REQ()
+
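+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: execLQH_WRITELOG_REQ()
+ * above routes the commit log record to one of four redo log parts by masking
+ * the operation's hash value (hashValue & 3). The standalone model below
+ * mirrors only that mapping; NUM_LOG_PARTS is an assumed name and the value 4
+ * simply restates the "& 3" mask used in this version of the code.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+static const uint32_t NUM_LOG_PARTS = 4;   // mirrors the "& 3" mask above
+
+// Map an operation's hash value onto a redo log part index. A power-of-two
+// part count lets the modulo reduce to a plain bit mask.
+static uint32_t selectLogPart(uint32_t hashValue)
+{
+  return hashValue & (NUM_LOG_PARTS - 1);
+}
+
+int main()
+{
+  assert(selectLogPart(0x12345678u) == (0x12345678u & 3u));
+  assert(selectLogPart(7u) < NUM_LOG_PARTS);
+  return 0;
+}
+#endif
+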
+void Dblqh::localCommitLab(Signal* signal)
+{
+ FragrecordPtr regFragptr;
+ regFragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ Fragrecord::FragStatus status = regFragptr.p->fragStatus;
+ fragptr = regFragptr;
+ switch (status) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ commitContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COMMIT_STOPPED;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dblqh::localCommitLab()
+
+void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
+{
+/* ------------------------------------------------------------------------- */
+/*INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/*CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */
+/*The operation is already removed from the active list since there is no */
+/*chance for any real-time breaks before we need to release it. */
+/* ------------------------------------------------------------------------- */
+/*WE ALSO CONTINUE HERE AFTER THE NORMAL PROCEDURE. */
+/*WE MUST COMMIT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN AND SEES A */
+/*DIRTY STATE IN TUP. */
+/* ------------------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragptr = fragptr.p;
+ Uint32 operation = regTcPtr->operation;
+ Uint32 simpleRead = regTcPtr->simpleRead;
+ Uint32 dirtyOp = regTcPtr->dirtyOp;
+ if (regTcPtr->activeCreat == ZFALSE) {
+ if ((cCommitBlocked == true) &&
+ (regFragptr->fragActiveStatus == ZTRUE)) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP and/or ACC have problems in writing the undo log to disk fast enough.
+// We must avoid the commit at this time and try later instead. The fragment
+// is also active with a local checkpoint and this commit can generate UNDO
+// log records that overflow the UNDO log buffer.
+/* ------------------------------------------------------------------------- */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safeguard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::COMMIT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ if (operation != ZREAD) {
+ TupCommitReq * const tupCommitReq =
+ (TupCommitReq *)signal->getDataPtrSend();
+ Uint32 sig0 = regTcPtr->tupConnectrec;
+ Uint32 tup = refToBlock(regTcPtr->tcTupBlockref);
+ jam();
+ tupCommitReq->opPtr = sig0;
+ tupCommitReq->gci = regTcPtr->gci;
+ tupCommitReq->hashValue = regTcPtr->hashValue;
+ EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal,
+ TupCommitReq::SignalLength);
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ } else {
+ if(!dirtyOp){
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ }
+ }
+ jamEntry();
+ if (simpleRead) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
+/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */
+/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */
+/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */
+/*PERFORMED. */
+/* ------------------------------------------------------------------------- */
+ cleanUp(signal);
+ return;
+ }//if
+ }//if
+ Uint32 seqNoReplica = regTcPtr->seqNoReplica;
+ if (regTcPtr->gci > regFragptr->newestGci) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IT IS THE FIRST TIME THIS GLOBAL CHECKPOINT IS INVOLVED IN UPDATING THIS */
+/*FRAGMENT. UPDATE THE VARIABLE THAT KEEPS TRACK OF NEWEST GCI IN FRAGMENT */
+/* ------------------------------------------------------------------------- */
+ regFragptr->newestGci = regTcPtr->gci;
+ }//if
+ if (dirtyOp != ZTRUE) {
+ if (seqNoReplica != 0) {
+ jam();
+ completeTransNotLastLab(signal);
+ return;
+ }//if
+ commitReplyLab(signal);
+ return;
+ } else {
+/* ------------------------------------------------------------------------- */
+/*WE MUST HANDLE DIRTY WRITES IN A SPECIAL WAY. THESE OPERATIONS WILL NOT */
+/*SEND ANY COMMIT OR COMPLETE MESSAGES TO OTHER NODES. THEY WILL MERELY SEND */
+/*THOSE SIGNALS INTERNALLY. */
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ jam();
+ packLqhkeyreqLab(signal);
+ } else {
+ ndbrequire(regTcPtr->abortState != TcConnectionrec::NEW_FROM_TC);
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ cleanUp(signal);
+ }//if
+ }//if
+}//Dblqh::commitContinueAfterBlockedLab()
+
+void Dblqh::commitReplyLab(Signal* signal)
+{
+/* -------------------------------------------------------------- */
+/* BACKUP AND STAND-BY REPLICAS ONLY UPDATE THE TRANSACTION STATE */
+/* -------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ TcConnectionrec::AbortState abortState = regTcPtr->abortState;
+ regTcPtr->transactionState = TcConnectionrec::COMMITTED;
+ if (abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ if (regTcPtr->seqNoReplica == 0) {
+ jam();
+ sendCommittedTc(signal, clientBlockref);
+ return;
+ } else {
+ jam();
+ sendCommitLqh(signal, clientBlockref);
+ return;
+ }//if
+ } else if (regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC) {
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(tcConnectptr.p->reqBlockref, GSN_COMMITCONF, signal, 4, JBB);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ }//if
+ return;
+}//Dblqh::commitReplyLab()
+
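+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: commitReplyLab()
+ * above chooses the receiver of the commit acknowledgement from two fields of
+ * the operation record, abortState and seqNoReplica (0 being the replica that
+ * answers TC directly in this code). The enum and function names below are
+ * stand-ins invented for the sketch; only the branch structure mirrors the
+ * function above.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+
+enum AbortState { ABORT_IDLE, REQ_FROM_TC, NEW_FROM_TC };
+enum CommitAck  { COMMITTED_TO_TC, COMMIT_TO_LQH, COMMITCONF_TO_TC, TRANSCONF };
+
+// Reproduce the decision made in commitReplyLab() as a pure function.
+static CommitAck commitAck(AbortState abortState, unsigned seqNoReplica)
+{
+  if (abortState == ABORT_IDLE)
+    return (seqNoReplica == 0) ? COMMITTED_TO_TC : COMMIT_TO_LQH;
+  if (abortState == REQ_FROM_TC)
+    return COMMITCONF_TO_TC;      // COMMITREQ path: reply with COMMITCONF
+  return TRANSCONF;               // NEW_FROM_TC: TC take-over in progress
+}
+
+int main()
+{
+  assert(commitAck(ABORT_IDLE, 0)  == COMMITTED_TO_TC);
+  assert(commitAck(ABORT_IDLE, 2)  == COMMIT_TO_LQH);
+  assert(commitAck(REQ_FROM_TC, 0) == COMMITCONF_TO_TC);
+  assert(commitAck(NEW_FROM_TC, 1) == TRANSCONF);
+  return 0;
+}
+#endif
+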
+/* ------------------------------------------------------------------------- */
+/* ------- COMPLETE PHASE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::completeTransNotLastLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ jam();
+ sendCompleteLqh(signal, clientBlockref);
+ cleanUp(signal);
+ return;
+ } else {
+ jam();
+ completeUnusualLab(signal);
+ return;
+ }//if
+}//Dblqh::completeTransNotLastLab()
+
+void Dblqh::completeTransLastLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ Uint32 clientBlockref = regTcPtr->clientBlockref;
+ jam();
+/* ------------------------------------------------------------------------- */
+/*DIRTY WRITES WHICH ARE LAST IN THE CHAIN OF REPLICAS WILL SEND COMPLETED */
+/*INSTEAD OF SENDING PREPARED TO THE TC (OR OTHER INITIATOR OF OPERATION). */
+/* ------------------------------------------------------------------------- */
+ sendCompletedTc(signal, clientBlockref);
+ cleanUp(signal);
+ return;
+ } else {
+ jam();
+ completeUnusualLab(signal);
+ return;
+ }//if
+}//Dblqh::completeTransLastLab()
+
+void Dblqh::completeUnusualLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
+ jam();
+ sendAborted(signal);
+ } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref,
+ GSN_COMPLETECONF, signal, 4, JBB);
+ }//if
+ cleanUp(signal);
+ return;
+}//Dblqh::completeUnusualLab()
+
+/* ========================================================================= */
+/* ======= RELEASE TC CONNECT RECORD ======= */
+/* */
+/* RELEASE A TC CONNECT RECORD TO THE FREELIST. */
+/* ========================================================================= */
+void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr)
+{
+ jam();
+ locTcConnectptr.p->tcTimer = 0;
+ locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
+ cfirstfreeTcConrec = locTcConnectptr.i;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = locTcConnectptr.p->tableref;
+ if(tabPtr.i == RNIL)
+ return;
+
+ ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec);
+
+ /**
+ * Normal case
+ */
+ ndbrequire(tabPtr.p->usageCount > 0);
+ tabPtr.p->usageCount--;
+}//Dblqh::releaseTcrec()
+
+void Dblqh::releaseTcrecLog(Signal* signal, TcConnectionrecPtr locTcConnectptr)
+{
+ jam();
+ locTcConnectptr.p->tcTimer = 0;
+ locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
+ cfirstfreeTcConrec = locTcConnectptr.i;
+
+ TablerecPtr tabPtr;
+ tabPtr.i = locTcConnectptr.p->tableref;
+ if(tabPtr.i == RNIL)
+ return;
+
+}//Dblqh::releaseTcrecLog()
+
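+/* ===========================================================================
+ * Illustrative sketch only, not part of the block's logic: releaseTcrec() and
+ * releaseTcrecLog() above return a TC connect record to the pool by pushing
+ * its index onto an intrusive free list (nextTcConnectrec linking,
+ * cfirstfreeTcConrec as the head). The standalone pool below models that
+ * idiom; every name in it is a stand-in made up for the sketch.
+ * ========================================================================= */
+#if 0
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+static const uint32_t NIL = 0xFFFFFFFFu;    // plays the role of RNIL
+
+struct Rec { uint32_t nextFree; };          // only the free-list link matters
+
+struct Pool {
+  std::vector<Rec> recs;
+  uint32_t firstFree;
+
+  explicit Pool(uint32_t n) : recs(n), firstFree(NIL) {}
+
+  // Push record i onto the head of the free list, as releaseTcrec() does.
+  void release(uint32_t i) {
+    recs[i].nextFree = firstFree;
+    firstFree = i;
+  }
+
+  // Pop the head of the free list, or NIL when the pool is exhausted.
+  uint32_t seize() {
+    const uint32_t i = firstFree;
+    if (i != NIL) firstFree = recs[i].nextFree;
+    return i;
+  }
+};
+
+int main()
+{
+  Pool pool(4);
+  pool.release(2);
+  pool.release(0);
+  assert(pool.seize() == 0);    // last released is the first to be reused
+  assert(pool.seize() == 2);
+  assert(pool.seize() == NIL);
+  return 0;
+}
+#endif
+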
+/* ------------------------------------------------------------------------- */
+/* ------- ABORT PHASE ------- */
+/* */
+/*THIS PART IS USED AT ERRORS THAT CAUSE ABORT OF TRANSACTION. */
+/* ------------------------------------------------------------------------- */
+/* ***************************************************>> */
+/* ABORT: Abort transaction in connection. Sender TC. */
+/* This is the normal protocol (See COMMIT) */
+/* ***************************************************>> */
+void Dblqh::execABORT(Signal* signal)
+{
+ jamEntry();
+ Uint32 tcOprec = signal->theData[0];
+ BlockReference tcBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ CRASH_INSERTION(5003);
+ if (ERROR_INSERTED(5015)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ jam();
+
+ if(ERROR_INSERTED(5039) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5040);
+ return;
+ }
+
+ if(ERROR_INSERTED(5040) &&
+ refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){
+ jam();
+ SET_ERROR_INSERT_VALUE(5003);
+ return;
+ }
+
+/* ------------------------------------------------------------------------- */
+// SEND ABORTED EVEN IF NOT FOUND.
+//THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE.
+/* ------------------------------------------------------------------------- */
+ signal->theData[0] = tcOprec;
+ signal->theData[1] = transid1;
+ signal->theData[2] = transid2;
+ signal->theData[3] = cownNodeid;
+ signal->theData[4] = ZTRUE;
+ sendSignal(tcBlockref, GSN_ABORTED, signal, 5, JBB);
+ warningReport(signal, 8);
+ return;
+ }//if
+/* ------------------------------------------------------------------------- */
+/*A GUIDING DESIGN PRINCIPLE IN HANDLING THESE ERROR SITUATIONS HAS BEEN TO */
+/*KEEP IT SIMPLE. THUS WE PREFER TO INSERT A WAIT AND SET THE ABORT_STATE TO */
+/*ACTIVE RATHER THAN WRITE NEW CODE TO HANDLE EVERY SPECIAL SITUATION. */
+/* ------------------------------------------------------------------------- */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->nextReplica != ZNIL) {
+/* ------------------------------------------------------------------------- */
+// We will immediately send the ABORT message also to the next LQH node in line.
+/* ------------------------------------------------------------------------- */
+ BlockReference TLqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
+ signal->theData[0] = regTcPtr->tcOprec;
+ signal->theData[1] = regTcPtr->tcBlockref;
+ signal->theData[2] = regTcPtr->transid[0];
+ signal->theData[3] = regTcPtr->transid[1];
+ sendSignal(TLqhRef, GSN_ABORT, signal, 4, JBB);
+ }//if
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_TC;
+ regTcPtr->activeCreat = ZFALSE;
+
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(commitAckMarker != RNIL){
+ jam();
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Ab2 marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ }
+
+ abortStateHandlerLab(signal);
+
+ return;
+}//Dblqh::execABORT()
+
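+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: execABORT() above
+ * forwards the ABORT signal to the next LQH replica whenever nextReplica is
+ * not ZNIL, so the abort travels down the whole replica chain. The loop below
+ * is a standalone model of that forwarding; the node numbering and the NIL
+ * terminator are assumptions made for the sketch.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+static const uint32_t NIL = 0xFFFFu;        // plays the role of ZNIL
+
+// chain[i] holds the replica that follows node i, or NIL at the end.
+// Returns how many replicas received the ABORT.
+static uint32_t propagateAbort(const std::vector<uint32_t>& chain,
+                               uint32_t firstNode)
+{
+  uint32_t hops = 0;
+  for (uint32_t node = firstNode; node != NIL; node = chain[node]) {
+    /* node receives ABORT and starts its local abort handling here */
+    hops++;
+  }
+  return hops;
+}
+
+int main()
+{
+  // Three replicas in the chain: 1 -> 2 -> 5 -> end.
+  std::vector<uint32_t> chain(8, NIL);
+  chain[1] = 2;
+  chain[2] = 5;
+  assert(propagateAbort(chain, 1) == 3);
+  return 0;
+}
+#endif
+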
+/* ************************************************************************>>
+ * ABORTREQ: Same as ABORT but used in case one node isn't working ok.
+ * (See COMMITREQ)
+ * ************************************************************************>> */
+void Dblqh::execABORTREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 reqPtr = signal->theData[0];
+ BlockReference reqBlockref = signal->theData[1];
+ Uint32 transid1 = signal->theData[2];
+ Uint32 transid2 = signal->theData[3];
+ Uint32 tcOprec = signal->theData[5];
+ if (ERROR_INSERTED(5006)) {
+ systemErrorLab(signal);
+ }
+ if (ERROR_INSERTED(5016)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTREQ, signal, 2000, 6);
+ return;
+ }//if
+ if (findTransaction(transid1,
+ transid2,
+ tcOprec) != ZOK) {
+ signal->theData[0] = reqPtr;
+ signal->theData[2] = cownNodeid;
+ signal->theData[3] = transid1;
+ signal->theData[4] = transid2;
+ sendSignal(reqBlockref, GSN_ABORTCONF, signal, 5, JBB);
+ warningReport(signal, 9);
+ return;
+ }//if
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->transactionState != TcConnectionrec::PREPARED) {
+ warningReport(signal, 10);
+ return;
+ }//if
+ regTcPtr->reqBlockref = reqBlockref;
+ regTcPtr->reqRef = reqPtr;
+ regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC;
+ regTcPtr->activeCreat = ZFALSE;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::execABORTREQ()
+
+/* ************>> */
+/* ACC_TO_REF > */
+/* ************>> */
+void Dblqh::execACC_TO_REF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ releaseActiveFrag(signal);
+ abortErrorLab(signal);
+ return;
+}//Dblqh::execACC_TO_REF()
+
+/* ************> */
+/* ACCKEYREF > */
+/* ************> */
+void Dblqh::execACCKEYREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ terrorCode = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const tcPtr = tcConnectptr.p;
+ switch (tcPtr->transactionState) {
+ case TcConnectionrec::WAIT_ACC:
+ jam();
+ releaseActiveFrag(signal);
+ break;
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ const Uint32 errCode = terrorCode;
+ tcPtr->errorCode = errCode;
+/* ------------------------------------------------------------------------- */
+/*WHEN AN ABORT FROM TC ARRIVES IT COULD ACTUALLY BE A CORRECT BEHAVIOUR */
+/*SINCE THE TUPLE MIGHT NOT HAVE ARRIVED YET OR ALREADY HAVE BEEN INSERTED. */
+/* ------------------------------------------------------------------------- */
+ if (tcPtr->activeCreat == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THIS IS A NORMAL EVENT DURING CREATION OF A FRAGMENT. PERFORM ABORT IN */
+/*TUP AND ACC AND THEN CONTINUE WITH NORMAL COMMIT PROCESSING. IF THE ERROR */
+/*HAPPENS TO BE A SERIOUS ERROR THEN PERFORM ABORT PROCESSING AS NORMAL. */
+/* ------------------------------------------------------------------------- */
+ switch (tcPtr->operation) {
+ case ZUPDATE:
+ case ZDELETE:
+ jam();
+ if (errCode != ZNO_TUPLE_FOUND) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ }//if
+ break;
+ case ZINSERT:
+ jam();
+ if (errCode != ZTUPLE_ALREADY_EXIST) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ }//if
+ break;
+ default:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A NORMAL ERROR WILL BE TREATED AS A NORMAL ABORT AND WILL ABORT THE */
+/*TRANSACTION. NO SPECIAL HANDLING IS NEEDED. */
+/* ------------------------------------------------------------------------- */
+ tcPtr->activeCreat = ZFALSE;
+ break;
+ }//switch
+ } else {
+ /**
+ * Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND
+ *
+ * Unless it's a simple or dirty read
+ *
+ * NOT TRUE!
+ * 1) op1 - primary insert ok
+ * 2) op1 - backup insert fail (log full or what ever)
+ * 3) op1 - delete ok @ primary
+ * 4) op1 - delete fail @ backup
+ *
+ * -> ZNO_TUPLE_FOUND is possible
+ */
+ ndbrequire
+ (tcPtr->seqNoReplica == 0 ||
+ errCode != ZTUPLE_ALREADY_EXIST ||
+ (tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple)));
+ }
+ tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::execACCKEYREF()
+
+void Dblqh::localAbortStateHandlerLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) {
+ jam();
+ return;
+ }//if
+ regTcPtr->activeCreat = ZFALSE;
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ regTcPtr->errorCode = terrorCode;
+ abortStateHandlerLab(signal);
+ return;
+}//Dblqh::localAbortStateHandlerLab()
+
+void Dblqh::abortStateHandlerLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ switch (regTcPtr->transactionState) {
+ case TcConnectionrec::PREPARED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE OPERATION IS ALREADY PREPARED AND SENT TO THE NEXT LQH OR BACK TO TC. */
+/*WE CAN SIMPLY CONTINUE WITH THE ABORT PROCESS. */
+/*IF IT WAS A CHECK FOR TRANSACTION STATUS THEN WE REPORT THE STATUS TO THE */
+/*NEW TC AND CONTINUE WITH THE NEXT OPERATION IN LQH. */
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Prepared);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL:
+ case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL:
+ jam();
+/* ------------------------------------------------------------------------- */
+// We can only reach these states for multi-updates on a record in a transaction.
+// We know that at least one of those has received the COMMIT signal, thus we
+// declare ourselves only prepared since we will then receive the expected
+// COMMIT signal.
+/* ------------------------------------------------------------------------- */
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ sendLqhTransconf(signal, LqhTransConf::Prepared);
+ break;
+ case TcConnectionrec::WAIT_TUPKEYINFO:
+ case TcConnectionrec::WAIT_ATTR:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* WE ARE CURRENTLY WAITING FOR MORE INFORMATION. WE CAN START THE ABORT */
+/* PROCESS IMMEDIATELY. THE KEYINFO AND ATTRINFO SIGNALS WILL BE DROPPED */
+/* SINCE THE ABORT STATE WILL BE SET. */
+/* ------------------------------------------------------------------------- */
+ break;
+ case TcConnectionrec::WAIT_TUP:
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP is currently active. We have to wait for the TUPKEYREF or TUPKEYCONF
+// to arrive since we might otherwise jeopardise the local checkpoint
+// consistency in overload situations.
+/* ------------------------------------------------------------------------- */
+ regTcPtr->transactionState = TcConnectionrec::WAIT_TUP_TO_ABORT;
+ return;
+ case TcConnectionrec::WAIT_ACC:
+ jam();
+ if (regTcPtr->listState == TcConnectionrec::ACC_BLOCK_LIST) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// If the operation is in the ACC Blocked list the operation is not allowed
+// to start yet. We release it from the ACC Blocked list and will go through
+// the gate in abortCommonLab(..) where it will be blocked.
+/* ------------------------------------------------------------------------- */
+ fragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ releaseAccList(signal);
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+// We start the abort immediately since the operation is still in the active
+// list and the fragment cannot have been frozen yet. By sending LCP_HOLDOPCONF
+// as direct signals we avoid the problem that we might find the operation
+// in an unexpected list in ACC.
+// We cannot accept being blocked before aborting ACC here since that would
+// lead to seriously complex issues.
+/* ------------------------------------------------------------------------- */
+ abortContinueAfterBlockedLab(signal, false);
+ return;
+ }//if
+ break;
+ case TcConnectionrec::LOG_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*CURRENTLY QUEUED FOR LOGGING. WAIT UNTIL THE LOG RECORD HAS BEEN INSERTED */
+/*AND THEN CONTINUE THE ABORT PROCESS. */
+//Could also be waiting for an overloaded log disk. In this case it is easy
+//to abort when CONTINUEB arrives.
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+   * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY AN LCP.
+   * Since nothing has been done, just release the operation,
+   * i.e. no prepare log record has been written
+   * so no abort log record needs to be written
+ */
+ releaseWaitQueue(signal);
+ continueAfterLogAbortWriteLab(signal);
+ return;
+ break;
+ case TcConnectionrec::WAIT_AI_AFTER_ABORT:
+ jam();
+/* ------------------------------------------------------------------------- */
+/* ABORT OF ACC AND TUP ALREADY COMPLETED. THIS STATE IS ONLY USED WHEN */
+/* CREATING A NEW FRAGMENT. */
+/* ------------------------------------------------------------------------- */
+ continueAbortLab(signal);
+ return;
+ break;
+ case TcConnectionrec::WAIT_TUP_TO_ABORT:
+ case TcConnectionrec::ABORT_STOPPED:
+ case TcConnectionrec::LOG_ABORT_QUEUED:
+ case TcConnectionrec::WAIT_ACC_ABORT:
+ case TcConnectionrec::ABORT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*ABORT IS ALREADY ONGOING DUE TO SOME ERROR. WE HAVE ALREADY SET THE STATE */
+/*OF THE ABORT SO THAT WE KNOW THAT TC EXPECTS A REPORT. WE CAN THUS SIMPLY */
+/*EXIT. */
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ case TcConnectionrec::LOG_COMMIT_QUEUED:
+ case TcConnectionrec::COMMIT_QUEUED:
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THIS IS ONLY AN ALLOWED STATE IF A DIRTY WRITE OR SIMPLE READ IS PERFORMED.*/
+/*IF WE ARE MERELY CHECKING THE TRANSACTION STATE IT IS ALSO AN ALLOWED STATE*/
+/* ------------------------------------------------------------------------- */
+ if (regTcPtr->dirtyOp == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*COMPLETE THE DIRTY WRITE AND THEN REPORT COMPLETED BACK TO TC. SINCE IT IS */
+/*A DIRTY WRITE IT IS ALLOWED TO COMMIT EVEN IF THE TRANSACTION ABORTS. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ if (regTcPtr->simpleRead) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */
+/*ACC TO CLEAR THE LOCKS. COMPLETE THIS PROCESS AND THEN RETURN AS NORMAL. */
+/*NO DATA HAS CHANGED DUE TO THIS SIMPLE READ ANYWAY. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+ jam();
+/* ------------------------------------------------------------------------- */
+/*WE ARE ONLY CHECKING THE STATUS OF THE TRANSACTION. IT IS COMMITTING. */
+/*COMPLETE THE COMMIT LOCALLY AND THEN SEND REPORT OF COMMITTED TO THE NEW TC*/
+/* ------------------------------------------------------------------------- */
+ return;
+ break;
+ case TcConnectionrec::COMMITTED:
+ jam();
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC);
+/* ------------------------------------------------------------------------- */
+/*WE ARE CHECKING TRANSACTION STATUS. REPORT COMMITTED AND CONTINUE WITH THE */
+/*NEXT OPERATION. */
+/* ------------------------------------------------------------------------- */
+ sendLqhTransconf(signal, LqhTransConf::Committed);
+ return;
+ break;
+ default:
+ ndbrequire(false);
+/* ------------------------------------------------------------------------- */
+/*THE STATE WAS NOT AN ALLOWED STATE ON A NORMAL OPERATION. SCANS AND COPY */
+/*FRAGMENT OPERATIONS SHOULD HAVE EXECUTED IN ANOTHER PATH. */
+/* ------------------------------------------------------------------------- */
+ break;
+ }//switch
+ abortCommonLab(signal);
+ return;
+}//Dblqh::abortStateHandlerLab()
+
+void Dblqh::abortErrorLab(Signal* signal)
+{
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) {
+ jam();
+ regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH;
+ regTcPtr->errorCode = terrorCode;
+ }//if
+ /* -----------------------------------------------------------------------
+ * ACTIVE CREATION IS RESET FOR ALL ERRORS WHICH SHOULD BE HANDLED
+ * WITH NORMAL ABORT HANDLING.
+ * ----------------------------------------------------------------------- */
+ regTcPtr->activeCreat = ZFALSE;
+ abortCommonLab(signal);
+ return;
+}//Dblqh::abortErrorLab()
+
+void Dblqh::abortCommonLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(regTcPtr->activeCreat != ZTRUE && commitAckMarker != RNIL){
+ /**
+ * There is no NR ongoing and we have a marker
+ */
+ jam();
+#ifdef MARKER_TRACE
+ {
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.getPtr(tmp, commitAckMarker);
+ ndbout_c("Abo marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2);
+ }
+#endif
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ }
+
+ fragptr.i = regTcPtr->fragmentptr;
+ if (fragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ case Fragrecord::CRASH_RECOVERING:
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ linkActiveFrag(signal);
+ abortContinueAfterBlockedLab(signal, true);
+ return;
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ regTcPtr->transactionState = TcConnectionrec::ABORT_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } else {
+ jam();
+ continueAbortLab(signal);
+ }//if
+}//Dblqh::abortCommonLab()
+
+void Dblqh::abortContinueAfterBlockedLab(Signal* signal, bool canBlock)
+{
+ /* ------------------------------------------------------------------------
+ * INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD
+ * ------------------------------------------------------------------------
+ * ------------------------------------------------------------------------
+   * WE CAN COME HERE AS A RESTART AFTER BEING BLOCKED BY A LOCAL CHECKPOINT.
+ * ------------------------------------------------------------------------
+ * ALSO AS PART OF A NORMAL ABORT WITHOUT BLOCKING.
+ * WE MUST ABORT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN
+   * AND SEES A DIRTY STATE IN TUP.
+ * ------------------------------------------------------------------------ */
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ fragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if ((cCommitBlocked == true) &&
+ (fragptr.p->fragActiveStatus == ZTRUE) &&
+ (canBlock == true) &&
+ (regTcPtr->operation != ZREAD)) {
+ jam();
+/* ------------------------------------------------------------------------- */
+// TUP and/or ACC have problems in writing the undo log to disk fast enough.
+// We must avoid the abort at this time and try later instead. The fragment
+// is also active with a local checkpoint and this commit can generate UNDO
+// log records that overflow the UNDO log buffer.
+//
+// In certain situations it is simply too complex to insert a wait state here
+// since ACC is active and we cannot release the operation from the active
+// list without causing great complexity.
+/* ------------------------------------------------------------------------- */
+/*---------------------------------------------------------------------------*/
+// We must delay the write of abort info to the log to safeguard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ releaseActiveFrag(signal);
+ logPartPtr.i = regTcPtr->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::ABORT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ signal->theData[0] = regTcPtr->tupConnectrec;
+ EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_ACC_ABORT;
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_ABORTREQ, signal, 1);
+ /* ------------------------------------------------------------------------
+ * We need to insert a real-time break by sending ACC_ABORTCONF through the
+ * job buffer to ensure that we catch any ACCKEYCONF or TUPKEYCONF or
+ * TUPKEYREF that are in the job buffer but not yet processed. Doing
+ * everything without that would race and create a state error when they
+ * are executed.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::abortContinueAfterBlockedLab()
+
+/* ******************>> */
+/* ACC_ABORTCONF > */
+/* ******************>> */
+void Dblqh::execACC_ABORTCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ ndbrequire(regTcPtr->transactionState == TcConnectionrec::WAIT_ACC_ABORT);
+ if (regTcPtr->activeCreat == ZTRUE) {
+ /* ----------------------------------------------------------------------
+ * A NORMAL EVENT DURING CREATION OF A FRAGMENT. WE NOW NEED TO CONTINUE
+ * WITH NORMAL COMMIT PROCESSING.
+ * ---------------------------------------------------------------------- */
+ if (regTcPtr->currTupAiLen == regTcPtr->totReclenAi) {
+ jam();
+ regTcPtr->abortState = TcConnectionrec::ABORT_IDLE;
+ rwConcludedLab(signal);
+ return;
+ } else {
+ ndbrequire(regTcPtr->currTupAiLen < regTcPtr->totReclenAi);
+ jam();
+ releaseActiveFrag(signal);
+ regTcPtr->transactionState = TcConnectionrec::WAIT_AI_AFTER_ABORT;
+ return;
+ }//if
+ }//if
+ releaseActiveFrag(signal);
+ continueAbortLab(signal);
+ return;
+}//Dblqh::execACC_ABORTCONF()
+
+void Dblqh::continueAbortLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ /* ------------------------------------------------------------------------
+   * AN ERROR OCCURRED IN THE ACTIVE CREATION AFTER THE ABORT PHASE.
+ * WE NEED TO CONTINUE WITH A NORMAL ABORT.
+ * ------------------------------------------------------------------------
+ * ALSO USED FOR NORMAL CLEAN UP AFTER A NORMAL ABORT.
+ * ------------------------------------------------------------------------
+ * ALSO USED WHEN NO FRAGMENT WAS SET UP ON OPERATION.
+ * ------------------------------------------------------------------------ */
+ if (regTcPtr->logWriteState == TcConnectionrec::WRITTEN) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * I NEED TO INSERT A ABORT LOG RECORD SINCE WE ARE WRITING LOG IN THIS
+ * TRANSACTION.
+ * ---------------------------------------------------------------------- */
+ initLogPointers(signal);
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * A PREPARE OPERATION IS CURRENTLY WRITING IN THE LOG.
+       * WE MUST WAIT FOR OUR TURN TO WRITE THE LOG.
+       * IT IS NECESSARY TO WRITE ONE LOG RECORD COMPLETELY
+       * AT A TIME, OTHERWISE WE WILL SCRAMBLE THE LOG.
+ * -------------------------------------------------------------------- */
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
+ return;
+ }//if
+ if (cnoOfLogPages == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// We must delay the write of commit info to the log to safeguard against
+// a crash due to lack of log pages. We temporarily stop all log writes to this
+// log part to ensure that we don't get a buffer explosion in the delayed
+// signal buffer instead.
+/*---------------------------------------------------------------------------*/
+ linkWaitLog(signal, logPartPtr);
+ regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED;
+ if (logPartPtr.p->logPartState == LogPartRecord::IDLE) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ return;
+ }//if
+ writeAbortLog(signal);
+ removeLogTcrec(signal);
+ } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_STARTED) {
+ jam();
+ } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN) {
+ jam();
+ /* ------------------------------------------------------------------
+   * IT IS A READ OPERATION OR ANOTHER OPERATION THAT DOES NOT USE THE LOG.
+ * ------------------------------------------------------------------ */
+ /* ------------------------------------------------------------------
+ * THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE.
+ * THIS CAN OCCUR WHEN WE ARE STARTING A NEW FRAGMENT.
+ * ------------------------------------------------------------------ */
+ regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED;
+ } else {
+ ndbrequire(regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT);
+ jam();
+ /* ----------------------------------------------------------------
+ * THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER
+   * A SCAN OF ALL OPERATION RECORDS CHANGED IT INTO NOT_WRITTEN_WAIT.
+ * THIS INDICATES THAT WE ARE WAITING FOR THIS OPERATION TO COMMIT
+ * OR ABORT SO THAT WE CAN FIND THE
+ * STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT.
+ * ---------------------------------------------------------------- */
+ checkScanTcCompleted(signal);
+ }//if
+ continueAfterLogAbortWriteLab(signal);
+ return;
+}//Dblqh::continueAbortLab()
+
+void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
+{
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->simpleRead) {
+ jam();
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+
+ tcKeyRef->connectPtr = regTcPtr->applOprec;
+ tcKeyRef->transId[0] = regTcPtr->transid[0];
+ tcKeyRef->transId[1] = regTcPtr->transid[1];
+ tcKeyRef->errorCode = regTcPtr->errorCode;
+ sendSignal(regTcPtr->applRef,
+ GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ cleanUp(signal);
+ return;
+ }//if
+ if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_LQH) {
+ LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtrSend();
+
+ jam();
+ lqhKeyRef->userRef = regTcPtr->clientConnectrec;
+ lqhKeyRef->connectPtr = regTcPtr->tcOprec;
+ lqhKeyRef->errorCode = regTcPtr->errorCode;
+ lqhKeyRef->transId1 = regTcPtr->transid[0];
+ lqhKeyRef->transId2 = regTcPtr->transid[1];
+ sendSignal(regTcPtr->clientBlockref, GSN_LQHKEYREF, signal,
+ LqhKeyRef::SignalLength, JBB);
+ } else if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) {
+ jam();
+ sendAborted(signal);
+ } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ sendLqhTransconf(signal, LqhTransConf::Aborted);
+ } else {
+ ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC);
+ jam();
+ signal->theData[0] = regTcPtr->reqRef;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = cownNodeid;
+ signal->theData[3] = regTcPtr->transid[0];
+ signal->theData[4] = regTcPtr->transid[1];
+ sendSignal(regTcPtr->reqBlockref, GSN_ABORTCONF,
+ signal, 5, JBB);
+ }//if
+ cleanUp(signal);
+}//Dblqh::continueAfterLogAbortWriteLab()
+
+/* ##########################################################################
+ * ####### MODULE TO HANDLE TC FAILURE #######
+ *
+ * ########################################################################## */
+
+/* ************************************************************************>>
+ * NODE_FAILREP: Node failure report. Sender Ndbcntr. Set status of failed
+ * node to down and reply with NF_COMPLETEREP to DIH which will report that
+ * LQH has completed failure handling.
+ * ************************************************************************>> */
+void Dblqh::execNODE_FAILREP(Signal* signal)
+{
+ UintR TfoundNodes = 0;
+ UintR TnoOfNodes;
+ UintR Tdata[MAX_NDB_NODES];
+ Uint32 i;
+
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ TnoOfNodes = nodeFail->noOfNodes;
+ UintR index = 0;
+ for (i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ jam();
+ Tdata[index] = i;
+ index++;
+ }//if
+ }//for
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ ndbrequire(index == TnoOfNodes);
+ ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES);
+ for (i = 0; i < TnoOfNodes; i++) {
+ const Uint32 nodeId = Tdata[i];
+ lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId);
+
+ for (Uint32 j = 0; j < cnoOfNodes; j++) {
+ jam();
+ if (cnodeData[j] == nodeId){
+ jam();
+ cnodeStatus[j] = ZNODE_DOWN;
+
+ TfoundNodes++;
+ }//if
+ }//for
+ NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0];
+ nfCompRep->blockNo = DBLQH;
+ nfCompRep->nodeId = cownNodeid;
+ nfCompRep->failedNodeId = Tdata[i];
+ sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ }//for
+ ndbrequire(TnoOfNodes == TfoundNodes);
+}//Dblqh::execNODE_FAILREP()
+
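+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: execNODE_FAILREP()
+ * above walks node ids starting at 1 and collects every id whose bit is set
+ * in the NodeBitmask carried by the signal. Below is a standalone model of
+ * that bit scan; the 32-bit word layout and the MAX_NODES value are
+ * assumptions for the sketch, not a statement about NodeBitmask internals.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+static const uint32_t MAX_NODES = 64;
+
+static bool bitIsSet(const uint32_t* words, uint32_t nodeId)
+{
+  return (words[nodeId >> 5] >> (nodeId & 31)) & 1u;
+}
+
+int main()
+{
+  uint32_t mask[MAX_NODES / 32] = { 0, 0 };
+  mask[3 >> 5]  |= 1u << (3 & 31);     // pretend node  3 failed
+  mask[40 >> 5] |= 1u << (40 & 31);    // pretend node 40 failed
+
+  std::vector<uint32_t> failed;
+  for (uint32_t i = 1; i < MAX_NODES; i++)   // ids start at 1, as in the loop above
+    if (bitIsSet(mask, i))
+      failed.push_back(i);
+
+  assert(failed.size() == 2 && failed[0] == 3 && failed[1] == 40);
+  return 0;
+}
+#endif
+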
+/* ************************************************************************>>
+ * LQH_TRANSREQ: Report status of all transactions that were coordinated
+ * by the crashed TC
+ * ************************************************************************>> */
+/* ************************************************************************>>
+ * THIS SIGNAL IS RECEIVED AFTER A NODE CRASH.
+ * THE NODE HAD A TC AND COORDINATED A NUMBER OF TRANSACTIONS.
+ * NOW THE MASTER NODE IS PICKING UP THOSE TRANSACTIONS
+ * TO COMPLETE THEM. EITHER ABORT THEM OR COMMIT THEM.
+ * ************************************************************************>> */
+void Dblqh::execLQH_TRANSREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 newTcPtr = signal->theData[0];
+ BlockReference newTcBlockref = signal->theData[1];
+ Uint32 oldNodeId = signal->theData[2];
+ tcNodeFailptr.i = oldNodeId;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ if ((tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_TRUE) ||
+ (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK)) {
+ jam();
+ tcNodeFailptr.p->lastNewTcBlockref = newTcBlockref;
+ /* ------------------------------------------------------------------------
+ * WE HAVE RECEIVED A SIGNAL SPECIFYING THAT WE NEED TO HANDLE THE FAILURE
+ * OF A TC. NOW WE RECEIVE ANOTHER SIGNAL WITH THE SAME ORDER. THIS CAN
+ * OCCUR IF THE NEW TC FAILS. WE MUST BE CAREFUL IN THIS CASE SO THAT WE DO
+ * NOT START PARALLEL ACTIVITIES TRYING TO DO THE SAME THING. WE SAVE THE
+   * NEW BLOCK REFERENCE TO THE LAST NEW TC IN A VARIABLE AND ASSIGN IT TO
+ * NEW_TC_BLOCKREF WHEN THE OLD PROCESS RETURNS TO LQH_TRANS_NEXT. IT IS
+ * CERTAIN TO COME THERE SINCE THIS IS THE ONLY PATH TO TAKE CARE OF THE
+ * NEXT TC CONNECT RECORD. WE SET THE STATUS TO BREAK TO INDICATE TO THE OLD
+ * PROCESS WHAT IS HAPPENING.
+ * ------------------------------------------------------------------------ */
+ tcNodeFailptr.p->lastNewTcRef = newTcPtr;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_BREAK;
+ return;
+ }//if
+ tcNodeFailptr.p->oldNodeId = oldNodeId;
+ tcNodeFailptr.p->newTcBlockref = newTcBlockref;
+ tcNodeFailptr.p->newTcRef = newTcPtr;
+ tcNodeFailptr.p->tcRecNow = 0;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::execLQH_TRANSREQ()
+
+void Dblqh::lqhTransNextLab(Signal* signal)
+{
+ UintR tend;
+ UintR tstart;
+ UintR guard0;
+
+ if (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK) {
+ jam();
+ /* ----------------------------------------------------------------------
+   * AN INTERRUPTION TO THIS NODE FAILURE HANDLING WAS RECEIVED AND A NEW
+   * TC HAS BEEN ASSIGNED TO TAKE OVER THE FAILED TC. PROBABLY THE OLD
+   * NEW TC HAS FAILED.
+ * ---------------------------------------------------------------------- */
+ tcNodeFailptr.p->newTcBlockref = tcNodeFailptr.p->lastNewTcBlockref;
+ tcNodeFailptr.p->newTcRef = tcNodeFailptr.p->lastNewTcRef;
+ tcNodeFailptr.p->tcRecNow = 0;
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE;
+ }//if
+ tstart = tcNodeFailptr.p->tcRecNow;
+ tend = tstart + 200;
+ guard0 = tend;
+ for (tcConnectptr.i = tstart; tcConnectptr.i <= guard0; tcConnectptr.i++) {
+ jam();
+ if (tcConnectptr.i >= ctcConnectrecFileSize) {
+ jam();
+ /**
+ * Finished with scanning operation record
+ *
+ * now scan markers
+ */
+ scanMarkers(signal, tcNodeFailptr.i, 0, RNIL);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ if (tcConnectptr.p->transactionState != TcConnectionrec::TC_NOT_CONNECTED) {
+ if (tcConnectptr.p->tcScanRec == RNIL) {
+ if (refToNode(tcConnectptr.p->tcBlockref) == tcNodeFailptr.p->oldNodeId) {
+ if (tcConnectptr.p->operation != ZREAD) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ abortStateHandlerLab(signal);
+ return;
+ } else {
+ jam();
+ if (tcConnectptr.p->opSimple != ZTRUE) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ abortStateHandlerLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ } else {
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanType == ScanRecord::COPY) {
+ jam();
+ if (scanptr.p->scanNodeId == tcNodeFailptr.p->oldNodeId) {
+ jam();
+ /* ------------------------------------------------------------
+                   * THE RECEIVER OF THE COPY HAS FAILED.
+ * WE HAVE TO CLOSE THE COPY PROCESS.
+ * ------------------------------------------------------------ */
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ closeCopyRequestLab(signal);
+ return;
+ }//if
+ } else {
+ if (scanptr.p->scanType == ScanRecord::SCAN) {
+ jam();
+ if (refToNode(tcConnectptr.p->tcBlockref) ==
+ tcNodeFailptr.p->oldNodeId) {
+ jam();
+ tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
+ tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
+ closeScanRequestLab(signal);
+ return;
+ }//if
+ } else {
+ jam();
+ /* ------------------------------------------------------------
+ * THIS IS AN ERROR THAT SHOULD NOT OCCUR. WE CRASH THE SYSTEM.
+ * ------------------------------------------------------------ */
+ systemErrorLab(signal);
+ return;
+ }//if
+ }//if
+ }//if
+ }//if
+ }//if
+ }//for
+ tcNodeFailptr.p->tcRecNow = tend + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::lqhTransNextLab()
+
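+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: lqhTransNextLab()
+ * above inspects a bounded batch of operation records (about 200) per
+ * invocation and then sends CONTINUEB to itself, so other signals can be
+ * serviced in between (a real-time break). The loop below is a standalone
+ * model of that pattern; the signal scheduler is simulated with a plain
+ * while loop and all names are stand-ins.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+static const uint32_t TOTAL_RECORDS = 1000;
+static const uint32_t BATCH = 200;           // mirrors "tend = tstart + 200"
+
+// Inspect one batch starting at 'start'; return the position to resume from.
+static uint32_t scanBatch(uint32_t start)
+{
+  uint32_t i = start;
+  for (; i < start + BATCH && i < TOTAL_RECORDS; i++) {
+    /* inspect record i; the real code may abort or take over the operation */
+  }
+  return i;
+}
+
+int main()
+{
+  uint32_t pos = 0;
+  uint32_t rounds = 0;
+  while (pos < TOTAL_RECORDS) {   // stands in for the CONTINUEB round trips
+    pos = scanBatch(pos);
+    rounds++;
+  }
+  assert(rounds == TOTAL_RECORDS / BATCH);
+  return 0;
+}
+#endif
+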
+void
+Dblqh::scanMarkers(Signal* signal,
+ Uint32 tcNodeFail,
+ Uint32 startBucket,
+ Uint32 i){
+
+ jam();
+
+ TcNodeFailRecordPtr tcNodeFailPtr;
+ tcNodeFailPtr.i = tcNodeFail;
+ ptrCheckGuard(tcNodeFailPtr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ const Uint32 crashedTcNodeId = tcNodeFailPtr.p->oldNodeId;
+
+ CommitAckMarkerIterator iter;
+ if(i == RNIL){
+ m_commitAckMarkerHash.next(startBucket, iter);
+ } else {
+ jam();
+ iter.curr.i = i;
+ iter.bucket = startBucket;
+ m_commitAckMarkerHash.getPtr(iter.curr);
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ const Uint32 RT_BREAK = 256;
+ for(i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
+ jam();
+
+ if(iter.curr.i == RNIL){
+ /**
+ * Done with iteration
+ */
+ jam();
+
+ tcNodeFailPtr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
+ signal->theData[0] = tcNodeFailPtr.p->newTcRef;
+ signal->theData[1] = cownNodeid;
+ signal->theData[2] = LqhTransConf::LastTransConf;
+ sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, 3, JBB);
+ return;
+ }
+
+ if(iter.curr.p->tcNodeId == crashedTcNodeId){
+ jam();
+
+ /**
+ * Found marker belonging to crashed node
+ */
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+ lqhTransConf->tcRef = tcNodeFailPtr.p->newTcRef;
+ lqhTransConf->lqhNodeId = cownNodeid;
+ lqhTransConf->operationStatus = LqhTransConf::Marker;
+ lqhTransConf->transId1 = iter.curr.p->transid1;
+ lqhTransConf->transId2 = iter.curr.p->transid2;
+ lqhTransConf->apiRef = iter.curr.p->apiRef;
+ lqhTransConf->apiOpRec = iter.curr.p->apiOprec;
+ sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, 7, JBB);
+
+ signal->theData[0] = ZSCAN_MARKERS;
+ signal->theData[1] = tcNodeFailPtr.i;
+ signal->theData[2] = iter.bucket;
+ signal->theData[3] = iter.curr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
+
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ signal->theData[0] = ZSCAN_MARKERS;
+ signal->theData[1] = tcNodeFailPtr.i;
+ signal->theData[2] = iter.bucket;
+ signal->theData[3] = RNIL;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+}
+
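+/* ---------------------------------------------------------------------------
+ * Illustrative sketch only, not part of the block's logic: scanMarkers()
+ * above walks the commit-ack-marker hash one slice at a time; the current
+ * (bucket, element) position is packed into the CONTINUEB signal so the walk
+ * can resume after a real-time break. Below is a standalone model of such a
+ * resumable walk over an explicitly bucketed table; the container and all
+ * names are stand-ins, not the hash table the block uses.
+ * ------------------------------------------------------------------------- */
+#if 0
+#include <cassert>
+#include <cstddef>
+#include <vector>
+
+struct Cursor { std::size_t bucket; std::size_t pos; };  // continuation point
+
+// Visit at most 'budget' entries starting at 'cur'; return where to resume.
+// 'done' becomes true once the whole table has been visited.
+static Cursor walkSlice(const std::vector<std::vector<int> >& table,
+                        Cursor cur, std::size_t budget,
+                        std::size_t& visited, bool& done)
+{
+  done = false;
+  while (budget > 0) {
+    if (cur.bucket >= table.size()) { done = true; return cur; }
+    if (cur.pos >= table[cur.bucket].size()) {
+      cur.bucket++; cur.pos = 0;             // move on to the next bucket
+      continue;
+    }
+    visited++;                               // "inspect" one marker
+    cur.pos++;
+    budget--;
+  }
+  return cur;                                // out of budget, resume later
+}
+
+int main()
+{
+  std::vector<std::vector<int> > table(8);
+  for (int i = 0; i < 20; i++) table[i % 8].push_back(i);
+
+  Cursor cur = { 0, 0 };
+  std::size_t visited = 0;
+  bool done = false;
+  while (!done)                              // each pass ~ one CONTINUEB round
+    cur = walkSlice(table, cur, 6, visited, done);
+
+  assert(visited == 20);
+  return 0;
+}
+#endif
+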
+/* #########################################################################
+ * ####### SCAN MODULE #######
+ *
+ * #########################################################################
+ * -------------------------------------------------------------------------
+ * THIS MODULE CONTAINS THE CODE THAT HANDLES A SCAN OF A PARTICULAR FRAGMENT
+ * IT OPERATES UNDER THE CONTROL OF TC AND ORDERS ACC TO PERFORM A SCAN OF
+ * ALL TUPLES IN THE FRAGMENT. TUP PERFORMS THE NECESSARY SEARCH CONDITIONS
+ * TO ENSURE THAT ONLY VALID TUPLES ARE RETURNED TO THE APPLICATION.
+ * ------------------------------------------------------------------------- */
+/* *************** */
+/* ACC_SCANCONF > */
+/* *************** */
+void Dblqh::execACC_SCANCONF(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ jamEntry();
+ scanptr.i = accScanConf->scanPtr;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanState == ScanRecord::WAIT_ACC_SCAN) {
+ accScanConfScanLab(signal);
+ } else {
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_ACC_COPY);
+ accScanConfCopyLab(signal);
+ }//if
+}//Dblqh::execACC_SCANCONF()
+
+/* ************>> */
+/* ACC_SCANREF > */
+/* ************>> */
+void Dblqh::execACC_SCANREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACC_SCANREF()
+
+/* ***************>> */
+/* NEXT_SCANCONF > */
+/* ***************>> */
+void Dblqh::execNEXT_SCANCONF(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ jamEntry();
+ scanptr.i = nextScanConf->scanPtr;
+ c_scanRecordPool.getPtr(scanptr);
+ if (nextScanConf->localKeyLength == 1) {
+ jam();
+ nextScanConf->localKey[1] =
+ nextScanConf->localKey[0] & MAX_TUPLES_PER_PAGE;
+ nextScanConf->localKey[0] = nextScanConf->localKey[0] >> MAX_TUPLES_BITS;
+ }//if
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_CLOSE_SCAN:
+ jam();
+ accScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_CLOSE_COPY:
+ jam();
+ accCopyCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN:
+ jam();
+ nextScanConfScanLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN_COPY:
+ jam();
+ nextScanConfCopyLab(signal);
+ break;
+ case ScanRecord::WAIT_RELEASE_LOCK:
+ jam();
+ ndbrequire(signal->length() == 1);
+ scanLockReleasedLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execNEXT_SCANCONF()
+
+/* ***************> */
+/* NEXT_SCANREF > */
+/* ***************> */
+void Dblqh::execNEXT_SCANREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execNEXT_SCANREF()
+
+/* ******************> */
+/* STORED_PROCCONF > */
+/* ******************> */
+void Dblqh::execSTORED_PROCCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 storedProcId = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ scanptr.p->scanStoredProcId = storedProcId;
+ storedProcConfScanLab(signal);
+ break;
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
+ jam();
+ releaseActiveFrag(signal);
+ tupScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_STORED_PROC_COPY:
+ jam();
+ scanptr.p->scanStoredProcId = storedProcId;
+ storedProcConfCopyLab(signal);
+ break;
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
+ jam();
+ releaseActiveFrag(signal);
+ tupCopyCloseConfLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execSTORED_PROCCONF()
+
+/* ****************** */
+/* STORED_PROCREF > */
+/* ****************** */
+void Dblqh::execSTORED_PROCREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 errorCode = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ scanptr.p->scanStoredProcId = signal->theData[2];
+ tcConnectptr.p->errorCode = errorCode;
+ closeScanLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::execSTORED_PROCREF()
+
+/* --------------------------------------------------------------------------
+ * ENTER SCAN_NEXTREQ
+ * --------------------------------------------------------------------------
+ * PRECONDITION:
+ * TRANSACTION_STATE = SCAN_STATE
+ * SCAN_STATE = WAIT_SCAN_NEXTREQ
+ *
+ * Case scanLockHold: ZTRUE = Unlock previous round of
+ * scanned row(s) and fetch next set of rows.
+ * ZFALSE = Fetch new set of rows.
+ * Number of rows to read depends on parallelism and how many rows
+ * left to scan in the fragment. SCAN_NEXTREQ can also be sent with
+ * closeFlag == ZTRUE to close the scan.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSCAN_NEXTREQ(Signal* signal)
+{
+ jamEntry();
+ const ScanFragNextReq * const nextReq =
+ (ScanFragNextReq*)&signal->theData[0];
+ const Uint32 transid1 = nextReq->transId1;
+ const Uint32 transid2 = nextReq->transId2;
+ const Uint32 senderData = nextReq->senderData;
+
+ if (findTransaction(transid1, transid2, senderData) != ZOK){
+ jam();
+ DEBUG(senderData <<
+ " Received SCAN_NEXTREQ in LQH with close flag when closed");
+ ndbrequire(nextReq->closeFlag == ZTRUE);
+ return;
+ }
+
+ // Crash node if signal sender is same node
+ CRASH_INSERTION2(5021, refToNode(signal->senderBlockRef()) == cownNodeid);
+ // Crash node if signal sender is NOT same node
+ CRASH_INSERTION2(5022, refToNode(signal->senderBlockRef()) != cownNodeid);
+
+ if (ERROR_INSERTED(5023)){
+ // Drop signal if sender is same node
+ if (refToNode(signal->senderBlockRef()) == cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5024)){
+ // Drop signal if sender is NOT same node
+ if (refToNode(signal->senderBlockRef()) != cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5025)){
+ // Delay signal if sender is NOT same node
+ if (refToNode(signal->senderBlockRef()) != cownNodeid) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_SCAN_NEXTREQ, signal, 1000,
+ signal->length());
+ return;
+ }
+ }//if
+ if (ERROR_INSERTED(5030)){
+ ndbout << "ERROR 5030" << endl;
+ CLEAR_ERROR_INSERT_VALUE;
+ // Drop signal
+ return;
+ }//if
+
+ if(ERROR_INSERTED(5036)){
+ return;
+ }
+
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ ndbrequire(scanptr.i != RNIL);
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanTcWaiting = ZTRUE;
+
+ /* ------------------------------------------------------------------
+ * If the close flag is set this scan should be closed.
+ * If we are waiting for SCAN_NEXTREQ, set the flag to stop scanning and
+ * continue execution; otherwise set the flags and wait until the scan
+ * completes by itself.
+ * ------------------------------------------------------------------ */
+ if (nextReq->closeFlag == ZTRUE){
+ jam();
+ if(ERROR_INSERTED(5034)){
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+ if(ERROR_INSERTED(5036)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ closeScanRequestLab(signal);
+ return;
+ }//if
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ /**
+ * Change parameters while running
+ * (is currently not supported)
+ */
+ const Uint32 max_rows = nextReq->batch_size_rows;
+ const Uint32 max_bytes = nextReq->batch_size_bytes;
+ ndbrequire(scanptr.p->m_max_batch_size_rows == max_rows);
+ ndbrequire(scanptr.p->m_max_batch_size_bytes == max_bytes);
+
+ /* --------------------------------------------------------------------
+ * If scanLockHold = TRUE we need to unlock previous round of
+ * scanned records.
+ * scanReleaseLocks will set states for this and send a NEXT_SCANREQ.
+ * When confirm signal NEXT_SCANCONF arrives we call
+ * continueScanNextReqLab to continue scanning new rows and
+ * acquiring new locks.
+ * -------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+
+ /* -----------------------------------------------------------------------
+ * We end up here when scanLockHold = FALSE or no rows were locked in the
+ * previous round.
+ * Simply continue scanning.
+ * ----------------------------------------------------------------------- */
+ continueScanNextReqLab(signal);
+}//Dblqh::execSCAN_NEXTREQ()
+
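+/* -------------------------------------------------------------------------
+ * Continue the scan after SCAN_NEXTREQ (and after any release of locks).
+ * Close the scan if completion has been requested, send SCAN_FRAGCONF if
+ * the last row has already been returned, otherwise restart the timer,
+ * reset the list of ACC operation pointers and fetch the next batch.
+ * ------------------------------------------------------------------------- */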
+void Dblqh::continueScanNextReqLab(Signal* signal)
+{
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if(scanptr.p->m_last_row){
+ jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }
+
+ // Update timer on tcConnectRecord
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ init_acc_ptr_list(scanptr.p);
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ scanNextLoopLab(signal);
+}//Dblqh::continueScanNextReqLab()
+
+/* -------------------------------------------------------------------------
+ * WE NEED TO RELEASE LOCKS BEFORE CONTINUING
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanReleaseLocksLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_RELEASE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueScanReleaseAfterBlockedLab(signal);
+}//Dblqh::scanReleaseLocksLab()
+
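+/* -------------------------------------------------------------------------
+ * Release the lock on the row indicated by scanReleaseCounter by sending
+ * NEXT_SCANREQ with ZSCAN_COMMIT to ACC, or to TUX for range scans.
+ * The reply arrives as NEXT_SCANCONF in state WAIT_RELEASE_LOCK.
+ * ------------------------------------------------------------------------- */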
+void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_RELEASE_LOCK;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1]=
+ get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->scanReleaseCounter -1,
+ false);
+ signal->theData[2] = NextScanReq::ZSCAN_COMMIT;
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+}//Dblqh::continueScanReleaseAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER SCAN_NEXTREQ
+ * -------------------------------------------------------------------------
+ * SCAN_NEXT_REQ SIGNAL ARRIVED IN THE MIDDLE OF EXECUTION OF THE SCAN.
+ * IT WAS A REQUEST TO CLOSE THE SCAN. WE WILL CLOSE THE SCAN IN A
+ * CAREFUL MANNER TO ENSURE THAT NO ERROR OCCURS.
+ * -------------------------------------------------------------------------
+ * PRECONDITION:
+ * TRANSACTION_STATE = SCAN_STATE_USED
+ * TSCAN_COMPLETED = ZTRUE
+ * -------------------------------------------------------------------------
+ * WE CAN ALSO ARRIVE AT THIS LABEL AFTER A NODE CRASH OF THE SCAN
+ * COORDINATOR.
+ * ------------------------------------------------------------------------- */
+void Dblqh::closeScanRequestLab(Signal* signal)
+{
+ DEBUG("transactionState = " << tcConnectptr.p->transactionState);
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::SCAN_STATE_USED:
+ DEBUG("scanState = " << scanptr.p->scanState);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::IN_QUEUE:
+ jam();
+ tupScanCloseConfLab(signal);
+ break;
+ case ScanRecord::WAIT_NEXT_SCAN:
+ jam();
+ /* -------------------------------------------------------------------
+ * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_ACC_SCAN:
+ case ScanRecord::WAIT_STORED_PROC_SCAN:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS
+ * AND WAIT FOR COMPLETION OF STARTUP.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_CLOSE_SCAN:
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN:
+ jam();
+ /*empty*/;
+ break;
+ /* -------------------------------------------------------------------
+ * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
+ * ------------------------------------------------------------------- */
+ case ScanRecord::WAIT_RELEASE_LOCK:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING THIS
+ * WE WILL START TO CLOSE THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_SCAN_NEXTREQ:
+ jam();
+ /* -------------------------------------------------------------------
+ * WE ARE WAITING FOR A SCAN_NEXTREQ FROM SCAN COORDINATOR(TC)
+ * WHICH HAS CRASHED. CLOSE THE SCAN.
+ * ------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ }//if
+ closeScanLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ break;
+ case TcConnectionrec::WAIT_SCAN_AI:
+ jam();
+ /* ---------------------------------------------------------------------
+ * WE ARE STILL WAITING FOR THE ATTRIBUTE INFORMATION THAT
+ * OBVIOUSLY WILL NOT ARRIVE. WE CAN QUIT IMMEDIATELY HERE.
+ * --------------------------------------------------------------------- */
+ //XXX jonas this has to be wrong...
+ releaseOprec(signal);
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ sendScanFragConf(signal, ZTRUE);
+ abort_scan(signal, scanptr.i, 0);
+ return;
+ break;
+ case TcConnectionrec::SCAN_TUPKEY:
+ case TcConnectionrec::SCAN_FIRST_STOPPED:
+ case TcConnectionrec::SCAN_CHECK_STOPPED:
+ case TcConnectionrec::SCAN_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN.
+ * --------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case TcConnectionrec::SCAN_RELEASE_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING
+ * THIS WE WILL START TO CLOSE THE SCAN.
+ * --------------------------------------------------------------------- */
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case TcConnectionrec::SCAN_CLOSE_STOPPED:
+ jam();
+ /* ---------------------------------------------------------------------
+ * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING.
+ * --------------------------------------------------------------------- */
+ /*empty*/;
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::closeScanRequestLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_RELEASE_LOCK
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanLockReleasedLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ releaseActiveFrag(signal);
+
+ if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
+ if ((scanptr.p->scanErrorCounter > 0) ||
+ (scanptr.p->scanCompletedStatus == ZTRUE)) {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
+ closeScanLab(signal);
+ } else if (scanptr.p->check_scan_batch_completed() &&
+ scanptr.p->scanLockHold != ZTRUE) {
+ jam();
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+ jam();
+ closeScanLab(signal);
+ return;
+ } else {
+ jam();
+ /*
+ * We came here after releasing locks after
+ * receiving SCAN_NEXTREQ from TC. We only come here
+ * when scanLockHold == ZTRUE
+ */
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
+ continueScanNextReqLab(signal);
+ }//if
+ } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
+ jam();
+ scanptr.p->scanReleaseCounter++;
+ scanReleaseLocksLab(signal);
+ } else {
+ jam();
+ /*
+ We come here when we have been scanning for a long time and not been able
+ to find m_max_batch_size_rows records to return. We needed to release
+ the record we didn't want, but now we are returning all found records to
+ the API.
+ */
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ }//if
+}//Dblqh::scanLockReleasedLab()
+
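+/* -------------------------------------------------------------------------
+ * The scan record keeps one ACC operation pointer per row in the current
+ * batch. For batch sizes above one the pointers are kept in attrinbuf
+ * records seized here, one record per 32 pointers ((batch_size + 30) / 32);
+ * the single-row case presumably uses scan_acc_op_ptr[0] directly (see
+ * i_get_acc_ptr, not shown here).
+ * ------------------------------------------------------------------------- */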
+bool
+Dblqh::seize_acc_ptr_list(ScanRecord* scanP, Uint32 batch_size)
+{
+ Uint32 i;
+ Uint32 attr_buf_recs= (batch_size + 30) / 32;
+
+ if (batch_size > 1) {
+ if (c_no_attrinbuf_recs < attr_buf_recs) {
+ jam();
+ return false;
+ }
+ for (i= 1; i <= attr_buf_recs; i++) {
+ scanP->scan_acc_op_ptr[i]= seize_attrinbuf();
+ }
+ }
+ scanP->scan_acc_attr_recs= attr_buf_recs;
+ scanP->scan_acc_index = 0;
+ return true;
+}
+
+void
+Dblqh::release_acc_ptr_list(ScanRecord* scanP)
+{
+ Uint32 i, attr_buf_recs;
+ attr_buf_recs= scanP->scan_acc_attr_recs;
+
+ for (i= 1; i <= attr_buf_recs; i++) {
+ release_attrinbuf(scanP->scan_acc_op_ptr[i]);
+ }
+ scanP->scan_acc_attr_recs= 0;
+ scanP->scan_acc_index = 0;
+}
+
+Uint32
+Dblqh::seize_attrinbuf()
+{
+ AttrbufPtr regAttrPtr;
+ Uint32 ret_attr_buf;
+ ndbrequire(c_no_attrinbuf_recs > 0);
+ c_no_attrinbuf_recs--;
+ ret_attr_buf= cfirstfreeAttrinbuf;
+ regAttrPtr.i= ret_attr_buf;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ cfirstfreeAttrinbuf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ return ret_attr_buf;
+}
+
+Uint32
+Dblqh::release_attrinbuf(Uint32 attr_buf_i)
+{
+ Uint32 next_buf;
+ AttrbufPtr regAttrPtr;
+ c_no_attrinbuf_recs++;
+ regAttrPtr.i= attr_buf_i;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ next_buf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ regAttrPtr.p->attrbuf[ZINBUF_NEXT]= cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf= regAttrPtr.i;
+ return next_buf;
+}
+
+void
+Dblqh::init_acc_ptr_list(ScanRecord* scanP)
+{
+ scanP->scan_acc_index = 0;
+}
+
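+/* -------------------------------------------------------------------------
+ * Return the ACC operation pointer stored for row 'index' of the current
+ * batch. An out-of-range index returns RNIL when crash_flag is set and is
+ * otherwise treated as a fatal error.
+ * ------------------------------------------------------------------------- */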
+Uint32
+Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
+ Uint32 index,
+ bool crash_flag)
+{
+ Uint32* acc_ptr;
+ Uint32 attr_buf_rec, attr_buf_index;
+ if (!((index < MAX_PARALLEL_OP_PER_SCAN) &&
+ index < scanP->scan_acc_index)) {
+ ndbrequire(crash_flag);
+ return RNIL;
+ }
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ return *acc_ptr;
+}
+
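+/* -------------------------------------------------------------------------
+ * Store the ACC operation pointer for row 'index' of the current batch.
+ * The index must either restart the list (0) or extend it by exactly one
+ * entry, and must stay below MAX_PARALLEL_OP_PER_SCAN.
+ * ------------------------------------------------------------------------- */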
+void
+Dblqh::set_acc_ptr_in_scan_record(ScanRecord* scanP,
+ Uint32 index, Uint32 acc)
+{
+ Uint32 *acc_ptr;
+ ndbrequire((index == 0 || scanP->scan_acc_index == index) &&
+ (index < MAX_PARALLEL_OP_PER_SCAN));
+ scanP->scan_acc_index= index + 1;
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ *acc_ptr= acc;
+}
+
+/* -------------------------------------------------------------------------
+ * SCAN_FRAGREQ: Request to start scanning the specified fragment of a table.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSCAN_FRAGREQ(Signal* signal)
+{
+ ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0];
+ ScanFragRef * ref;
+ const Uint32 transid1 = scanFragReq->transId1;
+ const Uint32 transid2 = scanFragReq->transId2;
+ Uint32 errorCode= 0;
+ Uint32 senderData;
+ Uint32 hashIndex;
+ TcConnectionrecPtr nextHashptr;
+
+ jamEntry();
+ const Uint32 reqinfo = scanFragReq->requestInfo;
+ const Uint32 fragId = (scanFragReq->fragmentNoKeyLen & 0xFFFF);
+ const Uint32 keyLen = (scanFragReq->fragmentNoKeyLen >> 16);
+ tabptr.i = scanFragReq->tableId;
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
+ const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
+ const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
+ const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
+ senderData = scanFragReq->senderData;
+ goto error_handler_early_1;
+ }
+
+ if (cfirstfreeTcConrec != RNIL) {
+ seizeTcrec();
+ tcConnectptr.p->clientConnectrec = scanFragReq->senderData;
+ tcConnectptr.p->clientBlockref = signal->senderBlockRef();
+ tcConnectptr.p->savePointId = scanFragReq->savePointId;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.
+ * -------------------------------------------------------------------- */
+ errorCode = ZNO_TC_CONNECT_ERROR;
+ senderData = scanFragReq->senderData;
+ goto error_handler_early;
+ }//if
+ /**
+ * A write always has to get keyinfo
+ */
+ ndbrequire(scanLockMode == 0 || keyinfo);
+
+ ndbrequire(max_rows > 0 && max_rows <= MAX_PARALLEL_OP_PER_SCAN);
+ if (!getFragmentrec(signal, fragId)) {
+ errorCode = __LINE__;
+ goto error_handler;
+ }//if
+
+ // Verify scan type vs table type (both sides are boolean)
+ if (rangeScan != DictTabInfo::isOrderedIndex(fragptr.p->tableType)) {
+ errorCode = __LINE__; // XXX fix
+ goto error_handler;
+ }//if
+
+ // 1 scan record is reserved for node recovery
+ if (cscanNoFreeRec < 2) {
+ jam();
+ errorCode = ScanFragRef::ZNO_FREE_SCANREC_ERROR;
+ goto error_handler;
+ }
+
+ // XXX adjust cmaxAccOps for range scans and remove this comment
+ if ((cbookedAccOps + max_rows) > cmaxAccOps) {
+ jam();
+ errorCode = ScanFragRef::ZSCAN_BOOK_ACC_OP_ERROR;
+ goto error_handler;
+ }//if
+
+ ndbrequire(c_scanRecordPool.seize(scanptr));
+ initScanTc(signal,
+ transid1,
+ transid2,
+ fragId,
+ ZNIL);
+ tcConnectptr.p->save1 = 4;
+ tcConnectptr.p->primKeyLen = keyLen + 4; // hard coded in execKEYINFO
+ errorCode = initScanrec(scanFragReq);
+ if (errorCode != ZOK) {
+ jam();
+ goto error_handler2;
+ }//if
+ cscanNoFreeRec--;
+ cbookedAccOps += max_rows;
+
+ hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023;
+ nextHashptr.i = ctransidHash[hashIndex];
+ ctransidHash[hashIndex] = tcConnectptr.i;
+ tcConnectptr.p->prevHashRec = RNIL;
+ tcConnectptr.p->nextHashRec = nextHashptr.i;
+ if (nextHashptr.i != RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD
+ * IF IT EXISTS
+ * --------------------------------------------------------------------- */
+ ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+ nextHashptr.p->prevHashRec = tcConnectptr.i;
+ }//if
+ if (scanptr.p->scanAiLength > 0) {
+ jam();
+ tcConnectptr.p->transactionState = TcConnectionrec::WAIT_SCAN_AI;
+ return;
+ }//if
+ continueAfterReceivingAllAiLab(signal);
+ return;
+
+error_handler2:
+ // no scan number allocated
+ c_scanRecordPool.release(scanptr);
+error_handler:
+ ref = (ScanFragRef*)&signal->theData[0];
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errorCode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+ return;
+
+ error_handler_early_1:
+ if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){
+ jam();
+ errorCode = ZTABLE_NOT_DEFINED;
+ } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ errorCode = ZDROP_TABLE_IN_PROGRESS;
+ } else {
+ ndbrequire(0);
+ }
+ error_handler_early:
+ ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = senderData;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errorCode;
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+}//Dblqh::execSCAN_FRAGREQ()
+
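+/* -------------------------------------------------------------------------
+ * All ATTRINFO for the scan has been received (or none was expected).
+ * Unless the scan is queued, start it by sending ACC_SCANREQ to ACC, or to
+ * TUX for range scans.
+ * ------------------------------------------------------------------------- */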
+void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+
+ if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
+ jam();
+ return;
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_ACC_SCAN;
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ req->senderData = scanptr.i;
+ req->senderRef = cownref;
+ req->tableId = tcConnectptr.p->tableref;
+ req->fragmentNo = tcConnectptr.p->fragmentid;
+ req->requestInfo = 0;
+ AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
+ AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
+ AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
+ req->transId1 = tcConnectptr.p->transid[0];
+ req->transId2 = tcConnectptr.p->transid[1];
+ req->savePointId = tcConnectptr.p->savePointId;
+ // always use if-stmt to switch (instead of setting a "scan block ref")
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
+}//Dblqh::continueAfterReceivingAllAiLab()
+
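+/* -------------------------------------------------------------------------
+ * Buffer incoming scan ATTRINFO. Once the full scanAiLength has been
+ * received the scan start-up continues; if the attribute buffers cannot be
+ * seized the scan is aborted with ZGET_ATTRINBUF_ERROR.
+ * ------------------------------------------------------------------------- */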
+void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ if (tcConnectptr.p->currTupAiLen < scanptr.p->scanAiLength) {
+ jam();
+ } else {
+ jam();
+ ndbrequire(tcConnectptr.p->currTupAiLen == scanptr.p->scanAiLength);
+ continueAfterReceivingAllAiLab(signal);
+ }//if
+ return;
+ }//if
+ abort_scan(signal, scanptr.i, ZGET_ATTRINBUF_ERROR);
+}
+
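+/* -------------------------------------------------------------------------
+ * Abort a scan locally: release the scan record and the TC connect record
+ * and, if a non-zero error code is supplied, send SCAN_FRAGREF to the
+ * requester.
+ * ------------------------------------------------------------------------- */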
+void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
+ jam();
+ scanptr.i = scan_ptr_i;
+ c_scanRecordPool.getPtr(scanptr);
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ finishScanrec(signal);
+ releaseScanrec(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
+
+ if(errcode)
+ {
+ jam();
+ ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = tcConnectptr.p->transid[0];
+ ref->transId2 = tcConnectptr.p->transid[1];
+ ref->errorCode = errcode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ }
+ deleteTransidHash(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+}
+
+/*---------------------------------------------------------------------*/
+/* Send this 'I am alive' signal to TC when it is received from ACC */
+/* We include the scanPtr.i that comes from ACC in signalData[1], this */
+/* tells TC which fragment record to check for a timeout. */
+/*---------------------------------------------------------------------*/
+void Dblqh::execSCAN_HBREP(Signal* signal)
+{
+ jamEntry();
+ scanptr.i = signal->theData[0];
+ c_scanRecordPool.getPtr(scanptr);
+ switch(scanptr.p->scanType){
+ case ScanRecord::SCAN:
+ if (scanptr.p->scanTcWaiting == ZTRUE) {
+ jam();
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+ const Uint32 transid1 = signal->theData[1];
+ const Uint32 transid2 = signal->theData[2];
+ ndbrequire(transid1 == tcConnectptr.p->transid[0] &&
+ transid2 == tcConnectptr.p->transid[1]);
+
+ // Update counter on tcConnectPtr
+ if (tcConnectptr.p->tcTimer != 0){
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+ } else {
+ jam();
+ //ndbout << "SCAN_HBREP when tcTimer was off" << endl;
+ }
+
+ signal->theData[0] = tcConnectptr.p->clientConnectrec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ sendSignal(tcConnectptr.p->clientBlockref,
+ GSN_SCAN_HBREP, signal, 3, JBB);
+ }//if
+ break;
+ case ScanRecord::COPY:
+ // ndbout << "Dblqh::execSCAN_HBREP Dropping SCAN_HBREP" << endl;
+ break;
+ default:
+ ndbrequire(false);
+ }
+}
+
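+/* -------------------------------------------------------------------------
+ * ENTER ACC_SCANCONF
+ * -------------------------------------------------------------------------
+ * An empty fragment completes the scan immediately. For range scans the
+ * bounds are passed to TUX with TUX_BOUND_INFO. The scan stored procedure
+ * is then defined in TUP with STORED_PROCREQ, unless one already exists.
+ * ------------------------------------------------------------------------- */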
+void Dblqh::accScanConfScanLab(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ /* -----------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_ACC_SCAN
+ * ----------------------------------------------------------------------- */
+ if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * THE FRAGMENT WAS EMPTY.
+ * REPORT SUCCESSFUL COPYING.
+ * --------------------------------------------------------------------- */
+ tupScanCloseConfLab(signal);
+ return;
+ }//if
+ scanptr.p->scanAccPtr = accScanConf->accPtr;
+ if (scanptr.p->rangeScan) {
+ jam();
+ TuxBoundInfo* req = (TuxBoundInfo*)signal->getDataPtrSend();
+ req->errorCode = RNIL;
+ req->tuxScanPtrI = scanptr.p->scanAccPtr;
+ Uint32 len = req->boundAiLength = copy_bounds(req->data, tcConnectptr.p);
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal,
+ TuxBoundInfo::SignalLength + len);
+
+ jamEntry();
+ if (req->errorCode != 0) {
+ jam();
+ /*
+ * Cannot use STORED_PROCREF to abort since even the REF
+ * returns a stored proc id. So record the error and continue.
+ * The scan is already invalid in TUX and returns an empty set.
+ */
+ tcConnectptr.p->errorCode = req->errorCode;
+ }
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN;
+ if(scanptr.p->scanStoredProcId == RNIL)
+ {
+ jam();
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZSTORED_PROC_SCAN;
+
+ signal->theData[4] = scanptr.p->scanAiLength;
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_STORED_PROCREQ, signal, 5, JBB);
+
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ AttrbufPtr regAttrinbufptr;
+ Uint32 firstAttr = regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
+ while (regAttrinbufptr.i != RNIL) {
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ jam();
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(dataLen != 0);
+ // first 3 words already set in STORED_PROCREQ
+ MEMCOPY_NO_WORDS(&signal->theData[3],
+ &regAttrinbufptr.p->attrbuf[0],
+ dataLen);
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_ATTRINFO, signal, dataLen + 3, JBB);
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ c_no_attrinbuf_recs++;
+ }//while
+
+ /**
+ * Release attr info
+ */
+ if(firstAttr != RNIL)
+ {
+ regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf = firstAttr;
+ tcConnectptr.p->firstAttrinbuf = tcConnectptr.p->lastAttrinbuf = RNIL;
+ }
+ }
+ else
+ {
+ jam();
+ storedProcConfScanLab(signal);
+ }
+}//Dblqh::accScanConfScanLab()
+
+#define print_buf(s,idx,len) {\
+ printf(s); Uint32 t2=len; DatabufPtr t3; t3.i = idx; \
+ while(t3.i != RNIL && t2-- > 0){\
+ ptrCheckGuard(t3, cdatabufFileSize, databuf);\
+ printf("%d ", t3.i); t3.i= t3.p->nextDatabuf;\
+ } printf("\n"); }
+
+Uint32
+Dblqh::copy_bounds(Uint32 * dst, TcConnectionrec* tcPtrP)
+{
+ /**
+ * copy_bounds handles multiple bounds by storing the length of each
+ * individual bound in the upper 16 bits of its first word (the word
+ * that otherwise specifies the bound type).
+ */
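+ /*
+ * Worked example (layout inferred from the code below):
+ * first word = (len << 16) | (range_no << 4) | bound_type
+ * e.g. 0x00030021 -> len = 3 words, range_no = 2, bound_type = 1
+ * A zero length field means the bound covers the remaining totalLen words.
+ */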
+
+ DatabufPtr regDatabufptr;
+ Uint32 left = 4 - tcPtrP->m_offset_current_keybuf; // left in buf
+ Uint32 totalLen = tcPtrP->primKeyLen - 4;
+ regDatabufptr.i = tcPtrP->firstTupkeybuf;
+
+ ndbassert(tcPtrP->primKeyLen >= 4);
+ ndbassert(tcPtrP->m_offset_current_keybuf < 4);
+ ndbassert(!(totalLen == 0 && regDatabufptr.i != RNIL));
+ ndbassert(!(totalLen != 0 && regDatabufptr.i == RNIL));
+
+ if(totalLen)
+ {
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Uint32 sig0 = regDatabufptr.p->data[0];
+ Uint32 sig1 = regDatabufptr.p->data[1];
+ Uint32 sig2 = regDatabufptr.p->data[2];
+ Uint32 sig3 = regDatabufptr.p->data[3];
+
+ switch(left){
+ case 4:
+ * dst++ = sig0;
+ case 3:
+ * dst++ = sig1;
+ case 2:
+ * dst++ = sig2;
+ case 1:
+ * dst++ = sig3;
+ }
+
+ Uint32 first = (* (dst - left)); // First word in range
+
+ // Length of this range
+ Uint8 offset;
+ const Uint32 len = (first >> 16) ? (first >> 16) : totalLen;
+ tcPtrP->m_scan_curr_range_no = (first & 0xFFF0) >> 4;
+ (* (dst - left)) = (first & 0xF); // Remove length & range no
+
+ if(len < left)
+ {
+ offset = len;
+ }
+ else
+ {
+ Databuf * lastP;
+ left = (len - left);
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ while(left >= 4)
+ {
+ left -= 4;
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+
+ if(left > 0)
+ {
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+ else
+ {
+ lastP = regDatabufptr.p;
+ }
+ offset = left & 3;
+ lastP->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = tcPtrP->firstTupkeybuf;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ if(len == totalLen && regDatabufptr.i != RNIL)
+ {
+ regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = regDatabufptr.i;
+ tcPtrP->lastTupkeybuf = regDatabufptr.i = RNIL;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ tcPtrP->m_offset_current_keybuf = offset;
+ tcPtrP->firstTupkeybuf = regDatabufptr.i;
+ tcPtrP->primKeyLen = 4 + totalLen - len;
+
+ return len;
+ }
+ return totalLen;
+}
+
+/* -------------------------------------------------------------------------
+ * ENTER STORED_PROCCONF WITH
+ * TC_CONNECTPTR,
+ * TSTORED_PROC_ID
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::storedProcConfScanLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ // STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ closeScanLab(signal);
+ return;
+ }//if
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_FIRST_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ continueFirstScanAfterBlockedLab(signal);
+}//Dblqh::storedProcConfScanLab()
+
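+/* -------------------------------------------------------------------------
+ * Request the first row of the scan by sending NEXT_SCANREQ (ZSCAN_NEXT)
+ * to ACC, or to TUX for range scans.
+ * ------------------------------------------------------------------------- */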
+void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT;
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueFirstScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * When executing a scan we must come up to the surface at times to make
+ * sure we can quickly start local checkpoints.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execCHECK_LCP_STOP(Signal* signal)
+{
+ jamEntry();
+ scanptr.i = signal->theData[0];
+ c_scanRecordPool.getPtr(scanptr);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (signal->theData[1] == ZTRUE) {
+ jam();
+ releaseActiveFrag(signal);
+ signal->theData[0] = ZCHECK_LCP_STOP_BLOCKED;
+ signal->theData[1] = scanptr.i;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ signal->theData[0] = RNIL;
+ return;
+ }//if
+ if (fragptr.p->fragStatus != Fragrecord::FSACTIVE) {
+ ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
+ releaseActiveFrag(signal);
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
+ signal->theData[0] = RNIL;
+ }//if
+}//Dblqh::execCHECK_LCP_STOP()
+
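+/* -------------------------------------------------------------------------
+ * Presumably reached via the delayed CONTINUEB (ZCHECK_LCP_STOP_BLOCKED)
+ * sent above: retry once the fragment is active again, or re-queue the
+ * operation if the fragment is still blocked.
+ * ------------------------------------------------------------------------- */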
+void Dblqh::checkLcpStopBlockedLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ continueAfterCheckLcpStopBlocked(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dblqh::checkLcpStopBlockedLab()
+
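+/* -------------------------------------------------------------------------
+ * The fragment is active again; resume the scan with a direct
+ * ACC_CHECK_SCAN (ZNOT_CHECK_LCP_STOP) to ACC, or to TUX for range scans.
+ * ------------------------------------------------------------------------- */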
+void Dblqh::continueAfterCheckLcpStopBlocked(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ if (! scanptr.p->rangeScan)
+ EXECUTE_DIRECT(DBACC, GSN_ACC_CHECK_SCAN, signal, 2);
+ else
+ EXECUTE_DIRECT(DBTUX, GSN_ACC_CHECK_SCAN, signal, 2);
+}//Dblqh::continueAfterCheckLcpStopBlocked()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::nextScanConfScanLab(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (nextScanConf->fragId == RNIL) {
+ jam();
+ /* ---------------------------------------------------------------------
+ * THERE ARE NO MORE TUPLES TO FETCH. IF WE HAVE ANY
+ * OPERATIONS STILL NEEDING A LOCK WE REPORT TO THE
+ * APPLICATION AND CLOSE THE SCAN WHEN THE NEXT SCAN
+ * REQUEST IS RECEIVED. IF WE DO NOT HAVE ANY NEED FOR
+ * LOCKS WE CAN CLOSE THE SCAN IMMEDIATELY.
+ * --------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ /*************************************************************
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ ************************************************************ */
+ if (!scanptr.p->scanLockHold)
+ {
+ jam();
+ closeScanLab(signal);
+ return;
+ }
+
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+
+ if((tcConnectptr.p->primKeyLen - 4) == 0)
+ scanptr.p->scanCompletedStatus = ZTRUE;
+
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }//if
+ closeScanLab(signal);
+ return;
+ }//if
+
+ // If accOperationPtr == RNIL no record was returned by ACC
+ if (nextScanConf->accOperationPtr == RNIL) {
+ jam();
+ /*************************************************************
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ ************************************************************ */
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ releaseActiveFrag(signal);
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ jam();
+ releaseActiveFrag(signal);
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }//if
+
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref,
+ GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref,
+ GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }//if
+ jam();
+ set_acc_ptr_in_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ nextScanConf->accOperationPtr);
+ jam();
+ scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
+ scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
+ scanptr.p->scanLocalFragid = nextScanConf->fragId;
+ nextScanConfLoopLab(signal);
+}//Dblqh::nextScanConfScanLab()
+
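+/* -------------------------------------------------------------------------
+ * A row has been returned by ACC/TUX. Unless the scan has been asked to
+ * stop, read it by executing TUPKEYREQ directly in TUP; for range scans
+ * the primary table fragment is used instead of the index fragment.
+ * ------------------------------------------------------------------------- */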
+void Dblqh::nextScanConfLoopLab(Signal* signal)
+{
+ /* ----------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * ---------------------------------------------------------------------- */
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ releaseActiveFrag(signal);
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ closeScanLab(signal);
+ return;
+ }//if
+ jam();
+ Uint32 tableRef;
+ Uint32 tupFragPtr;
+ Uint32 reqinfo = (scanptr.p->scanLockHold == ZFALSE);
+ reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
+ reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_TUPKEY;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (! scanptr.p->rangeScan) {
+ tableRef = tcConnectptr.p->tableref;
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ } else {
+ jam();
+ // for ordered index use primary table
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tableRef = tFragPtr.p->tabRef;
+ tupFragPtr = tFragPtr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ }
+ {
+ jam();
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+
+ tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
+ tupKeyReq->request = reqinfo;
+ tupKeyReq->tableRef = tableRef;
+ tupKeyReq->fragId = scanptr.p->scanLocalFragid;
+ tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
+ tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
+ tupKeyReq->attrBufLen = 0;
+ tupKeyReq->opRef = scanptr.p->scanApiOpPtr;
+ tupKeyReq->applRef = scanptr.p->scanApiBlockref;
+ tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
+ tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
+ tupKeyReq->transId1 = tcConnectptr.p->transid[0];
+ tupKeyReq->transId2 = tcConnectptr.p->transid[1];
+ tupKeyReq->fragPtr = tupFragPtr;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+ Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+ EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
+ TupKeyReq::SignalLength);
+ }
+}
+
+/* -------------------------------------------------------------------------
+ * RECEPTION OF FURTHER KEY INFORMATION WHEN KEY SIZE > 16 BYTES.
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_SCAN_KEYINFO
+ * ------------------------------------------------------------------------- */
+void
+Dblqh::keyinfoLab(const Uint32 * src, const Uint32 * end)
+{
+ do {
+ jam();
+ seizeTupkeybuf(0);
+ databufptr.p->data[0] = * src ++;
+ databufptr.p->data[1] = * src ++;
+ databufptr.p->data[2] = * src ++;
+ databufptr.p->data[3] = * src ++;
+ } while (src < end);
+}//Dblqh::keyinfoLab()
+
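+/* -------------------------------------------------------------------------
+ * Read the primary key of the current scan row directly from TUP, using
+ * the local key (fragment page id and page index) saved from
+ * NEXT_SCANCONF. For ordered index scans the primary table id is used.
+ * ------------------------------------------------------------------------- */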
+Uint32
+Dblqh::readPrimaryKeys(ScanRecord *scanP, TcConnectionrec *tcConP, Uint32 *dst)
+{
+ Uint32 tableId = tcConP->tableref;
+ Uint32 fragId = scanP->scanLocalFragid;
+ Uint32 fragPageId = scanP->scanLocalref[0];
+ Uint32 pageIndex = scanP->scanLocalref[1];
+
+ if(scanP->rangeScan)
+ {
+ jam();
+ // for ordered index use primary table
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tableId = tFragPtr.p->tabRef;
+ }
+
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, dst, false);
+ if(0)
+ ndbout_c("readPrimaryKeys(table: %d fragment: %d [ %d %d ] -> %d",
+ tableId, fragId, fragPageId, pageIndex, ret);
+ ndbassert(ret > 0);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------
+ * ENTER TUPKEYCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanTupkeyConfLab(Signal* signal)
+{
+ const TupKeyConf * conf = (TupKeyConf *)signal->getDataPtr();
+ UintR tdata4 = conf->readLength;
+ UintR tdata5 = conf->lastRow;
+
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ releaseActiveFrag(signal);
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ /* ---------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * --------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanKeyinfoFlag) {
+ jam();
+ // Inform API about keyinfo len as well
+ tdata4 += sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
+ }//if
+ ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
+ scanptr.p->m_curr_batch_size_bytes+= tdata4;
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->m_last_row = tdata5;
+ if (scanptr.p->check_scan_batch_completed() | tdata5){
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ } else {
+ jam();
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
+ scanReleaseLocksLab(signal);
+ return;
+ }
+ } else {
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ } else {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
+ }
+ }
+ scanNextLoopLab(signal);
+}//Dblqh::scanTupkeyConfLab()
+
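+/* -------------------------------------------------------------------------
+ * Continue with the next row: if the fragment is active, link the
+ * operation to it and send NEXT_SCANREQ; if it is blocked, queue the
+ * operation and wait in state SCAN_STOPPED.
+ * ------------------------------------------------------------------------- */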
+void Dblqh::scanNextLoopLab(Signal* signal)
+{
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueScanAfterBlockedLab(signal);
+}//Dblqh::scanNextLoopLab()
+
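+/* -------------------------------------------------------------------------
+ * Send NEXT_SCANREQ for the next row. For ZSCAN_NEXT_COMMIT the ACC
+ * operation pointer of the previously returned row is included so that its
+ * lock can be released; a ZSCAN_NEXT_ABORT is turned into a commit of the
+ * row that was just rejected.
+ * ------------------------------------------------------------------------- */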
+void Dblqh::continueScanAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ Uint32 accOpPtr;
+ if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_ABORT) {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ false);
+ scanptr.p->scan_acc_index--;
+ } else if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_COMMIT) {
+ jam();
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows-1,
+ false);
+ } else {
+ jam();
+ accOpPtr = RNIL; // The value is not used in ACC
+ }//if
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = accOpPtr;
+ signal->theData[2] = scanptr.p->scanFlag;
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
+}//Dblqh::continueScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER TUPKEYREF WITH
+ * TC_CONNECTPTR,
+ * TERROR_CODE
+ * -------------------------------------------------------------------------
+ * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY
+ * ------------------------------------------------------------------------- */
+void Dblqh::scanTupkeyRefLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ releaseActiveFrag(signal);
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ /* ---------------------------------------------------------------------
+ * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
+ * --------------------------------------------------------------------- */
+ if ((scanptr.p->scanLockHold == ZTRUE) &&
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ jam();
+ closeScanLab(signal);
+ return;
+ }//if
+ if ((terrorCode != ZSEARCH_CONDITION_FALSE) &&
+ (terrorCode != ZNO_TUPLE_FOUND) &&
+ (terrorCode >= ZUSER_ERROR_CODE_LIMIT)) {
+ scanptr.p->scanErrorCounter++;
+ tcConnectptr.p->errorCode = terrorCode;
+
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanReleaseCounter = 1;
+ } else {
+ jam();
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
+ }//if
+ /* --------------------------------------------------------------------
+ * WE NEED TO RELEASE ALL LOCKS CURRENTLY
+ * HELD BY THIS SCAN.
+ * -------------------------------------------------------------------- */
+ scanReleaseLocksLab(signal);
+ return;
+ }//if
+ Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount;
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
+ if (time_passed > 1) {
+ /* -----------------------------------------------------------------------
+ * WE NEED TO ENSURE THAT WE DO NOT SEARCH FOR THE NEXT TUPLE FOR A
+ * LONG TIME WHILE WE KEEP A LOCK ON A FOUND TUPLE. WE RATHER REPORT
+ * THE FOUND TUPLE IF FOUND TUPLES ARE RARE. If more than 10 ms passed we
+ * send the found tuples to the API.
+ * ----------------------------------------------------------------------- */
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows + 1;
+ scanReleaseLocksLab(signal);
+ return;
+ }
+ } else {
+ if (time_passed > 10) {
+ jam();
+ signal->theData[0]= scanptr.i;
+ signal->theData[1]= tcConnectptr.p->transid[0];
+ signal->theData[2]= tcConnectptr.p->transid[1];
+ execSCAN_HBREP(signal);
+ }
+ }
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_ABORT;
+ scanNextLoopLab(signal);
+}//Dblqh::scanTupkeyRefLab()
+
+/* -------------------------------------------------------------------------
+ * THE SCAN HAS BEEN COMPLETED. EITHER BY REACHING THE END OR BY COMMAND
+ * FROM THE APPLICATION OR BY SOME SORT OF ERROR CONDITION.
+ * ------------------------------------------------------------------------- */
+void Dblqh::closeScanLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CLOSE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+ continueCloseScanAfterBlockedLab(signal);
+}//Dblqh::closeScanLab()
+
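+/* -------------------------------------------------------------------------
+ * Close the scan in ACC, or in TUX for range scans, by sending
+ * NEXT_SCANREQ with ZSCAN_CLOSE.
+ * ------------------------------------------------------------------------- */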
+void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal)
+{
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_CLOSE_SCAN;
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_CLOSE;
+ if (! scanptr.p->rangeScan)
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ else
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+}//Dblqh::continueCloseScanAfterBlockedLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER NEXT_SCANCONF
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_CLOSE_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::accScanCloseConfLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+ if((tcConnectptr.p->primKeyLen - 4) > 0 &&
+ scanptr.p->scanCompletedStatus != ZTRUE)
+ {
+ jam();
+ releaseActiveFrag(signal);
+ continueAfterReceivingAllAiLab(signal);
+ return;
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN;
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZDELETE_STORED_PROC_ID;
+ signal->theData[4] = scanptr.p->scanStoredProcId;
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_STORED_PROCREQ, signal, 5, JBB);
+}//Dblqh::accScanCloseConfLab()
+
+/* -------------------------------------------------------------------------
+ * ENTER STORED_PROCCONF WITH
+ * -------------------------------------------------------------------------
+ * PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_SCAN
+ * ------------------------------------------------------------------------- */
+void Dblqh::tupScanCloseConfLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else if (tcConnectptr.p->errorCode != 0) {
+ jam();
+ ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = tcConnectptr.p->transid[0];
+ ref->transId2 = tcConnectptr.p->transid[1];
+ ref->errorCode = tcConnectptr.p->errorCode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ } else {
+ jam();
+ sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
+ }//if
+ finishScanrec(signal);
+ releaseScanrec(signal);
+ tcConnectptr.p->tcScanRec = RNIL;
+ deleteTransidHash(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+}//Dblqh::tupScanCloseConfLab()
+
+/* =========================================================================
+ * ======= INITIATE SCAN RECORD =======
+ *
+ * SUBROUTINE SHORT NAME = ISC
+ * ========================================================================= */
+Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
+{
+ const Uint32 reqinfo = scanFragReq->requestInfo;
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
+ const Uint32 max_bytes = scanFragReq->batch_size_bytes;
+ const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
+ const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo);
+ const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
+ const Uint32 readCommitted = ScanFragReq::getReadCommittedFlag(reqinfo);
+ const Uint32 idx = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint32 descending = ScanFragReq::getDescendingFlag(reqinfo);
+ const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
+ const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
+
+ scanptr.p->scanKeyinfoFlag = keyinfo;
+ scanptr.p->scanLockHold = scanLockHold;
+ scanptr.p->scanCompletedStatus = ZFALSE;
+ scanptr.p->scanType = ScanRecord::SCAN;
+ scanptr.p->scanApiBlockref = scanFragReq->resultRef;
+ scanptr.p->scanAiLength = attrLen;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanSchemaVersion = scanFragReq->schemaVersion;
+
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ scanptr.p->m_max_batch_size_rows = max_rows;
+ scanptr.p->m_max_batch_size_bytes = max_bytes;
+
+ scanptr.p->scanErrorCounter = 0;
+ scanptr.p->scanLockMode = scanLockMode;
+ scanptr.p->readCommitted = readCommitted;
+ scanptr.p->rangeScan = idx;
+ scanptr.p->descending = descending;
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanFlag = ZFALSE;
+ scanptr.p->scanLocalref[0] = 0;
+ scanptr.p->scanLocalref[1] = 0;
+ scanptr.p->scanLocalFragid = 0;
+ scanptr.p->scanTcWaiting = ZTRUE;
+ scanptr.p->scanNumber = ~0;
+ scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr;
+ scanptr.p->m_last_row = 0;
+ scanptr.p->scanStoredProcId = RNIL;
+
+ if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){
+ jam();
+ return ScanFragRef::ZWRONG_BATCH_SIZE;
+ }
+ if (!seize_acc_ptr_list(scanptr.p, max_rows)){
+ jam();
+ return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
+ }
+ /**
+ * Used for scan take over
+ */
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ scanptr.p->fragPtrI = fragptr.p->tableFragptr;
+
+ /**
+ * !idx uses scan numbers 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) (= 1-11)
+ * idx uses scan numbers from MAX_PARALLEL_SCANS_PER_FRAG and up (= 12-42)
+ */
+ Uint32 start = (idx ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
+ Uint32 stop = (idx ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
+ stop += start;
+ Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
+
+ if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
+ jam();
+
+ if(scanPrio == 0){
+ jam();
+ return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
+ }
+
+ /**
+ * Put on queue
+ */
+ scanptr.p->scanState = ScanRecord::IN_QUEUE;
+ LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
+ fragptr.p->m_queuedScans);
+ queue.add(scanptr);
+ return ZOK;
+ }
+
+ scanptr.p->scanNumber = free;
+ tFragPtr.p->m_scanNumberMask.clear(free);// Update mask
+
+ LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans);
+ active.add(scanptr);
+ if(scanptr.p->scanKeyinfoFlag){
+ jam();
+#ifdef VM_TRACE
+ ScanRecordPtr tmp;
+ ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p));
+#endif
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d",
+ scanptr.p->scanNumber, scanptr.p->fragPtrI,
+ tabptr.i, scanFragReq->fragmentNoKeyLen & 0xFFFF,
+ fragptr.i, fragptr.p->tableFragptr);
+#endif
+ c_scanTakeOverHash.add(scanptr);
+ }
+ init_acc_ptr_list(scanptr.p);
+ return ZOK;
+}
+
+/* =========================================================================
+ * ======= INITIATE TC RECORD AT SCAN =======
+ *
+ * SUBROUTINE SHORT NAME = IST
+ * ========================================================================= */
+void Dblqh::initScanTc(Signal* signal,
+ Uint32 transid1,
+ Uint32 transid2,
+ Uint32 fragId,
+ Uint32 nodeId)
+{
+ tcConnectptr.p->transid[0] = transid1;
+ tcConnectptr.p->transid[1] = transid2;
+ tcConnectptr.p->tcScanRec = scanptr.i;
+ tcConnectptr.p->tableref = tabptr.i;
+ tcConnectptr.p->fragmentid = fragId;
+ tcConnectptr.p->fragmentptr = fragptr.i;
+ tcConnectptr.p->tcOprec = tcConnectptr.p->clientConnectrec;
+ tcConnectptr.p->tcBlockref = tcConnectptr.p->clientBlockref;
+ tcConnectptr.p->errorCode = 0;
+ tcConnectptr.p->reclenAiLqhkey = 0;
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ tcConnectptr.p->nextReplica = nodeId;
+ tcConnectptr.p->currTupAiLen = 0;
+ tcConnectptr.p->opExec = 1;
+ tcConnectptr.p->operation = ZREAD;
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ tcConnectptr.p->commitAckMarker = RNIL;
+ tcConnectptr.p->m_offset_current_keybuf = 0;
+ tcConnectptr.p->m_scan_curr_range_no = 0;
+
+ tabptr.p->usageCount++;
+}//Dblqh::initScanTc()
+
+/* =========================================================================
+ * ======= FINISH SCAN RECORD =======
+ *
+ * REMOVE SCAN RECORD FROM PER FRAGMENT LIST.
+ * ========================================================================= */
+void Dblqh::finishScanrec(Signal* signal)
+{
+ release_acc_ptr_list(scanptr.p);
+
+ LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
+ fragptr.p->m_queuedScans);
+
+ if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
+ jam();
+ queue.release(scanptr);
+ return;
+ }
+
+ if(scanptr.p->scanKeyinfoFlag){
+ jam();
+ ScanRecordPtr tmp;
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI);
+#endif
+ c_scanTakeOverHash.remove(tmp, * scanptr.p);
+ ndbrequire(tmp.p == scanptr.p);
+ }
+
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
+ scans.release(scanptr);
+
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = scanptr.p->fragPtrI;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+
+ const Uint32 scanNumber = scanptr.p->scanNumber;
+ ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber));
+ ScanRecordPtr restart;
+
+ /**
+ * Start one of the queued scans
+ */
+ if(scanNumber == NR_ScanNo || !queue.first(restart)){
+ jam();
+ tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ return;
+ }
+
+ if(ERROR_INSERTED(5034)){
+ jam();
+ tFragPtr.p->m_scanNumberMask.set(scanNumber);
+ return;
+ }
+
+ ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE);
+
+ ScanRecordPtr tmpScan = scanptr;
+ TcConnectionrecPtr tmpTc = tcConnectptr;
+
+ tcConnectptr.i = restart.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
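+ /**
+ * Hand the freed scan number over to the first queued scan and move it
+ * from the queue to the fragment's active list.
+ */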
+ restart.p->scanNumber = scanNumber;
+
+ queue.remove(restart);
+ scans.add(restart);
+ if(restart.p->scanKeyinfoFlag){
+ jam();
+#ifdef VM_TRACE
+ ScanRecordPtr tmp;
+ ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p));
+#endif
+ c_scanTakeOverHash.add(restart);
+#ifdef TRACE_SCAN_TAKEOVER
+ ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
+#endif
+ }
+
+ restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec
+ if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED)
+ {
+ jam();
+ scanptr = restart;
+ continueAfterReceivingAllAiLab(signal);
+ }
+ else
+ {
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI);
+ }
+ scanptr = tmpScan;
+ tcConnectptr = tmpTc;
+}//Dblqh::finishScanrec()
+
+/* =========================================================================
+ * ======= RELEASE SCAN RECORD =======
+ *
+ * RELEASE A SCAN RECORD TO THE FREELIST.
+ * ========================================================================= */
+void Dblqh::releaseScanrec(Signal* signal)
+{
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanType = ScanRecord::ST_IDLE;
+ scanptr.p->scanTcWaiting = ZFALSE;
+ cbookedAccOps -= scanptr.p->m_max_batch_size_rows;
+ cscanNoFreeRec++;
+}//Dblqh::releaseScanrec()
+
+/* ------------------------------------------------------------------------
+ * ------- SEND KEYINFO20 TO API -------
+ *
+ * ------------------------------------------------------------------------ */
+Uint32 Dblqh::sendKeyinfo20(Signal* signal,
+ ScanRecord * scanP,
+ TcConnectionrec * tcConP)
+{
+ ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
+ KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
+
+ /**
+ * Note that this code requires signal->theData to be big enough for
+ * an entire key
+ */
+ const BlockReference ref = scanP->scanApiBlockref;
+ const Uint32 scanOp = scanP->m_curr_batch_size_rows;
+ const Uint32 nodeId = refToNode(ref);
+ const bool connectedToNode = getNodeInfo(nodeId).m_connected;
+ const Uint32 type = getNodeInfo(nodeId).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
+ const bool longable = true; // TODO is_api && !old_dest;
+
+ Uint32 * dst = keyInfo->keyData;
+ dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength;
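+ /**
+ * For a local receiver the key is read straight into keyData (and later
+ * passed via EXECUTE_DIRECT); for a remote receiver it is read into the
+ * scratch area at theData+25 (src below) so that it can be sent in
+ * DataLength-sized KEYINFO20 chunks.
+ */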
+
+ Uint32 keyLen = readPrimaryKeys(scanP, tcConP, dst);
+ Uint32 fragId = tcConP->fragmentid;
+ keyInfo->clientOpPtr = scanP->scanApiOpPtr;
+ keyInfo->keyLen = keyLen;
+ keyInfo->scanInfo_Node =
+ KeyInfo20::setScanInfo(scanOp, scanP->scanNumber) + (fragId << 20);
+ keyInfo->transId1 = tcConP->transid[0];
+ keyInfo->transId2 = tcConP->transid[1];
+
+ Uint32 * src = signal->theData+25;
+ if(connectedToNode){
+ jam();
+
+ if(nodeId != getOwnNodeId()){
+ jam();
+
+ if(keyLen <= KeyInfo20::DataLength || !longable) {
+ while(keyLen > KeyInfo20::DataLength){
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength);
+ sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
+ src += KeyInfo20::DataLength;
+ keyLen -= KeyInfo20::DataLength;
+ }
+
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ sendSignal(ref, GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength+keyLen, JBB);
+ return keyLen;
+ }
+
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength,
+ JBB, ptr, 1);
+ return keyLen;
+ }
+
+ EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal,
+ KeyInfo20::HeaderLength + keyLen);
+ jamEntry();
+ return keyLen;
+ }
+
+ /**
+ * If this node does not have a direct connection
+ * to the receiving node we want to send the signals
+ * routed via the node that controls this read
+ */
+ Uint32 routeBlockref = tcConP->clientBlockref;
+
+ if(keyLen < KeyInfo20::DataLength || !longable){
+ jam();
+
+ while (keyLen > (KeyInfo20::DataLength - 1)) {
+ jam();
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength - 1);
+ keyInfo->keyData[KeyInfo20::DataLength-1] = ref;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 25, JBB);
+ src += KeyInfo20::DataLength - 1;
+ keyLen -= KeyInfo20::DataLength - 1;
+ }
+
+ MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
+ keyInfo->keyData[keyLen] = ref;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+keyLen+1, JBB);
+ return keyLen;
+ }
+
+ keyInfo->keyData[0] = ref;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = src;
+ ptr[0].sz = keyLen;
+ sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
+ KeyInfo20::HeaderLength+1, JBB, ptr, 1);
+ return keyLen;
+}
+
+/* ------------------------------------------------------------------------
+ * ------- SEND SCAN_FRAGCONF TO TC THAT CONTROLS THE SCAN -------
+ *
+ * ------------------------------------------------------------------------ */
+void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
+{
+ Uint32 completed_ops= scanptr.p->m_curr_batch_size_rows;
+ Uint32 total_len= scanptr.p->m_curr_batch_size_bytes;
+ scanptr.p->scanTcWaiting = ZFALSE;
+
+ if(ERROR_INSERTED(5037)){
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+ ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
+ NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref);
+ Uint32 trans_id1= tcConnectptr.p->transid[0];
+ Uint32 trans_id2= tcConnectptr.p->transid[1];
+
+ conf->senderData = tcConnectptr.p->clientConnectrec;
+ conf->completedOps = completed_ops;
+ conf->fragmentCompleted = scanCompleted;
+ conf->transId1 = trans_id1;
+ conf->transId2 = trans_id2;
+ conf->total_len= total_len;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
+ signal, ScanFragConf::SignalLength, JBB);
+
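+ /**
+ * Unless locks are held across batches, the batch counters restart from
+ * zero before the next batch is fetched.
+ */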
+ if(!scanptr.p->scanLockHold)
+ {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ }
+}//Dblqh::sendScanFragConf()
+
+/* ######################################################################### */
+/* ####### NODE RECOVERY MODULE ####### */
+/* */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* */
+/* THIS MODULE IS USED WHEN A NODE HAS FAILED. IT PERFORMS A COPY OF A */
+/* FRAGMENT TO A NEW REPLICA OF THE FRAGMENT. IT ALSO SHUTS DOWN ALL */
+/* CONNECTIONS TO THE FAILED NODE. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::calculateHash(Signal* signal)
+{
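+ /**
+ * Gather the primary key words: the four words cached in tupkeyData
+ * followed by the chained databuf records, four words at a time, and
+ * hash the result with md5_hash.
+ */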
+ DatabufPtr locDatabufptr;
+ UintR Ti;
+ UintR Tdata0;
+ UintR Tdata1;
+ UintR Tdata2;
+ UintR Tdata3;
+ UintR* Tdata32;
+ Uint64 Tdata[512];
+
+ Tdata32 = (UintR*)&Tdata[0];
+
+ Tdata0 = tcConnectptr.p->tupkeyData[0];
+ Tdata1 = tcConnectptr.p->tupkeyData[1];
+ Tdata2 = tcConnectptr.p->tupkeyData[2];
+ Tdata3 = tcConnectptr.p->tupkeyData[3];
+ Tdata32[0] = Tdata0;
+ Tdata32[1] = Tdata1;
+ Tdata32[2] = Tdata2;
+ Tdata32[3] = Tdata3;
+ locDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
+ Ti = 4;
+ while (locDatabufptr.i != RNIL) {
+ ptrCheckGuard(locDatabufptr, cdatabufFileSize, databuf);
+ Tdata0 = locDatabufptr.p->data[0];
+ Tdata1 = locDatabufptr.p->data[1];
+ Tdata2 = locDatabufptr.p->data[2];
+ Tdata3 = locDatabufptr.p->data[3];
+ Tdata32[Ti ] = Tdata0;
+ Tdata32[Ti + 1] = Tdata1;
+ Tdata32[Ti + 2] = Tdata2;
+ Tdata32[Ti + 3] = Tdata3;
+ locDatabufptr.i = locDatabufptr.p->nextDatabuf;
+ Ti += 4;
+ }//while
+ tcConnectptr.p->hashValue =
+ md5_hash((Uint64*)&Tdata32[0], (UintR)tcConnectptr.p->primKeyLen);
+}//Dblqh::calculateHash()
+
+/* *************************************** */
+/* COPY_FRAGREQ: Start copying a fragment */
+/* *************************************** */
+void Dblqh::execCOPY_FRAGREQ(Signal* signal)
+{
+ jamEntry();
+ const CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0];
+ tabptr.i = copyFragReq->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ const Uint32 fragId = copyFragReq->fragId;
+ const Uint32 copyPtr = copyFragReq->userPtr;
+ const Uint32 userRef = copyFragReq->userRef;
+ const Uint32 nodeId = copyFragReq->nodeId;
+
+ ndbrequire(cnoActiveCopy < 3);
+ ndbrequire(getFragmentrec(signal, fragId));
+ ndbrequire(fragptr.p->copyFragState == ZIDLE);
+ ndbrequire(cfirstfreeTcConrec != RNIL);
+ ndbrequire(fragptr.p->m_scanNumberMask.get(NR_ScanNo));
+
+ fragptr.p->fragDistributionKey = copyFragReq->distributionKey;
+
+ if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {
+ jam();
+ /**
+ * Ordered index doesn't need to be copied
+ */
+ CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ conf->userPtr = copyPtr;
+ conf->sendingNodeId = cownNodeid;
+ conf->startingNodeId = nodeId;
+ conf->tableId = tabptr.i;
+ conf->fragId = fragId;
+ sendSignal(userRef, GSN_COPY_FRAGCONF, signal,
+ CopyFragConf::SignalLength, JBB);
+ return;
+ }//if
+
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
+ ndbrequire(scans.seize(scanptr));
+/* ------------------------------------------------------------------------- */
+// We keep track of how many operation records have been booked in ACC.
+// A copy fragment always has its records booked and thus need not book any.
+// The maximum number of operations used in parallel is m_max_batch_size_rows.
+// This variable has to be set up here since it is used by releaseScanrec
+// to unbook operation records in ACC.
+/* ------------------------------------------------------------------------- */
+ scanptr.p->m_max_batch_size_rows = 0;
+ scanptr.p->rangeScan = 0;
+ seizeTcrec();
+
+ /**
+ * Remove implicit cast/usage of CopyFragReq
+ */
+ //initCopyrec(signal);
+ scanptr.p->copyPtr = copyPtr;
+ scanptr.p->scanType = ScanRecord::COPY;
+ scanptr.p->scanApiBlockref = userRef;
+ scanptr.p->scanNodeId = nodeId;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanSchemaVersion = copyFragReq->schemaVersion;
+ scanptr.p->scanCompletedStatus = ZFALSE;
+ scanptr.p->scanErrorCounter = 0;
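+ /**
+ * NR_ScanNo is the scan number reserved for node recovery copy scans.
+ * It is taken out of the fragment's free mask here and given back in
+ * finishScanrec().
+ */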
+ scanptr.p->scanNumber = NR_ScanNo;
+ scanptr.p->scanKeyinfoFlag = 0; // Don't put into hash
+ scanptr.p->fragPtrI = fragptr.i;
+ fragptr.p->m_scanNumberMask.clear(NR_ScanNo);
+
+ initScanTc(signal,
+ 0,
+ (DBLQH << 20) + (cownNodeid << 8),
+ fragId,
+ copyFragReq->nodeId);
+ cactiveCopy[cnoActiveCopy] = fragptr.i;
+ cnoActiveCopy++;
+
+ tcConnectptr.p->copyCountWords = 0;
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
+ scanptr.p->scanState = ScanRecord::WAIT_ACC_COPY;
+ AccScanReq * req = (AccScanReq*)&signal->theData[0];
+ req->senderData = scanptr.i;
+ req->senderRef = cownref;
+ req->tableId = tabptr.i;
+ req->fragmentNo = fragId;
+ req->requestInfo = 0;
+ AccScanReq::setLockMode(req->requestInfo, 0);
+ AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
+ req->transId1 = tcConnectptr.p->transid[0];
+ req->transId2 = tcConnectptr.p->transid[1];
+ req->savePointId = tcConnectptr.p->savePointId;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
+ return;
+}//Dblqh::execCOPY_FRAGREQ()
+
+void Dblqh::accScanConfCopyLab(Signal* signal)
+{
+ AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+/*--------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_ACC_COPY */
+/*--------------------------------------------------------------------------*/
+ if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FRAGMENT WAS EMPTY. */
+/* REPORT SUCCESSFUL COPYING. */
+/*---------------------------------------------------------------------------*/
+ tupCopyCloseConfLab(signal);
+ return;
+ }//if
+ scanptr.p->scanAccPtr = accScanConf->accPtr;
+ scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_COPY;
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZSTORED_PROC_COPY;
+// theData[4] is not used in TUP with ZSTORED_PROC_COPY
+ sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
+ return;
+}//Dblqh::accScanConfCopyLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER STORED_PROCCONF WITH */
+/* TC_CONNECTPTR, */
+/* TSTORED_PROC_ID */
+/*---------------------------------------------------------------------------*/
+void Dblqh::storedProcConfCopyLab(Signal* signal)
+{
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_COPY */
+/*---------------------------------------------------------------------------*/
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE COPY PROCESS HAS BEEN COMPLETED, MOST LIKELY DUE TO A NODE FAILURE. */
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_FIRST_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueFirstCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::storedProcConfCopyLab()
+
+void Dblqh::continueFirstCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueFirstCopyAfterBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER NEXT_SCANCONF WITH */
+/* SCANPTR, */
+/* TFRAGID, */
+/* TACC_OPPTR, */
+/* TLOCAL_KEY1, */
+/* TLOCAL_KEY2, */
+/* TKEY_LENGTH, */
+/* TKEY1, */
+/* TKEY2, */
+/* TKEY3, */
+/* TKEY4 */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::nextScanConfCopyLab(Signal* signal)
+{
+ NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (nextScanConf->fragId == RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THERE ARE NO MORE TUPLES TO FETCH. WE NEED TO CLOSE */
+/* THE COPY IN ACC AND DELETE THE STORED PROCEDURE IN TUP */
+/*---------------------------------------------------------------------------*/
+ releaseActiveFrag(signal);
+ if (tcConnectptr.p->copyCountWords == 0) {
+ closeCopyLab(signal);
+ return;
+ }//if
+/*---------------------------------------------------------------------------*/
+// Wait until copying is completed also at the starting node before reporting
+// completion. Signal completion through the scanCompletedStatus flag.
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ return;
+ }//if
+
+ // If accOperationPtr == RNIL no record was returned by ACC
+ if (nextScanConf->accOperationPtr == RNIL) {
+ jam();
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ return;
+ }
+
+ set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr);
+ initCopyTc(signal);
+ copySendTupkeyReqLab(signal);
+ return;
+}//Dblqh::nextScanConfCopyLab()
+
+void Dblqh::copySendTupkeyReqLab(Signal* signal)
+{
+ Uint32 reqinfo = 0;
+ Uint32 tupFragPtr;
+
+ reqinfo = reqinfo + (tcConnectptr.p->operation << 6);
+ reqinfo = reqinfo + (tcConnectptr.p->opExec << 10);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_TUPKEY;
+ scanptr.p->scanState = ScanRecord::WAIT_TUPKEY_COPY;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
+ {
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
+
+ tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
+ tupKeyReq->request = reqinfo;
+ tupKeyReq->tableRef = tcConnectptr.p->tableref;
+ tupKeyReq->fragId = scanptr.p->scanLocalFragid;
+ tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
+ tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
+ tupKeyReq->attrBufLen = 0;
+ tupKeyReq->opRef = tcConnectptr.i;
+ tupKeyReq->applRef = cownref;
+ tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
+ tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
+ tupKeyReq->transId1 = tcConnectptr.p->transid[0];
+ tupKeyReq->transId2 = tcConnectptr.p->transid[1];
+ tupKeyReq->fragPtr = tupFragPtr;
+ tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false;
+ tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref;
+ tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
+ tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+ Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+ EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
+ TupKeyReq::SignalLength);
+ }
+}//Dblqh::copySendTupkeyReqLab()
+
+/*---------------------------------------------------------------------------*/
+/* USED IN COPYING OPERATION TO RECEIVE ATTRINFO FROM TUP. */
+/*---------------------------------------------------------------------------*/
+/* ************>> */
+/* TRANSID_AI > */
+/* ************>> */
+void Dblqh::execTRANSID_AI(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ Uint32 length = signal->length() - 3;
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::COPY_TUPKEY);
+ Uint32 * src = &signal->theData[3];
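+ /**
+ * The attribute data follows a 3-word header. Buffer it in 22-word
+ * chunks via saveTupattrbuf and flag ZGET_ATTRINBUF_ERROR if a buffer
+ * cannot be seized.
+ */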
+ while(length > 22){
+ if (saveTupattrbuf(signal, src, 22) == ZOK) {
+ ;
+ } else {
+ jam();
+ tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
+ return;
+ }//if
+ src += 22;
+ length -= 22;
+ }
+ if (saveTupattrbuf(signal, src, length) == ZOK) {
+ return;
+ }
+ jam();
+ tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR;
+}//Dblqh::execTRANSID_AI()
+
+/*--------------------------------------------------------------------------*/
+/* ENTER TUPKEYCONF WITH */
+/* TC_CONNECTPTR, */
+/* TDATA2, */
+/* TDATA3, */
+/* TDATA4, */
+/* TDATA5 */
+/*--------------------------------------------------------------------------*/
+/* PRECONDITION: TRANSACTION_STATE = COPY_TUPKEY */
+/*--------------------------------------------------------------------------*/
+void Dblqh::copyTupkeyConfLab(Signal* signal)
+{
+ const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
+
+ UintR readLength = tupKeyConf->readLength;
+
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ ScanRecord* scanP = scanptr.p;
+ releaseActiveFrag(signal);
+ if (tcConnectptr.p->errorCode != 0) {
+ jam();
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE COPY PROCESS HAS BEEN CLOSED, MOST LIKELY DUE TO A NODE FAILURE. */
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ TcConnectionrec * tcConP = tcConnectptr.p;
+ tcConnectptr.p->totSendlenAi = readLength;
+ tcConnectptr.p->connectState = TcConnectionrec::COPY_CONNECTED;
+
+ // Read primary keys (used to get here via scan keyinfo)
+ Uint32* tmp = signal->getDataPtrSend()+24;
+ Uint32 len= tcConnectptr.p->primKeyLen = readPrimaryKeys(scanP, tcConP, tmp);
+
+ // Calculate hash (no need to linearise the key)
+ tcConnectptr.p->hashValue = md5_hash((Uint64*)tmp, len);
+
+ // Move into databuffer to make packLqhkeyreqLab happy
+ memcpy(tcConP->tupkeyData, tmp, 4*4);
+ if(len > 4)
+ keyinfoLab(tmp+4, tmp + len);
+ LqhKeyReq::setKeyLen(tcConP->reqinfo, len);
+
+/*---------------------------------------------------------------------------*/
+// To avoid using up too many operation records in ACC we will increase the
+// constant to ensure that we never send more than 40 records at a time.
+// This is where the constant 56 comes from. For long records this constant
+// will not matter that much. The current maximum is 6000 words outstanding
+// (including a number of those 56 words not really sent). We also have to
+// limit the simultaneous usage of these operation records so that node
+// recovery does not fail because of simultaneous scanning.
+/*---------------------------------------------------------------------------*/
+ UintR TnoOfWords = readLength + len;
+ TnoOfWords = TnoOfWords + MAGIC_CONSTANT;
+ TnoOfWords = TnoOfWords + (TnoOfWords >> 2);
+
+ /*-----------------------------------------------------------------
+ * NOTE for transid1!
+ * Transid1 in the tcConnection record is used to load-regulate the
+ * copy (node recovery) process.
+ * The number of outstanding words are written in the transid1
+ * variable. This will be sent to the starting node in the
+ * LQHKEYREQ signal and when the answer is returned in the LQHKEYCONF
+ * we can reduce the number of outstanding words and check to see
+ * if more LQHKEYREQ signals should be sent.
+ *
+ * However efficient, this method is rather unsafe in that it
+ * overwrites the original transid1 data.
+ *
+ * Also see TR 587.
+ *----------------------------------------------------------------*/
+ tcConnectptr.p->transid[0] = TnoOfWords; // Data overload, see note!
+ packLqhkeyreqLab(signal);
+ tcConnectptr.p->copyCountWords += TnoOfWords;
+ scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY;
+ if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
+ nextRecordCopy(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::copyTupkeyConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER LQHKEYCONF */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: CONNECT_STATE = COPY_CONNECTED */
+/*---------------------------------------------------------------------------*/
+void Dblqh::copyCompletedLab(Signal* signal)
+{
+ const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ ndbrequire(tcConnectptr.p->transid[1] == lqhKeyConf->transId2);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (tcConnectptr.p->copyCountWords >= cmaxWordsAtNodeRec) {
+ tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// The copy is completing, so we will not start any new copying.
+/*---------------------------------------------------------------------------*/
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) {
+ jam();
+ nextRecordCopy(signal);
+ }//if
+ return;
+ }//if
+ tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note!
+ ndbrequire(tcConnectptr.p->copyCountWords <= cmaxWordsAtNodeRec);
+ if (tcConnectptr.p->copyCountWords > 0) {
+ jam();
+ return;
+ }//if
+/*---------------------------------------------------------------------------*/
+// No more outstanding copies. We will only start new ones from here if it was
+// stopped before and this only happens when copyCountWords is bigger than the
+// threshold value. Since this did not occur we must be waiting for completion.
+// Check that this is so. If not we crash to find out what is going on.
+/*---------------------------------------------------------------------------*/
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ closeCopyLab(signal);
+ return;
+ }//if
+ if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// Make sure that something is in progress. Otherwise we will simply stop
+// and nothing more will happen.
+/*---------------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::copyCompletedLab()
+
+void Dblqh::nextRecordCopy(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanState != ScanRecord::WAIT_LQHKEY_COPY) {
+ jam();
+/*---------------------------------------------------------------------------*/
+// Make sure that nothing is in progress. Otherwise we will have two
+// simultaneous scans on the same record and this will certainly lead to
+// unexpected behaviour.
+/*---------------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ return;
+ }//if
+ scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::nextRecordCopy()
+
+void Dblqh::continueCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ tcConnectptr.p->errorCode = 0;
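+ /**
+ * Ask ACC for the next record. ZSCAN_NEXT_COMMIT also commits (releases)
+ * the previously fetched operation, identified by acc_op_ptr below.
+ */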
+ Uint32 acc_op_ptr= get_acc_ptr_from_scan_record(scanptr.p, 0, false);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = acc_op_ptr;
+ signal->theData[2] = NextScanReq::ZSCAN_NEXT_COMMIT;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueCopyAfterBlockedLab()
+
+void Dblqh::copyLqhKeyRefLab(Signal* signal)
+{
+ ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]);
+ tcConnectptr.p->copyCountWords -= signal->theData[3];
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanErrorCounter++;
+ tcConnectptr.p->errorCode = terrorCode;
+ closeCopyLab(signal);
+ return;
+}//Dblqh::copyLqhKeyRefLab()
+
+void Dblqh::closeCopyLab(Signal* signal)
+{
+ if (tcConnectptr.p->copyCountWords > 0) {
+/*---------------------------------------------------------------------------*/
+// We are still waiting for responses from the starting node.
+// Wait until all of those have arrived before we start the
+// close process.
+/*---------------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ tcConnectptr.p->transid[0] = 0;
+ tcConnectptr.p->transid[1] = 0;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ scanptr.p->scanState = ScanRecord::WAIT_CLOSE_COPY;
+ switch (fragptr.p->fragStatus) {
+ case Fragrecord::FSACTIVE:
+ jam();
+ linkActiveFrag(signal);
+ break;
+ case Fragrecord::BLOCKED:
+ jam();
+ linkFragQueue(signal);
+ tcConnectptr.p->transactionState = TcConnectionrec::COPY_CLOSE_STOPPED;
+ return;
+ break;
+ case Fragrecord::FREE:
+ jam();
+ case Fragrecord::ACTIVE_CREATION:
+ jam();
+ case Fragrecord::CRASH_RECOVERING:
+ jam();
+ case Fragrecord::DEFINED:
+ jam();
+ case Fragrecord::REMOVING:
+ jam();
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ continueCloseCopyAfterBlockedLab(signal);
+ return;
+}//Dblqh::closeCopyLab()
+
+void Dblqh::continueCloseCopyAfterBlockedLab(Signal* signal)
+{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
+ signal->theData[0] = scanptr.p->scanAccPtr;
+ signal->theData[1] = RNIL;
+ signal->theData[2] = ZCOPY_CLOSE;
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ return;
+}//Dblqh::continueCloseCopyAfterBlockedLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER NEXT_SCANCONF WITH */
+/* SCANPTR, */
+/* TFRAGID, */
+/* TACC_OPPTR, */
+/* TLOCAL_KEY1, */
+/* TLOCAL_KEY2, */
+/* TKEY_LENGTH, */
+/* TKEY1, */
+/* TKEY2, */
+/* TKEY3, */
+/* TKEY4 */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_CLOSE_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::accCopyCloseConfLab(Signal* signal)
+{
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZDELETE_STORED_PROC_ID;
+ signal->theData[4] = scanptr.p->scanStoredProcId;
+ sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB);
+ return;
+}//Dblqh::accCopyCloseConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* ENTER STORED_PROCCONF WITH */
+/* TC_CONNECTPTR, */
+/* TSTORED_PROC_ID */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_COPY */
+/*---------------------------------------------------------------------------*/
+void Dblqh::tupCopyCloseConfLab(Signal* signal)
+{
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->copyFragState = ZIDLE;
+
+ if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
+ jam();
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+
+ CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ ref->userPtr = scanptr.p->copyPtr;
+ ref->sendingNodeId = cownNodeid;
+ ref->startingNodeId = scanptr.p->scanNodeId;
+ ref->tableId = fragptr.p->tabRef;
+ ref->fragId = fragptr.p->fragId;
+ ref->errorCode = ZNODE_FAILURE_ERROR;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
+ CopyFragRef::SignalLength, JBB);
+ } else {
+ if (scanptr.p->scanErrorCounter > 0) {
+ jam();
+ CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0];
+ ref->userPtr = scanptr.p->copyPtr;
+ ref->sendingNodeId = cownNodeid;
+ ref->startingNodeId = scanptr.p->scanNodeId;
+ ref->tableId = fragptr.p->tabRef;
+ ref->fragId = fragptr.p->fragId;
+ ref->errorCode = tcConnectptr.p->errorCode;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGREF, signal,
+ CopyFragRef::SignalLength, JBB);
+ } else {
+ jam();
+ CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0];
+ conf->userPtr = scanptr.p->copyPtr;
+ conf->sendingNodeId = cownNodeid;
+ conf->startingNodeId = scanptr.p->scanNodeId;
+ conf->tableId = tcConnectptr.p->tableref;
+ conf->fragId = tcConnectptr.p->fragmentid;
+ sendSignal(scanptr.p->scanApiBlockref, GSN_COPY_FRAGCONF, signal,
+ CopyFragConf::SignalLength, JBB);
+ }//if
+ }//if
+ releaseActiveCopy(signal);
+ tcConnectptr.p->tcScanRec = RNIL;
+ finishScanrec(signal);
+ releaseOprec(signal);
+ releaseTcrec(signal, tcConnectptr);
+ releaseScanrec(signal);
+}//Dblqh::tupCopyCloseConfLab()
+
+/*---------------------------------------------------------------------------*/
+/* A NODE FAILURE OCCURRED DURING THE COPY PROCESS. WE NEED TO CLOSE THE */
+/* COPY PROCESS SINCE A NODE FAILURE DURING THE COPY PROCESS WILL ALSO */
+/* FAIL THE NODE THAT IS TRYING TO START-UP. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::closeCopyRequestLab(Signal* signal)
+{
+ scanptr.p->scanErrorCounter++;
+ switch (scanptr.p->scanState) {
+ case ScanRecord::WAIT_TUPKEY_COPY:
+ case ScanRecord::WAIT_NEXT_SCAN_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. */
+// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ tcConnectptr.p->copyCountWords = 0;
+ break;
+ case ScanRecord::WAIT_ACC_COPY:
+ case ScanRecord::WAIT_STORED_PROC_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS AND WAIT FOR*/
+/* COMPLETION OF STARTUP. */
+/*---------------------------------------------------------------------------*/
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ break;
+ case ScanRecord::WAIT_CLOSE_COPY:
+ case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. */
+/*---------------------------------------------------------------------------*/
+ break;
+ case ScanRecord::WAIT_LQHKEY_COPY:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE ARE WAITING FOR THE FAILED NODE. THE NODE WILL NEVER COME BACK. */
+// WE NEED TO START THE FAILURE HANDLING IMMEDIATELY.
+// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT.
+/*---------------------------------------------------------------------------*/
+ tcConnectptr.p->copyCountWords = 0;
+ closeCopyLab(signal);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dblqh::closeCopyRequestLab()
+
+/* ****************************************************** */
+/* COPY_ACTIVEREQ: Change state of a fragment to ACTIVE. */
+/* ****************************************************** */
+void Dblqh::execCOPY_ACTIVEREQ(Signal* signal)
+{
+ CRASH_INSERTION(5026);
+
+ const CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0];
+ jamEntry();
+ Uint32 masterPtr = req->userPtr;
+ BlockReference masterRef = req->userRef;
+ tabptr.i = req->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ Uint32 fragId = req->fragId;
+ ndbrequire(getFragmentrec(signal, fragId));
+
+ fragptr.p->fragDistributionKey = req->distributionKey;
+
+ ndbrequire(cnoActiveCopy < 3);
+ cactiveCopy[cnoActiveCopy] = fragptr.i;
+ cnoActiveCopy++;
+ fragptr.p->masterBlockref = masterRef;
+ fragptr.p->masterPtr = masterPtr;
+ if (fragptr.p->fragStatus == Fragrecord::FSACTIVE) {
+ jam();
+/*------------------------------------------------------*/
+/* PROCESS HAS ALREADY BEEN STARTED BY PREVIOUS */
+/* MASTER. WE HAVE ALREADY SET THE PROPER MASTER */
+/* BLOCK REFERENCE. */
+/*------------------------------------------------------*/
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+/*------------------------------------------------------*/
+/* PROCESS WAS EVEN COMPLETED. */
+/*------------------------------------------------------*/
+ sendCopyActiveConf(signal, tabptr.i);
+ }//if
+ return;
+ }//if
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ if (fragptr.p->lcpFlag == Fragrecord::LCP_STATE_TRUE) {
+ jam();
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ }//if
+ fragptr.p->activeTcCounter = 1;
+/*------------------------------------------------------*/
+/* SET IT TO ONE TO ENSURE THAT IT IS NOT POSSIBLE*/
+/* TO DECREASE IT TO ZERO UNTIL WE HAVE COMPLETED */
+/* THE SCAN. */
+/*------------------------------------------------------*/
+ signal->theData[0] = ZSCAN_TC_CONNECT;
+ signal->theData[1] = 0;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = fragId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+}//Dblqh::execCOPY_ACTIVEREQ()
+
+void Dblqh::scanTcConnectLab(Signal* signal, Uint32 tstartTcConnect, Uint32 fragId)
+{
+ Uint32 tendTcConnect;
+
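+ /**
+ * Walk the TC connect records in chunks of 200 per CONTINUEB round,
+ * counting operations on this fragment whose log record has not yet
+ * been written (marked NOT_WRITTEN_WAIT below).
+ */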
+ ndbrequire(getFragmentrec(signal, fragId));
+ if ((tstartTcConnect + 200) >= ctcConnectrecFileSize) {
+ jam();
+ tendTcConnect = ctcConnectrecFileSize - 1;
+ } else {
+ jam();
+ tendTcConnect = tstartTcConnect + 200;
+ }//if
+ for (tcConnectptr.i = tstartTcConnect;
+ tcConnectptr.i <= tendTcConnect;
+ tcConnectptr.i++) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ switch (tcConnectptr.p->logWriteState) {
+ case TcConnectionrec::NOT_WRITTEN:
+ jam();
+ if (fragptr.i == tcConnectptr.p->fragmentptr) {
+ jam();
+ fragptr.p->activeTcCounter = fragptr.p->activeTcCounter + 1;
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_WRITTEN_WAIT;
+ }//if
+ break;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ }//if
+ }//for
+ if (tendTcConnect < (ctcConnectrecFileSize - 1)) {
+ jam();
+ signal->theData[0] = ZSCAN_TC_CONNECT;
+ signal->theData[1] = tendTcConnect + 1;
+ signal->theData[2] = tabptr.i;
+ signal->theData[3] = fragId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ } else {
+ jam();
+/*------------------------------------------------------*/
+/* THE SCAN HAS BEEN COMPLETED. WE CHECK IF ALL */
+/* OPERATIONS HAVE ALREADY BEEN COMPLETED. */
+/*------------------------------------------------------*/
+ ndbrequire(fragptr.p->activeTcCounter > 0);
+ fragptr.p->activeTcCounter--;
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+/*------------------------------------------------------*/
+/* SET START GLOBAL CHECKPOINT TO THE NEXT */
+/* CHECKPOINT WE HAVE NOT YET HEARD ANYTHING ABOUT.*/
+/* THIS GCP WILL BE COMPLETELY COVERED BY THE LOG.*/
+/*------------------------------------------------------*/
+ fragptr.p->startGci = cnewestGci + 1;
+ sendCopyActiveConf(signal, tabptr.i);
+ }//if
+ }//if
+ return;
+}//Dblqh::scanTcConnectLab()
+
+/*---------------------------------------------------------------------------*/
+/* A NEW MASTER IS REQUESTING THE STATE IN LQH OF THE COPY FRAGMENT PARTS. */
+/*---------------------------------------------------------------------------*/
+/* ***************>> */
+/* COPY_STATEREQ > */
+/* ***************>> */
+void Dblqh::execCOPY_STATEREQ(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(0);
+#if 0
+ Uint32* dataPtr = &signal->theData[2];
+ BlockReference tmasterBlockref = signal->theData[0];
+ Uint32 tnoCopy = 0;
+ do {
+ jam();
+ arrGuard(tnoCopy, 4);
+ fragptr.i = cactiveCopy[tnoCopy];
+ if (fragptr.i == RNIL) {
+ jam();
+ break;
+ }//if
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->copyFragState != ZIDLE) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS FRAGMENT IS CURRENTLY ACTIVE IN COPYING THE FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ scanptr.i = fragptr.p->fragScanRec[NR_ScanNo];
+ c_scanRecordPool.getPtr(scanptr);
+ if (scanptr.p->scanCompletedStatus == ZTRUE) {
+ jam();
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_CLOSING;
+ } else {
+ jam();
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ONGOING;
+ }//if
+ dataPtr[2 + (tnoCopy << 2)] = scanptr.p->scanSchemaVersion;
+ scanptr.p->scanApiBlockref = tmasterBlockref;
+ } else {
+ ndbrequire(fragptr.p->activeTcCounter != 0);
+/*---------------------------------------------------------------------------*/
+/* COPY FRAGMENT IS COMPLETED AND WE ARE CURRENTLY GETTING THE STARTING */
+/* GCI OF THE NEW REPLICA OF THIS FRAGMENT. */
+/*---------------------------------------------------------------------------*/
+ fragptr.p->masterBlockref = tmasterBlockref;
+ dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ACTIVATION;
+ }//if
+ dataPtr[tnoCopy << 2] = fragptr.p->tabRef;
+ dataPtr[1 + (tnoCopy << 2)] = fragptr.p->fragId;
+ tnoCopy++;
+ } while (tnoCopy < cnoActiveCopy);
+ signal->theData[0] = cownNodeid;
+ signal->theData[1] = tnoCopy;
+ sendSignal(tmasterBlockref, GSN_COPY_STATECONF, signal, 18, JBB);
+#endif
+ return;
+}//Dblqh::execCOPY_STATEREQ()
+
+/* ========================================================================= */
+/* ======= INITIATE TC RECORD AT COPY FRAGMENT ======= */
+/* */
+/* SUBROUTINE SHORT NAME = ICT */
+/* ========================================================================= */
+void Dblqh::initCopyTc(Signal* signal)
+{
+ const NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
+ scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
+ scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
+ scanptr.p->scanLocalFragid = nextScanConf->fragId;
+ tcConnectptr.p->operation = ZREAD;
+ tcConnectptr.p->apiVersionNo = 0;
+ tcConnectptr.p->opExec = 0; /* NOT INTERPRETED MODE */
+ tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
+ Uint32 reqinfo = 0;
+ LqhKeyReq::setLockType(reqinfo, ZINSERT);
+ LqhKeyReq::setDirtyFlag(reqinfo, 1);
+ LqhKeyReq::setSimpleFlag(reqinfo, 1);
+ LqhKeyReq::setOperation(reqinfo, ZWRITE);
+ /* AILen in LQHKEYREQ IS ZERO */
+ tcConnectptr.p->reqinfo = reqinfo;
+/* ------------------------------------------------------------------------ */
+/* THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */
+/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. */
+/* ------------------------------------------------------------------------ */
+ tcConnectptr.p->nodeAfterNext[0] = ZNIL;
+ tcConnectptr.p->nodeAfterNext[1] = ZNIL;
+ tcConnectptr.p->tcBlockref = cownref;
+ tcConnectptr.p->readlenAi = 0;
+ tcConnectptr.p->storedProcId = ZNIL;
+ tcConnectptr.p->opExec = 0;
+ tcConnectptr.p->nextSeqNoReplica = 0;
+ tcConnectptr.p->dirtyOp = ZFALSE;
+ tcConnectptr.p->lastReplicaNo = 0;
+ tcConnectptr.p->currTupAiLen = 0;
+ tcConnectptr.p->tcTimer = cLqhTimeOutCount;
+}//Dblqh::initCopyTc()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND COPY_ACTIVECONF TO MASTER DIH ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendCopyActiveConf(Signal* signal, Uint32 tableId)
+{
+ releaseActiveCopy(signal);
+ CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
+ conf->userPtr = fragptr.p->masterPtr;
+ conf->tableId = tableId;
+ conf->fragId = fragptr.p->fragId;
+ conf->startingNodeId = cownNodeid;
+ conf->startGci = fragptr.p->startGci;
+ sendSignal(fragptr.p->masterBlockref, GSN_COPY_ACTIVECONF, signal,
+ CopyActiveConf::SignalLength, JBB);
+}//Dblqh::sendCopyActiveConf()
+
+/* ##########################################################################
+ * ####### LOCAL CHECKPOINT MODULE #######
+ *
+ * ##########################################################################
+ * --------------------------------------------------------------------------
+ * THIS MODULE HANDLES THE EXECUTION AND CONTROL OF LOCAL CHECKPOINTS
+ * IT CONTROLS THE LOCAL CHECKPOINTS IN TUP AND ACC. IT ALSO INTERACTS
+ * WITH DIH TO CONTROL WHICH GLOBAL CHECKPOINTS ARE RECOVERABLE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execEMPTY_LCP_REQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(5008);
+ EmptyLcpReq * const emptyLcpOrd = (EmptyLcpReq*)&signal->theData[0];
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ Uint32 nodeId = refToNode(emptyLcpOrd->senderRef);
+
+ lcpPtr.p->m_EMPTY_LCP_REQ.set(nodeId);
+ lcpPtr.p->reportEmpty = true;
+
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE){
+ jam();
+ bool ok = false;
+ switch(clcpCompletedState){
+ case LCP_IDLE:
+ ok = true;
+ sendEMPTY_LCP_CONF(signal, true);
+ break;
+ case LCP_RUNNING:
+ ok = true;
+ sendEMPTY_LCP_CONF(signal, false);
+ break;
+ case LCP_CLOSE_STARTED:
+ jam();
+ case ACC_LCP_CLOSE_COMPLETED:
+ jam();
+ case TUP_LCP_CLOSE_COMPLETED:
+ jam();
+ ok = true;
+ break;
+ }
+ ndbrequire(ok);
+
+ }//if
+
+ return;
+}//Dblqh::execEMPTY_LCP_REQ()
+
+void Dblqh::execLCP_FRAG_ORD(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(5010);
+ LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
+ Uint32 lcpId = lcpFragOrd->lcpId;
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+
+ lcpPtr.p->lastFragmentFlag = lcpFragOrd->lastFragmentFlag;
+ if (lcpFragOrd->lastFragmentFlag) {
+ jam();
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ /* ----------------------------------------------------------
+ * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
+ * -------------------------------------------------------- */
+ if (cnoOfFragsCheckpointed > 0) {
+ jam();
+ completeLcpRoundLab(signal);
+ } else {
+ jam();
+ sendLCP_COMPLETE_REP(signal, lcpId);
+ }//if
+ }
+ return;
+ }//if
+ tabptr.i = lcpFragOrd->tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+
+ ndbrequire(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE ||
+ tabptr.p->tableStatus == Tablerec::TABLE_DEFINED);
+
+ ndbrequire(getFragmentrec(signal, lcpFragOrd->fragmentId));
+
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ ndbrequire(!lcpPtr.p->lcpQueued);
+ if (c_lcpId < lcpFragOrd->lcpId) {
+ jam();
+ /**
+ * A new LCP
+ */
+ c_lcpId = lcpFragOrd->lcpId;
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_IDLE);
+ setLogTail(signal, lcpFragOrd->keepGci);
+ ndbrequire(clcpCompletedState == LCP_IDLE);
+ clcpCompletedState = LCP_RUNNING;
+ }//if
+ cnoOfFragsCheckpointed++;
+
+ if(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ LcpRecord::FragOrd fragOrd;
+ fragOrd.fragPtrI = fragptr.i;
+ fragOrd.lcpFragOrd = * lcpFragOrd;
+ sendLCP_FRAG_REP(signal, fragOrd);
+ return;
+ }
+
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_IDLE) {
+ ndbrequire(lcpPtr.p->lcpQueued == false);
+ lcpPtr.p->lcpQueued = true;
+ lcpPtr.p->queuedFragment.fragPtrI = fragptr.i;
+ lcpPtr.p->queuedFragment.lcpFragOrd = * lcpFragOrd;
+ return;
+ }//if
+
+ lcpPtr.p->currentFragment.fragPtrI = fragptr.i;
+ lcpPtr.p->currentFragment.lcpFragOrd = * lcpFragOrd;
+
+ sendLCP_FRAGIDREQ(signal);
+}//Dblqh::execLCP_FRAG_ORD()
+
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_PTR:LCP_STATE = WAIT_FRAGID
+ * --------------------------------------------------------------------------
+ * WE NOW HAVE THE LOCAL FRAGMENTS THAT THE LOCAL CHECKPOINT WILL USE.
+ * -------------------------------------------------------------------------- */
+void Dblqh::execLCP_FRAGIDCONF(Signal* signal)
+{
+ UintR Tfragid[4];
+
+ jamEntry();
+
+ lcpPtr.i = signal->theData[0];
+
+ Uint32 TaccPtr = signal->theData[1];
+ Uint32 noLocfrag = signal->theData[2];
+ Tfragid[0] = signal->theData[3];
+ Tfragid[1] = signal->theData[4];
+
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_FRAGID);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECKING OF TNO_LOCFRAG VALUE. AN OUT-OF-BOUNDS VALUE WILL CAUSE
+ * AN INDEX OUT OF RANGE, WHICH CAUSES A SYSTEM RESTART. THIS IS DESIRED.
+ * ------------------------------------------------------------------------ */
+ lcpPtr.p->lcpAccptr = TaccPtr;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(noLocfrag - 1 < 2);
+ for (Uint32 Tindex = 0; Tindex < noLocfrag; Tindex++) {
+ jam();
+ Uint32 fragId = Tfragid[Tindex];
+ /* ----------------------------------------------------------------------
+ * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
+ * MANY LOCAL LCP RECORDS THERE SHOULD BE, AND IT SHOULD NEVER HAPPEN THAT
+ * NONE IS FREE. IF NONE IS FREE IT WILL ALSO RESULT IN A POINTER
+ * OUT OF RANGE, WHICH IS AN ERROR IN ITSELF. REUSES ERROR HANDLING
+ * IN AXE VM.
+ * ---------------------------------------------------------------------- */
+ seizeLcpLoc(signal);
+ initLcpLocAcc(signal, fragId);
+ seizeLcpLoc(signal);
+ initLcpLocTup(signal, fragId);
+ signal->theData[0] = lcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[3] = lcpLocptr.p->locFragid;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_PREPLCPREQ, signal, 6, JBB);
+ }//for
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_TUP_PREPLCP;
+ return;
+}//Dblqh::execLCP_FRAGIDCONF()
+
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_STATE = WAIT_TUPPREPLCP
+ * --------------------------------------------------------------------------
+ * WE HAVE NOW PREPARED A LOCAL FRAGMENT IN TUP FOR LCP EXECUTION.
+ * -------------------------------------------------------------------------- */
+void Dblqh::execTUP_PREPLCPCONF(Signal* signal)
+{
+ UintR ttupPtr;
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ttupPtr = signal->theData[1];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
+
+ lcpLocptr.p->tupRef = ttupPtr;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ checkLcpTupprep(signal);
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_WAIT_HOLDOPS) {
+ jam();
+ return;
+ }//if
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ jam();
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_LCPHOLDOP;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.p->locFragid;
+ signal->theData[2] = 0;
+ signal->theData[3] = lcpLocptr.i;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
+ lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
+ } while (lcpLocptr.i != RNIL);
+ /* ------------------------------------------------------------------------
+ * SET STATE ON FRAGMENT TO BLOCKED TO ENSURE THAT NO MORE OPERATIONS ARE
+ * STARTED FROM LQH IN TUP AND ACC UNTIL THE START CHECKPOINT HAS BEEN
+ * COMPLETED. ALSO SET THE LOCAL CHECKPOINT STATE TO WAIT FOR
+ * LCP_HOLDOPCONF
+ * ----------------------------------------------------------------------- */
+ fragptr.p->fragStatus = Fragrecord::BLOCKED;
+ fragptr.p->fragActiveStatus = ZTRUE;
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
+ return;
+}//Dblqh::execTUP_PREPLCPCONF()
+
+void Dblqh::execTUP_PREPLCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUP_PREPLCPREF()
+
+void Dblqh::execLCP_FRAGIDREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execLCP_FRAGIDREF()
+
+/* --------------------------------------------------------------------------
+ * A NUMBER OF OPERATIONS THAT HAVE BEEN SET ON HOLD IN ACC. MOVE THOSE TO
+ * LIST OF BLOCKED ACC OPERATIONS. IF MORE OPERATIONS ARE BLOCKED GET THOSE
+ * OTHERWISE CONTINUE THE LOCAL CHECKPOINT BY REQUESTING TUP AND ACC TO
+ * WRITE THEIR START CHECKPOINT.
+ * --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = WAIT_LCPHOLDOP
+ * ------------------------------------------------------------------------- */
+/* ***************>> */
+/* LCP_HOLDOPCONF > */
+/* ***************>> */
+void Dblqh::execLCP_HOLDOPCONF(Signal* signal)
+{
+ UintR tnoHoldops;
+ Uint32 Tdata[23];
+ Uint32 Tlength;
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ Tlength = signal->theData[1];
+ for (Uint32 i = 0; i < 23; i++)
+ Tdata[i] = signal->theData[i + 2];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
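+ /* ------------------------------------------------------------------------
+ * TLENGTH PACKS THE NUMBER OF HELD OPERATIONS IN ITS LOW 16 BITS AND A
+ * "MORE OPERATIONS PENDING" FLAG IN BIT 16 (CHECKED BELOW).
+ * ----------------------------------------------------------------------- */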
+ tnoHoldops = Tlength & 65535;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(tnoHoldops <= 23);
+ for (Uint32 Tindex = 0; Tindex < tnoHoldops; Tindex++) {
+ jam();
+ tcConnectptr.i = Tdata[Tindex];
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ moveActiveToAcc(signal);
+ }//for
+ if ((Tlength >> 16) == 1) {
+ jam();
+ /* MORE HOLDOPS NEEDED */
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.p->locFragid;
+ signal->theData[2] = 1;
+ signal->theData[3] = lcpLocptr.i;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_HOLDOPREQ, signal, 4, JBA);
+ return;
+ } else {
+ jam();
+
+ /* NO MORE HOLDOPS NEEDED */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::HOLDOP_READY;
+ checkLcpHoldop(signal);
+
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_ACTIVE_FINISH) {
+ if (fragptr.p->activeList == RNIL) {
+ jam();
+ /* ------------------------------------------------------------------
+ * THERE ARE NO MORE ACTIVE OPERATIONS. IT IS NOW OK TO START THE
+ * LOCAL CHECKPOINT IN BOTH TUP AND ACC.
+ * ----------------------------------------------------------------- */
+ sendStartLcp(signal);
+ lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP;
+ } else {
+ jam();
+ // Set this to signal releaseActiveFrag
+ // that it should check to see if it's time to call sendStartLcp
+ fragptr.p->lcpRef = lcpPtr.i;
+ }//if
+ }//if
+ }//if
+
+ /* ----------------------- */
+ /* ELSE */
+ /* ------------------------------------------------------------------------
+ * THERE ARE STILL MORE ACTIVE OPERATIONS. WAIT UNTIL THEY ARE FINISHED.
+ * THIS IS DISCOVERED WHEN RELEASE_ACTIVE_FRAG IS EXECUTED.
+ * ------------------------------------------------------------------------
+ * DO NOTHING, EXIT IS EXECUTED BELOW
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::execLCP_HOLDOPCONF()
+
+/* ***************> */
+/* LCP_HOLDOPREF > */
+/* ***************> */
+void Dblqh::execLCP_HOLDOPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execLCP_HOLDOPREF()
+
+/* ************************************************************************>>
+ * ACC_LCPSTARTED: Confirms that ACC has started the local checkpoint and
+ * that undo logging is on.
+ * ************************************************************************>>
+ * --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_WAIT_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execACC_LCPSTARTED(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_STARTED;
+ lcpStartedLab(signal);
+ return;
+}//Dblqh::execACC_LCPSTARTED()
+
+/* ******************************************> */
+/* TUP_LCPSTARTED: Same as above but for TUP. */
+/* ******************************************> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_WAIT_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execTUP_LCPSTARTED(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_STARTED;
+ lcpStartedLab(signal);
+ return;
+}//Dblqh::execTUP_LCPSTARTED()
+
+void Dblqh::lcpStartedLab(Signal* signal)
+{
+ checkLcpStarted(signal);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS BEEN STARTED. IT IS NOW TIME TO
+ * RESTART THE TRANSACTIONS WHICH HAVE BEEN BLOCKED.
+ * --------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ /* ----------------------------------------------------------------------
+ * UPDATE THE MAX_GCI_IN_LCP AND MAX_GCI_COMPLETED_IN_LCP NOW BEFORE
+ * ACTIVATING THE FRAGMENT AGAIN.
+ * --------------------------------------------------------------------- */
+ ndbrequire(lcpPtr.p->currentFragment.lcpFragOrd.lcpNo < MAX_LCP_STORED);
+ fragptr.p->maxGciInLcp = fragptr.p->newestGci;
+ fragptr.p->maxGciCompletedInLcp = cnewestCompletedGci;
+ sendAccContOp(signal); /* START OPERATIONS IN ACC */
+ moveAccActiveFrag(signal); /* MOVE FROM ACC BLOCKED LIST TO ACTIVE LIST
+ ON FRAGMENT */
+ }
+ /*---------------*/
+ /* ELSE */
+ /*-------------------------------------------------------------------------*/
+ /* THE LOCAL CHECKPOINT HAS NOT BEEN STARTED. EXIT AND WAIT FOR
+ * MORE SIGNALS */
+ /*-------------------------------------------------------------------------*/
+ /* DO NOTHING, EXIT IS EXECUTED BELOW */
+ /*-------------------------------------------------------------------------*/
+ return;
+}//Dblqh::lcpStartedLab()
+
+/*---------------------------------------------------------------------------
+ * ACC HAS RESTARTED THE BLOCKED OPERATIONS AGAIN IN ONE FRAGMENT PART.
+ * IT IS NOW OUR TURN TO RESTART ALL OPERATIONS QUEUED IN LQH IF ALL
+ * FRAGMENT PARTS ARE COMPLETED.
+ *-------------------------------------------------------------------------- */
+void Dblqh::execACC_CONTOPCONF(Signal* signal)
+{
+ if(ERROR_INSERTED(5035) && signal->getSendersBlockRef() != reference()){
+ sendSignalWithDelay(reference(), GSN_ACC_CONTOPCONF, signal, 1000,
+ signal->length());
+ return;
+ }
+
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ lcpLocptr.p->accContCounter = 1;
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (lcpLocptr.p->accContCounter == 0) {
+ jam();
+ return;
+ }//if
+ lcpLocptr.i = lcpLocptr.p->nextLcpLoc;
+ } while (lcpLocptr.i != RNIL);
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ restartOperationsLab(signal);
+ return;
+}//Dblqh::execACC_CONTOPCONF()
+
+/* ********************************************************* */
+/* LQH_RESTART_OP: Restart operations after being blocked.  */
+/* ********************************************************* */
+/*---------------------------------------------------------------------------*/
+/* PRECONDITION: FRAG_STATUS = BLOCKED AND LCP_STATE = STARTED */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execLQH_RESTART_OP(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+
+ lcpPtr.i = signal->theData[1];
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) {
+ jam();
+ /***********************************************************************/
+ /* THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND
+ * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE
+ * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED.
+ ***********************************************************************/
+ restartOperationsLab(signal);
+ } else if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
+ jam();
+ /*******************************************************************>
+ * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP
+ * ALL OPERATIONS AGAIN.
+ * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT
+ * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS.
+ *******************************************************************> */
+ restartOperationsLab(signal);
+ } else {
+ ndbrequire(false);
+ }
+}//Dblqh::execLQH_RESTART_OP()
+
+void Dblqh::restartOperationsLab(Signal* signal)
+{
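+ // The queued operations are restarted in batches: each one gets its own
+ // CONTINUEB signal, and after 16 operations the remainder is deferred by
+ // re-sending LQH_RESTART_OP to ourselves so that a long wait queue does not
+ // occupy the scheduler for too long.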
+ Uint32 loopCount = 0;
+ tcConnectptr.i = fragptr.p->firstWaitQueue;
+ do {
+ if (tcConnectptr.i != RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* START UP THE TRANSACTION AGAIN. WE START IT AS A SEPARATE SIGNAL. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZRESTART_OPERATIONS_AFTER_STOP;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = fragptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.i = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+/*--------------------------------------------------------------------------*/
+/* NO MORE OPERATIONS TO RESTART. WE CAN NOW RESET THE STATE TO ACTIVE AND */
+/* RESTART NORMAL ACTIVITIES ON THE FRAGMENT WHILE THE FUZZY PART OF THE */
+/* LOCAL CHECKPOINT IS COMPLETING. */
+/* IF THE CHECKPOINT WAS COMPLETED ALREADY ON THIS FRAGMENT WE PROCEED WITH */
+/* THE NEXT FRAGMENT NOW THAT WE HAVE COMPLETED THIS CHECKPOINT. */
+/*--------------------------------------------------------------------------*/
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) {
+ jam();
+ contChkpNextFragLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ loopCount++;
+ if (loopCount > 16) {
+ jam();
+ signal->theData[0] = fragptr.i;
+ signal->theData[1] = lcpPtr.i;
+ sendSignal(cownref, GSN_LQH_RESTART_OP, signal, 2, JBB);
+ return;
+ }//if
+ } while (1);
+}//Dblqh::restartOperationsLab()
+
+void Dblqh::restartOperationsAfterStopLab(Signal* signal)
+{
+ /*-------------------------------------------------------------------------
+ * WHEN ARRIVING HERE THE OPERATION IS ALREADY SET IN THE ACTIVE LIST.
+ * THUS WE CAN IMMEDIATELY CALL THE METHODS THAT EXECUTE FROM WHERE
+ * THE OPERATION WAS STOPPED.
+ *------------------------------------------------------------------------ */
+ switch (tcConnectptr.p->transactionState) {
+ case TcConnectionrec::STOPPED:
+ jam();
+ /*-----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACCKEYREQ
+ *---------------------------------------------------------------------- */
+ prepareContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COMMIT_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACC_COMMITREQ
+ * --------------------------------------------------------------------- */
+ releaseActiveFrag(signal);
+ commitContinueAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::ABORT_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND ACC_ABORTREQ
+ * --------------------------------------------------------------------- */
+ abortContinueAfterBlockedLab(signal, true);
+ return;
+ break;
+ case TcConnectionrec::COPY_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
+ * --------------------------------------------------------------------- */
+ continueCopyAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_FIRST_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING COPY FRAGMENT
+ * --------------------------------------------------------------------- */
+ continueFirstCopyAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_FIRST_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueFirstScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_CHECK_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueAfterCheckLcpStopBlocked(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_RELEASE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING RELEASE
+ * LOCKS IN SCAN
+ * --------------------------------------------------------------------- */
+ tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
+ continueScanReleaseAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::SCAN_CLOSE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF SCAN
+ * --------------------------------------------------------------------- */
+ continueCloseScanAfterBlockedLab(signal);
+ return;
+ break;
+ case TcConnectionrec::COPY_CLOSE_STOPPED:
+ jam();
+ /* ----------------------------------------------------------------------
+ * STOPPED BEFORE TRYING TO SEND NEXT_SCANREQ DURING CLOSE OF COPY
+ * --------------------------------------------------------------------- */
+ continueCloseCopyAfterBlockedLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::restartOperationsAfterStopLab()
+
+/* *************** */
+/* ACC_LCPCONF > */
+/* *************** */
+/*---------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = ACC_STARTED
+ *-------------------------------------------------------------------------- */
+void Dblqh::execACC_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN
+ * THIS REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A
+ * SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_COMPLETED;
+ lcpCompletedLab(signal);
+ return;
+}//Dblqh::execACC_LCPCONF()
+
+/* *************** */
+/* TUP_LCPCONF > */
+/* *************** */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = TUP_STARTED
+ * ------------------------------------------------------------------------- */
+void Dblqh::execTUP_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS
+ * REFERENCE WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ----------------------------------------------------------------------- */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_COMPLETED;
+ lcpCompletedLab(signal);
+ return;
+}//Dblqh::execTUP_LCPCONF()
+
+void Dblqh::lcpCompletedLab(Signal* signal)
+{
+ checkLcpCompleted(signal);
+ if (lcpPtr.p->lcpState != LcpRecord::LCP_COMPLETED) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS NOT BEEN COMPLETED, EXIT & WAIT
+ * FOR MORE SIGNALS
+ * --------------------------------------------------------------------- */
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT HAS BEEN COMPLETED. IT IS NOW TIME TO START
+ * A LOCAL CHECKPOINT ON THE NEXT FRAGMENT OR COMPLETE THIS LCP ROUND.
+ * ------------------------------------------------------------------------
+ * WE START BY SENDING LCP_FRAG_REP TO DIH TO REPORT THE COMPLETED LCP.
+ * TO CATER FOR NODE CRASHES WE SEND IT IN PARALLEL TO ALL NODES.
+ * ----------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->fragActiveStatus = ZFALSE;
+
+ contChkpNextFragLab(signal);
+ return;
+}//Dblqh::lcpCompletedLab()
+
+void
+Dblqh::sendLCP_FRAG_REP(Signal * signal,
+ const LcpRecord::FragOrd & fragOrd) const {
+
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragOrd.fragPtrI;
+ ptrCheckGuard(fragPtr, cfragrecFileSize, fragrecord);
+
+ ndbrequire(fragOrd.lcpFragOrd.lcpNo < MAX_LCP_STORED);
+ LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0];
+ lcpReport->nodeId = cownNodeid;
+ lcpReport->lcpId = fragOrd.lcpFragOrd.lcpId;
+ lcpReport->lcpNo = fragOrd.lcpFragOrd.lcpNo;
+ lcpReport->tableId = fragOrd.lcpFragOrd.tableId;
+ lcpReport->fragId = fragOrd.lcpFragOrd.fragmentId;
+ lcpReport->maxGciCompleted = fragPtr.p->maxGciCompletedInLcp;
+ lcpReport->maxGciStarted = fragPtr.p->maxGciInLcp;
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if(cnodeStatus[i] == ZNODE_UP){
+ jam();
+ BlockReference Tblockref = calcDihBlockRef(nodeId);
+ sendSignal(Tblockref, GSN_LCP_FRAG_REP, signal,
+ LcpFragRep::SignalLength, JBB);
+ }//if
+ }//for
+}
+
+void Dblqh::contChkpNextFragLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+ * UPDATE THE LATEST LOCAL CHECKPOINT COMPLETED ON FRAGMENT.
+ * UPDATE THE LCP_ID OF THIS CHECKPOINT.
+ * REMOVE THE LINK BETWEEN THE FRAGMENT RECORD AND THE LCP RECORD.
+ * ----------------------------------------------------------------------- */
+ if (fragptr.p->fragStatus == Fragrecord::BLOCKED) {
+ jam();
+ /**
+ * LCP of fragment complete
+ * but restarting of operations isn't
+ */
+ lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP;
+ //restartOperationsLab(signal);
+ return;
+ }//if
+
+ /**
+ * Send rep when fragment is done + unblocked
+ */
+ sendLCP_FRAG_REP(signal, lcpPtr.p->currentFragment);
+
+ /* ------------------------------------------------------------------------
+ * WE ALSO RELEASE THE LOCAL LCP RECORDS.
+ * ----------------------------------------------------------------------- */
+ releaseLocalLcps(signal);
+ if (lcpPtr.p->lcpQueued) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * Transfer the state from the queued to the active LCP.
+ * --------------------------------------------------------------------- */
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->currentFragment = lcpPtr.p->queuedFragment;
+
+ /* ----------------------------------------------------------------------
+ * START THE QUEUED LOCAL CHECKPOINT.
+ * --------------------------------------------------------------------- */
+ sendLCP_FRAGIDREQ(signal);
+ return;
+ }//if
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ if (lcpPtr.p->lastFragmentFlag){
+ jam();
+ /* ----------------------------------------------------------------------
+ * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED.
+ * --------------------------------------------------------------------- */
+ completeLcpRoundLab(signal);
+ return;
+ }//if
+
+ if (lcpPtr.p->reportEmpty) {
+ jam();
+ sendEMPTY_LCP_CONF(signal, false);
+ }//if
+ return;
+}//Dblqh::contChkpNextFragLab()
+
+void Dblqh::sendLCP_FRAGIDREQ(Signal* signal)
+{
+ ndbrequire(lcpPtr.p->firstLcpLocTup == RNIL);
+ ndbrequire(lcpPtr.p->firstLcpLocAcc == RNIL);
+
+ TablerecPtr tabPtr;
+ tabPtr.i = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ ptrAss(tabPtr, tablerec);
+ if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING ||
+ tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){
+ jam();
+ /**
+ * Fake that the fragment is done
+ */
+ lcpCompletedLab(signal);
+ return;
+ }
+
+ ndbrequire(tabPtr.p->tableStatus == Tablerec::TABLE_DEFINED);
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_FRAGID;
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ signal->theData[5] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->accBlockref, GSN_LCP_FRAGIDREQ, signal, 6, JBB);
+}//Dblqh::sendLCP_FRAGIDREQ()
+
+void Dblqh::sendEMPTY_LCP_CONF(Signal* signal, bool idle)
+{
+
+ EmptyLcpConf * const rep = (EmptyLcpConf*)&signal->theData[0];
+ /* ----------------------------------------------------------------------
+ * We have been requested to report when there are no more local
+ * checkpoints waiting to be started or ongoing. In this signal we also
+ * report the state of the last completed fragment.
+ * ---------------------------------------------------------------------- */
+ rep->senderNodeId = getOwnNodeId();
+ if(!idle){
+ jam();
+ rep->idle = 0 ;
+ rep->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ rep->fragmentId = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ rep->lcpNo = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ rep->lcpId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId;
+ } else {
+ jam();
+ rep->idle = 1;
+ rep->tableId = ~0;
+ rep->fragmentId = ~0;
+ rep->lcpNo = ~0;
+ rep->lcpId = c_lcpId;
+ }
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if (lcpPtr.p->m_EMPTY_LCP_REQ.get(nodeId)) {
+ jam();
+
+ BlockReference blockref = calcDihBlockRef(nodeId);
+ sendSignal(blockref, GSN_EMPTY_LCP_CONF, signal,
+ EmptyLcpConf::SignalLength, JBB);
+ }//if
+ }//for
+
+ lcpPtr.p->reportEmpty = false;
+ lcpPtr.p->m_EMPTY_LCP_REQ.clear();
+}//Dblqh::sendEMPTY_LCP_CONF()
+
+void Dblqh::execACC_LCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execACC_LCPREF()
+
+void Dblqh::execTUP_LCPREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execTUP_LCPREF()
+
+/* --------------------------------------------------------------------------
+ * THE LOCAL CHECKPOINT ROUND IS NOW COMPLETED. SEND COMPLETED MESSAGE
+ * TO THE MASTER DIH.
+ * ------------------------------------------------------------------------- */
+void Dblqh::completeLcpRoundLab(Signal* signal)
+{
+ clcpCompletedState = LCP_CLOSE_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_END_LCPREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_END_LCPREQ, signal, 2, JBB);
+ return;
+}//Dblqh::completeLcpRoundLab()
+
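+ // END_LCPCONF arrives once from ACC and once from TUP, in either order.
+ // clcpCompletedState records which block has answered so that
+ // LCP_COMPLETE_REP is sent only after both confirms have been received.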
+void Dblqh::execEND_LCPCONF(Signal* signal)
+{
+ jamEntry();
+ BlockReference userpointer = signal->theData[0];
+ if (userpointer == caccBlockref) {
+ if (clcpCompletedState == LCP_CLOSE_STARTED) {
+ jam();
+ clcpCompletedState = ACC_LCP_CLOSE_COMPLETED;
+ return;
+ } else {
+ jam();
+ ndbrequire(clcpCompletedState == TUP_LCP_CLOSE_COMPLETED);
+ clcpCompletedState = LCP_IDLE;
+ }//if
+ } else {
+ ndbrequire(userpointer == ctupBlockref);
+ if (clcpCompletedState == LCP_CLOSE_STARTED) {
+ jam();
+ clcpCompletedState = TUP_LCP_CLOSE_COMPLETED;
+ return;
+ } else {
+ jam();
+ ndbrequire(clcpCompletedState == ACC_LCP_CLOSE_COMPLETED);
+ clcpCompletedState = LCP_IDLE;
+ }//if
+ }//if
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId);
+}//Dblqh::execEND_LCPCONF()
+
+void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId)
+{
+ cnoOfFragsCheckpointed = 0;
+ ndbrequire((cnoOfNodes - 1) < (MAX_NDB_NODES - 1));
+ /* ------------------------------------------------------------------------
+ * WE SEND LCP_COMPLETE_REP TO ALL NODES TO PREPARE FOR NODE CRASHES.
+ * ----------------------------------------------------------------------- */
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ lcpPtr.p->lastFragmentFlag = false;
+
+ LcpCompleteRep* rep = (LcpCompleteRep*)signal->getDataPtrSend();
+ rep->nodeId = getOwnNodeId();
+ rep->lcpId = lcpId;
+ rep->blockNo = DBLQH;
+
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ Uint32 nodeId = cnodeData[i];
+ if(cnodeStatus[i] == ZNODE_UP){
+ jam();
+
+ BlockReference blockref = calcDihBlockRef(nodeId);
+ sendSignal(blockref, GSN_LCP_COMPLETE_REP, signal,
+ LcpCompleteRep::SignalLength, JBB);
+ }//if
+ }//for
+
+ if(lcpPtr.p->reportEmpty){
+ jam();
+ sendEMPTY_LCP_CONF(signal, true);
+ }
+ return;
+}//Dblqh::sendLCP_COMPLETE_REP()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CLC
+ * ========================================================================= */
+void Dblqh::checkLcpCompleted(Signal* signal)
+{
+ LcpLocRecordPtr clcLcpLocptr;
+
+ clcLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ while (clcLcpLocptr.i != RNIL) {
+ ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::ACC_COMPLETED) {
+ jam();
+ ndbrequire((clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED) ||
+ (clcLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED));
+ return;
+ }//if
+ clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
+ }
+
+ clcLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ while (clcLcpLocptr.i != RNIL){
+ ptrCheckGuard(clcLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clcLcpLocptr.p->lcpLocstate != LcpLocRecord::TUP_COMPLETED) {
+ jam();
+ ndbrequire((clcLcpLocptr.p->lcpLocstate==LcpLocRecord::TUP_WAIT_STARTED)
+ ||(clcLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_STARTED));
+ return;
+ }//if
+ clcLcpLocptr.i = clcLcpLocptr.p->nextLcpLoc;
+ }
+
+ lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED;
+}//Dblqh::checkLcpCompleted()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL HOLD OPERATIONS ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CHO
+ * ========================================================================= */
+void Dblqh::checkLcpHoldop(Signal* signal)
+{
+ LcpLocRecordPtr choLcpLocptr;
+
+ choLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(choLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (choLcpLocptr.p->lcpLocstate != LcpLocRecord::HOLDOP_READY) {
+ ndbrequire(choLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_LCPHOLDOP);
+ return;
+ }//if
+ choLcpLocptr.i = choLcpLocptr.p->nextLcpLoc;
+ } while (choLcpLocptr.i != RNIL);
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_ACTIVE_FINISH;
+}//Dblqh::checkLcpHoldop()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PARTS OF A LOCAL CHECKPOINT ARE STARTED =======
+ *
+ * SUBROUTINE SHORT NAME = CLS
+ * ========================================================================== */
+void Dblqh::checkLcpStarted(Signal* signal)
+{
+ LcpLocRecordPtr clsLcpLocptr;
+
+ terrorCode = ZOK;
+ clsLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ int i = 0;
+ do {
+ ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){
+ return;
+ }//if
+ clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
+ } while (clsLcpLocptr.i != RNIL);
+
+ i = 0;
+ clsLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){
+ return;
+ }//if
+ clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc;
+ i++;
+ } while (clsLcpLocptr.i != RNIL);
+ lcpPtr.p->lcpState = LcpRecord::LCP_STARTED;
+}//Dblqh::checkLcpStarted()
+
+/* ==========================================================================
+ * ======= CHECK IF ALL PREPARE TUP OPERATIONS ARE COMPLETED =======
+ *
+ * SUBROUTINE SHORT NAME = CLT
+ * ========================================================================== */
+void Dblqh::checkLcpTupprep(Signal* signal)
+{
+ LcpLocRecordPtr cltLcpLocptr;
+ cltLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ ptrCheckGuard(cltLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cltLcpLocptr.p->lcpLocstate != LcpLocRecord::IDLE) {
+ ndbrequire(cltLcpLocptr.p->lcpLocstate == LcpLocRecord::WAIT_TUP_PREPLCP);
+ return;
+ }//if
+ cltLcpLocptr.i = cltLcpLocptr.p->nextLcpLoc;
+ } while (cltLcpLocptr.i != RNIL);
+ lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS;
+}//Dblqh::checkLcpTupprep()
+
+/* ==========================================================================
+ * ======= INITIATE LCP LOCAL RECORD USED TOWARDS ACC =======
+ *
+ * ========================================================================== */
+void Dblqh::initLcpLocAcc(Signal* signal, Uint32 fragId)
+{
+ lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocAcc;
+ lcpPtr.p->firstLcpLocAcc = lcpLocptr.i;
+ lcpLocptr.p->locFragid = fragId;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::ACC;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->masterLcpRec = lcpPtr.i;
+ lcpLocptr.p->tupRef = RNIL;
+}//Dblqh::initLcpLocAcc()
+
+/* ==========================================================================
+ * ======= INITIATE LCP LOCAL RECORD USED TOWARDS TUP =======
+ *
+ * ========================================================================== */
+void Dblqh::initLcpLocTup(Signal* signal, Uint32 fragId)
+{
+ lcpLocptr.p->nextLcpLoc = lcpPtr.p->firstLcpLocTup;
+ lcpPtr.p->firstLcpLocTup = lcpLocptr.i;
+ lcpLocptr.p->locFragid = fragId;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::TUP;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::WAIT_TUP_PREPLCP;
+ lcpLocptr.p->masterLcpRec = lcpPtr.i;
+ lcpLocptr.p->tupRef = RNIL;
+}//Dblqh::initLcpLocTup()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE OPERATION FROM ACC WAITING LIST ON FRAGMENT -------
+ * ------- TO ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = MAA
+ * -------------------------------------------------------------------------- */
+void Dblqh::moveAccActiveFrag(Signal* signal)
+{
+ UintR maaTcNextConnectptr;
+
+ tcConnectptr.i = fragptr.p->accBlockedList;
+ fragptr.p->accBlockedList = RNIL;
+ /* ------------------------------------------------------------------------
+ * WE WILL MOVE ALL RECORDS FROM THE ACC BLOCKED LIST AT ONCE.
+ * ------------------------------------------------------------------------ */
+ while (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ maaTcNextConnectptr = tcConnectptr.p->nextTc;
+ ndbrequire(tcConnectptr.p->listState == TcConnectionrec::ACC_BLOCK_LIST);
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ linkActiveFrag(signal);
+ tcConnectptr.i = maaTcNextConnectptr;
+ }//while
+}//Dblqh::moveAccActiveFrag()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ * ------- TO ACC BLOCKED LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = MAT
+ * -------------------------------------------------------------------------- */
+void Dblqh::moveActiveToAcc(Signal* signal)
+{
+ TcConnectionrecPtr matTcNextConnectptr;
+
+ releaseActiveList(signal);
+ /* ------------------------------------------------------------------------
+ * PUT OPERATION RECORD FIRST IN ACC BLOCKED LIST.
+ * ------------------------------------------------------------------------ */
+ matTcNextConnectptr.i = fragptr.p->accBlockedList;
+ tcConnectptr.p->nextTc = matTcNextConnectptr.i;
+ tcConnectptr.p->prevTc = RNIL;
+ tcConnectptr.p->listState = TcConnectionrec::ACC_BLOCK_LIST;
+ fragptr.p->accBlockedList = tcConnectptr.i;
+ if (matTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(matTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ matTcNextConnectptr.p->prevTc = tcConnectptr.i;
+ }//if
+}//Dblqh::moveActiveToAcc()
+
+/* ------------------------------------------------------------------------- */
+/* ---- RELEASE LOCAL LCP RECORDS AFTER COMPLETION OF A LOCAL CHECKPOINT---- */
+/* */
+/* SUBROUTINE SHORT NAME = RLL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLocalLcps(Signal* signal)
+{
+ lcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ while (lcpLocptr.i != RNIL){
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tmp = lcpLocptr.p->nextLcpLoc;
+ releaseLcpLoc(signal);
+ lcpLocptr.i = tmp;
+ }
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+
+ lcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ while (lcpLocptr.i != RNIL){
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tmp = lcpLocptr.p->nextLcpLoc;
+ releaseLcpLoc(signal);
+ lcpLocptr.i = tmp;
+ }
+ lcpPtr.p->firstLcpLocTup = RNIL;
+
+}//Dblqh::releaseLocalLcps()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LCP LOCAL RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLcpLoc(Signal* signal)
+{
+ lcpLocptr.i = cfirstfreeLcpLoc;
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ cfirstfreeLcpLoc = lcpLocptr.p->nextLcpLoc;
+ lcpLocptr.p->nextLcpLoc = RNIL;
+}//Dblqh::seizeLcpLoc()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND ACC_CONT_OP ------- */
+/* */
+/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* */
+/* SUBROUTINE SHORT NAME = SAC */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendAccContOp(Signal* signal)
+{
+ LcpLocRecordPtr sacLcpLocptr;
+
+ int count = 0;
+ sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ sacLcpLocptr.p->accContCounter = 0;
+ if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED){
+ /* ------------------------------------------------------------------- */
+ /*SEND START OPERATIONS TO ACC AGAIN */
+ /* ------------------------------------------------------------------- */
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = sacLcpLocptr.p->locFragid;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA);
+ count++;
+ } else if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED){
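+ /* ------------------------------------------------------------------- */
+ /*THIS FRAGMENT PART IS ALREADY COMPLETED IN ACC. THERE IS NOTHING TO */
+ /*RESTART, SO FAKE THE CONFIRM LOCALLY SO THAT THE COUNTING IN */
+ /*execACC_CONTOPCONF STILL SEES ALL FRAGMENT PARTS. */
+ /* ------------------------------------------------------------------- */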
+ signal->theData[0] = sacLcpLocptr.i;
+ sendSignal(reference(), GSN_ACC_CONTOPCONF, signal, 1, JBB);
+ } else {
+ ndbrequire(false);
+ }
+ sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc;
+ } while (sacLcpLocptr.i != RNIL);
+
+}//Dblqh::sendAccContOp()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEND ACC_LCPREQ AND TUP_LCPREQ ------- */
+/* */
+/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */
+/* FRAGPTR FRAGMENT RECORD */
+/* SUBROUTINE SHORT NAME = STL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::sendStartLcp(Signal* signal)
+{
+ LcpLocRecordPtr stlLcpLocptr;
+ stlLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+ do {
+ jam();
+ ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ stlLcpLocptr.p->lcpLocstate = LcpLocRecord::ACC_WAIT_STARTED;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = stlLcpLocptr.i;
+ signal->theData[2] = stlLcpLocptr.p->locFragid;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_LCPREQ, signal, 3, JBA);
+ stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
+ } while (stlLcpLocptr.i != RNIL);
+
+ stlLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+ do {
+ jam();
+ ptrCheckGuard(stlLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ stlLcpLocptr.p->lcpLocstate = LcpLocRecord::TUP_WAIT_STARTED;
+ signal->theData[0] = stlLcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = stlLcpLocptr.p->tupRef;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA);
+ stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc;
+ } while (stlLcpLocptr.i != RNIL);
+}//Dblqh::sendStartLcp()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SET THE LOG TAIL IN THE LOG FILES ------- */
+/* */
+/*THIS SUBROUTINE HAS BEEN BUGGY AND IS RATHER COMPLEX. IT IS IMPORTANT TO */
+/*REMEMBER THAT WE SEARCH FROM THE TAIL UNTIL WE REACH THE HEAD (CURRENT). */
+/*THE TAIL AND HEAD CAN BE ON THE SAME MBYTE. WE SEARCH UNTIL WE FIND A MBYTE*/
+/*THAT WE NEED TO KEEP. WE THEN SET THE TAIL TO BE THE PREVIOUS. IF WE DO */
+/*NOT FIND A MBYTE THAT WE NEED TO KEEP UNTIL WE REACH THE HEAD THEN WE USE */
+/*THE HEAD AS TAIL. FINALLY WE HAVE TO MOVE BACK THE TAIL TO ALSO INCLUDE */
+/*ALL PREPARE RECORDS. THIS MEANS THAT LONG-LIVED TRANSACTIONS ARE DANGEROUS */
+/*FOR SHORT LOGS. */
+/* ------------------------------------------------------------------------- */
+
+// this function has not been verified yet
+Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
+ const LogPartRecordPtr &sltLogPartPtr)
+{
+ Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte;
+ Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte;
+ Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE;
+ if (tf > hf) hf += sz;
+ return sz-(hf-tf);
+}
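+ // Worked example of the wrap-around arithmetic above, assuming
+ // ZNO_MBYTES_IN_FILE is 16 and noLogFiles is 4 (so sz = 64): a head in
+ // file 1 at Mbyte 3 gives hf = 19 and a tail in file 3 at Mbyte 10 gives
+ // tf = 58; since tf > hf the head has wrapped, hf becomes 83 and the
+ // remaining log size is 64 - (83 - 58) = 39 Mbytes.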
+
+void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
+{
+ LogPartRecordPtr sltLogPartPtr;
+ LogFileRecordPtr sltLogFilePtr;
+#if 0
+ LogFileRecordPtr sltCurrLogFilePtr;
+#endif
+ UintR tsltMbyte;
+ UintR tsltStartMbyte;
+ UintR tsltIndex;
+ UintR tsltFlag;
+
+ for (sltLogPartPtr.i = 0; sltLogPartPtr.i < 4; sltLogPartPtr.i++) {
+ jam();
+ ptrAss(sltLogPartPtr, logPartRecord);
+ findLogfile(signal, sltLogPartPtr.p->logTailFileNo,
+ sltLogPartPtr, &sltLogFilePtr);
+
+#if 0
+ sltCurrLogFilePtr.i = sltLogPartPtr.p->currentLogfile;
+ ptrCheckGuard(sltCurrLogFilePtr, clogFileFileSize, logFileRecord);
+ infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
+ remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
+#endif
+
+ tsltMbyte = sltLogPartPtr.p->logTailMbyte;
+ tsltStartMbyte = tsltMbyte;
+ tsltFlag = ZFALSE;
+ if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
+/* ------------------------------------------------------------------------- */
+/*THE LOG HEAD AND THE TAIL ARE ALREADY IN THE SAME FILE. */
+/* ------------------------------------------------------------------------- */
+ if (sltLogFilePtr.p->currentMbyte >= sltLogPartPtr.p->logTailMbyte) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE CURRENT MBYTE IS AHEAD OF OR AT THE TAIL. THUS WE WILL ONLY LOOK FOR */
+/*THE TAIL UNTIL WE REACH THE CURRENT MBYTE WHICH IS IN THIS LOG FILE. */
+/*IF THE LOG TAIL IS AHEAD OF THE CURRENT MBYTE BUT IN THE SAME LOG FILE */
+/*THEN WE HAVE TO SEARCH THROUGH ALL FILES BEFORE WE COME TO THE CURRENT */
+/*MBYTE. WE ALWAYS STOP WHEN WE COME TO THE CURRENT MBYTE SINCE THE TAIL */
+/*CAN NEVER BE BEFORE THE HEAD. */
+/* ------------------------------------------------------------------------- */
+ tsltFlag = ZTRUE;
+ }//if
+ }//if
+
+/* ------------------------------------------------------------------------- */
+/*NOW START SEARCHING FOR THE NEW TAIL, STARTING AT THE CURRENT TAIL AND */
+/*PROCEEDING UNTIL WE FIND A MBYTE WHICH IS NEEDED TO KEEP OR UNTIL WE REACH */
+/*CURRENT MBYTE (THE HEAD). */
+/* ------------------------------------------------------------------------- */
+ SLT_LOOP:
+ for (tsltIndex = tsltStartMbyte;
+ tsltIndex <= ZNO_MBYTES_IN_FILE - 1;
+ tsltIndex++) {
+ if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) {
+/* ------------------------------------------------------------------------- */
+/*WE ARE NOT ALLOWED TO STEP THE LOG ANY FURTHER AHEAD */
+/*SET THE NEW LOG TAIL AND CONTINUE WITH NEXT LOG PART. */
+/*THIS MBYTE IS NOT TO BE INCLUDED SO WE NEED TO STEP BACK ONE MBYTE. */
+/* ------------------------------------------------------------------------- */
+ if (tsltIndex != 0) {
+ jam();
+ tsltMbyte = tsltIndex - 1;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */
+/* ------------------------------------------------------------------------- */
+ tsltMbyte = ZNO_MBYTES_IN_FILE - 1;
+ sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile;
+ ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
+ }//if
+ goto SLT_BREAK;
+ } else {
+ jam();
+ if (tsltFlag == ZTRUE) {
+/* ------------------------------------------------------------------------- */
+/*WE ARE IN THE SAME FILE AS THE CURRENT MBYTE AND WE CAN REACH THE CURRENT */
+/*MBYTE BEFORE WE REACH A NEW TAIL. */
+/* ------------------------------------------------------------------------- */
+ if (tsltIndex == sltLogFilePtr.p->currentMbyte) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*THE TAIL OF THE LOG IS ACTUALLY WITHIN THE CURRENT MBYTE. THUS WE SET THE */
+/*LOG TAIL TO BE THE CURRENT MBYTE. */
+/* ------------------------------------------------------------------------- */
+ tsltMbyte = sltLogFilePtr.p->currentMbyte;
+ goto SLT_BREAK;
+ }//if
+ }//if
+ }//if
+ }//for
+ sltLogFilePtr.i = sltLogFilePtr.p->nextLogFile;
+ ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
+ if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) {
+ jam();
+ tsltFlag = ZTRUE;
+ }//if
+ tsltStartMbyte = 0;
+ goto SLT_LOOP;
+ SLT_BREAK:
+ jam();
+ {
+ UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo;
+ UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte;
+
+ arrGuard(tsltMbyte, 16);
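+ // logLastPrepRef[mbyte] holds the position of the earliest prepare record
+ // still needed by the commit records in this Mbyte: the file number is in
+ // the upper 16 bits and the Mbyte number in the lower 16 bits, as unpacked
+ // below.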
+ sltLogPartPtr.p->logTailFileNo =
+ sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16;
+/* ------------------------------------------------------------------------- */
+/*SINCE LOG_MAX_GCI_STARTED ONLY KEEP TRACK OF COMMIT LOG RECORDS WE ALSO */
+/*HAVE TO STEP BACK THE TAIL SO THAT WE INCLUDE ALL PREPARE RECORDS */
+/*NEEDED FOR THOSE COMMIT RECORDS IN THIS MBYTE. THIS IS A RATHER */
+/*CONSERVATIVE APPROACH BUT IT WORKS. */
+/* ------------------------------------------------------------------------- */
+ sltLogPartPtr.p->logTailMbyte =
+ sltLogFilePtr.p->logLastPrepRef[tsltMbyte] & 65535;
+ if ((ToldTailFileNo != sltLogPartPtr.p->logTailFileNo) ||
+ (ToldTailMByte != sltLogPartPtr.p->logTailMbyte)) {
+ jam();
+ if (sltLogPartPtr.p->logPartState == LogPartRecord::TAIL_PROBLEM) {
+ if (sltLogPartPtr.p->firstLogQueue == RNIL) {
+ jam();
+ sltLogPartPtr.p->logPartState = LogPartRecord::IDLE;
+ } else {
+ jam();
+ sltLogPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ }//if
+ }//if
+ }//if
+ }
+#if 0
+ infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i,
+ remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte);
+#endif
+ }//for
+
+}//Dblqh::setLogTail()
+
+/* ######################################################################### */
+/* ####### GLOBAL CHECKPOINT MODULE ####### */
+/* */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* THIS MODULE HELPS DIH IN DISCOVERING WHEN GLOBAL CHECKPOINTS ARE */
+/* RECOVERABLE. IT HANDLES THE REQUEST GCP_SAVEREQ THAT REQUESTS LQH TO */
+/* SAVE A PARTICULAR GLOBAL CHECKPOINT TO DISK AND RESPOND WHEN COMPLETED. */
+/*---------------------------------------------------------------------------*/
+/* *************** */
+/* GCP_SAVEREQ > */
+/* *************** */
+void Dblqh::execGCP_SAVEREQ(Signal* signal)
+{
+ jamEntry();
+ const GCPSaveReq * const saveReq = (GCPSaveReq *)&signal->theData[0];
+
+ if (ERROR_INSERTED(5000)) {
+ systemErrorLab(signal);
+ }
+
+ if (ERROR_INSERTED(5007)){
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_GCP_SAVEREQ, signal, 10000,
+ signal->length());
+ return;
+ }
+
+ const Uint32 dihBlockRef = saveReq->dihBlockRef;
+ const Uint32 dihPtr = saveReq->dihPtr;
+ const Uint32 gci = saveReq->gci;
+
+ ndbrequire(gci >= cnewestCompletedGci);
+
+ if (gci == cnewestCompletedGci) {
+/*---------------------------------------------------------------------------*/
+/* THE GLOBAL CHECKPOINT HAS ALREADY BEEN HANDLED. THE REQUEST MUST HAVE BEEN */
+/* SENT FROM A NEW MASTER DIH. */
+/*---------------------------------------------------------------------------*/
+ if (ccurrentGcprec == RNIL) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS INDICATES THAT WE HAVE ALREADY SENT GCP_SAVECONF TO PREVIOUS MASTER. */
+/* WE SIMPLY SEND IT ALSO TO THE NEW MASTER. */
+/*---------------------------------------------------------------------------*/
+ GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0];
+ saveConf->dihPtr = dihPtr;
+ saveConf->nodeId = getOwnNodeId();
+ saveConf->gci = cnewestCompletedGci;
+ sendSignal(dihBlockRef, GSN_GCP_SAVECONF, signal,
+ GCPSaveConf::SignalLength, JBA);
+ return;
+ }
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE HAVE NOT YET SENT THE RESPONSE TO THE OLD MASTER. WE WILL SET THE NEW */
+/* RECEIVER OF THE RESPONSE AND THEN EXIT SINCE THE PROCESS IS ALREADY */
+/* STARTED. */
+/*---------------------------------------------------------------------------*/
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+ gcpPtr.p->gcpUserptr = dihPtr;
+ gcpPtr.p->gcpBlockref = dihBlockRef;
+ return;
+ }//if
+
+ ndbrequire(ccurrentGcprec == RNIL);
+
+
+ if(getNodeState().startLevel >= NodeState::SL_STOPPING_4){
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeShutdownInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ if(getNodeState().getNodeRestartInProgress()){
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeRestartInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ ccurrentGcprec = 0;
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+
+ cnewestCompletedGci = gci;
+ if (gci > cnewestGci) {
+ jam();
+ cnewestGci = gci;
+ }//if
+
+ gcpPtr.p->gcpBlockref = dihBlockRef;
+ gcpPtr.p->gcpUserptr = dihPtr;
+ gcpPtr.p->gcpId = gci;
+ bool tlogActive = false;
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_TRUE;
+ tlogActive = true;
+ } else {
+ jam();
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ writeCompletedGciLog(signal);
+ }//if
+ }//for
+ if (tlogActive == true) {
+ jam();
+ return;
+ }//if
+ initGcpRecLab(signal);
+ startTimeSupervision(signal);
+ return;
+}//Dblqh::execGCP_SAVEREQ()
+
+/* ------------------------------------------------------------------------- */
+/* START TIME SUPERVISION OF THE LOG PARTS. */
+/* ------------------------------------------------------------------------- */
+void Dblqh::startTimeSupervision(Signal* signal)
+{
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+/* WE HAVE TO START CHECKING IF THE LOG IS TO BE WRITTEN EVEN IF PAGES ARE */
+/* FULL. INITIALISE THE VALUES OF WHERE WE ARE IN THE LOG CURRENTLY. */
+/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+ logPartPtr.p->logPartTimer = 0;
+ logPartPtr.p->logTimer = 1;
+ signal->theData[0] = ZTIME_SUPERVISION;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//for
+}//Dblqh::startTimeSupervision()
+
+/*---------------------------------------------------------------------------*/
+/* WE SET THE GLOBAL CHECKPOINT VARIABLES AFTER WRITING THE COMPLETED GCI LOG*/
+/* RECORD. THIS ENSURES THAT WE WILL ENCOUNTER THE COMPLETED GCI RECORD WHEN */
+/* WE EXECUTE THE FRAGMENT LOG. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::initGcpRecLab(Signal* signal)
+{
+/* ======================================================================== */
+/* ======= INITIATE GCP RECORD ======= */
+/* */
+/* SUBROUTINE SHORT NAME = IGR */
+/* ======================================================================== */
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+/*--------------------------------------------------*/
+/* BY SETTING THE GCPREC = 0 WE START THE */
+/* CHECKING BY CHECK_GCP_COMPLETED. THIS */
+/* CHECKING MUST NOT BE STARTED UNTIL WE HAVE */
+/* INSERTED ALL COMPLETE GCI LOG RECORDS IN */
+/* ALL LOG PARTS. */
+/*--------------------------------------------------*/
+ logPartPtr.p->gcprec = 0;
+ gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZWAIT_DISK;
+ gcpPtr.p->gcpSyncReady[logPartPtr.i] = ZFALSE;
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ gcpPtr.p->gcpFilePtr[logPartPtr.i] = logFilePtr.i;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) {
+ jam();
+/*--------------------------------------------------*/
+/* SINCE THE CURRENT FILEPAGE POINTS AT THE */
+/* NEXT WORD TO BE WRITTEN WE HAVE TO ADJUST */
+/* FOR THIS BY DECREASING THE FILE PAGE BY ONE*/
+/* IF NO WORD HAS BEEN WRITTEN ON THE CURRENT */
+/* FILEPAGE. */
+/*--------------------------------------------------*/
+ gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage - 1;
+ gcpPtr.p->gcpWordNo[logPartPtr.i] = ZPAGE_SIZE - 1;
+ } else {
+ jam();
+ gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage;
+ gcpPtr.p->gcpWordNo[logPartPtr.i] =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
+ }//if
+ }//for
+ return;
+}//Dblqh::initGcpRecLab()
+
+/* ========================================================================= */
+/* ==== CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED AFTER A COMPLETED===== */
+/* DISK WRITE. */
+/* */
+/* SUBROUTINE SHORT NAME = CGC */
+/* ========================================================================= */
+void Dblqh::checkGcpCompleted(Signal* signal,
+ Uint32 tcgcPageWritten,
+ Uint32 tcgcWordWritten)
+{
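+ // initGcpRecLab recorded, per log part, the (page, word) position of the
+ // completed-GCI log record. The checkpoint is on disk for this log part
+ // once the write that just finished has reached at least that page and
+ // word; the comparison below is lexicographic on (page, word).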
+ UintR tcgcFlag;
+ UintR tcgcJ;
+
+ gcpPtr.i = logPartPtr.p->gcprec;
+ if (gcpPtr.i != RNIL) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* IF THE GLOBAL CHECKPOINT IS NOT WAITING FOR COMPLETION THEN WE CAN QUIT */
+/* THE SEARCH IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
+ if (gcpPtr.p->gcpFilePtr[logPartPtr.i] == logFilePtr.i) {
+/* ------------------------------------------------------------------------- */
+/* IF THE COMPLETED DISK OPERATION WAS ON ANOTHER FILE THAN THE ONE WE ARE */
+/* WAITING FOR, THEN WE CAN ALSO QUIT THE SEARCH IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ if (tcgcPageWritten < gcpPtr.p->gcpPageNo[logPartPtr.i]) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ return;
+ } else {
+ if (tcgcPageWritten == gcpPtr.p->gcpPageNo[logPartPtr.i]) {
+ if (tcgcWordWritten < gcpPtr.p->gcpWordNo[logPartPtr.i]) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+/* ------------------------------------------------------------------------- */
+/* THIS LOG PART HAS WRITTEN THE GLOBAL CHECKPOINT TO DISK. */
+/* ------------------------------------------------------------------------- */
+ logPartPtr.p->gcprec = RNIL;
+ gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZON_DISK;
+ tcgcFlag = ZTRUE;
+ for (tcgcJ = 0; tcgcJ <= 3; tcgcJ++) {
+ jam();
+ if (gcpPtr.p->gcpLogPartState[tcgcJ] != ZON_DISK) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*NOT ALL LOG PARTS HAVE SAVED THIS GLOBAL CHECKPOINT TO DISK YET. WAIT FOR */
+/*THEM TO COMPLETE. */
+/* ------------------------------------------------------------------------- */
+ tcgcFlag = ZFALSE;
+ }//if
+ }//for
+ if (tcgcFlag == ZTRUE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/*WE HAVE FOUND A COMPLETED GLOBAL CHECKPOINT OPERATION. WE NOW NEED TO SEND */
+/*GCP_SAVECONF, REMOVE THE GCP RECORD FROM THE LIST OF WAITING GCP RECORDS */
+/*ON THIS LOG PART AND RELEASE THE GCP RECORD. */
+// After changing the log implementation we need to perform a FSSYNCREQ on all
+// log files where the last log word resided first before proceeding.
+/* ------------------------------------------------------------------------- */
+ UintR Ti;
+ for (Ti = 0; Ti < 4; Ti++) {
+ LogFileRecordPtr loopLogFilePtr;
+ loopLogFilePtr.i = gcpPtr.p->gcpFilePtr[Ti];
+ ptrCheckGuard(loopLogFilePtr, clogFileFileSize, logFileRecord);
+ if (loopLogFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
+ jam();
+ signal->theData[0] = loopLogFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = gcpPtr.p->gcpFilePtr[Ti];
+ sendSignal(NDBFS_REF, GSN_FSSYNCREQ, signal, 3, JBA);
+ } else {
+ ndbrequire((loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::CLOSED) ||
+ (loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::CLOSING_WRITE_LOG) ||
+ (loopLogFilePtr.p->logFileStatus ==
+ LogFileRecord::OPENING_WRITE_LOG));
+ signal->theData[0] = loopLogFilePtr.i;
+ execFSSYNCCONF(signal);
+ }//if
+ }//for
+ return;
+ }//if
+ }//if
+ }//if
+}//Dblqh::checkGcpCompleted()
+
+void
+Dblqh::execFSSYNCCONF(Signal* signal)
+{
+ GcpRecordPtr localGcpPtr;
+ LogFileRecordPtr localLogFilePtr;
+ LogPartRecordPtr localLogPartPtr;
+ localLogFilePtr.i = signal->theData[0];
+ ptrCheckGuard(localLogFilePtr, clogFileFileSize, logFileRecord);
+ localLogPartPtr.i = localLogFilePtr.p->logPartRec;
+ localGcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(localGcpPtr, cgcprecFileSize, gcpRecord);
+ localGcpPtr.p->gcpSyncReady[localLogPartPtr.i] = ZTRUE;
+ UintR Ti;
+ for (Ti = 0; Ti < 4; Ti++) {
+ jam();
+ if (localGcpPtr.p->gcpSyncReady[Ti] == ZFALSE) {
+ jam();
+ return;
+ }//if
+ }//for
+ GCPSaveConf * const saveConf = (GCPSaveConf *)&signal->theData[0];
+ saveConf->dihPtr = localGcpPtr.p->gcpUserptr;
+ saveConf->nodeId = getOwnNodeId();
+ saveConf->gci = localGcpPtr.p->gcpId;
+ sendSignal(localGcpPtr.p->gcpBlockref, GSN_GCP_SAVECONF, signal,
+ GCPSaveConf::SignalLength, JBA);
+ ccurrentGcprec = RNIL;
+}//Dblqh::execFSSYNCCONF()
+
+void
+Dblqh::execFSSYNCREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSSYNCREF()
+
+
+/* ######################################################################### */
+/* ####### FILE HANDLING MODULE ####### */
+/* */
+/* ######################################################################### */
+/* THIS MODULE HANDLES RESPONSE MESSAGES FROM THE FILE SYSTEM */
+/* ######################################################################### */
+/* ######################################################################### */
+/* SIGNAL RECEPTION MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS MODULE CHECKS THE STATE AND JUMPS TO THE PROPER PART OF THE FILE */
+/* HANDLING MODULE. */
+/* ######################################################################### */
+/* *************** */
+/* FSCLOSECONF > */
+/* *************** */
+void Dblqh::execFSCLOSECONF(Signal* signal)
+{
+ jamEntry();
+ logFilePtr.i = signal->theData[0];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ switch (logFilePtr.p->logFileStatus) {
+ case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ // Set the prev file to check if we shall close it.
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ exitFromInvalidate(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_INIT:
+ jam();
+ closingInitLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_SR:
+ jam();
+ closingSrLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_SR:
+ jam();
+ closeExecSrLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_SR_COMPLETED:
+ jam();
+ closeExecSrCompletedLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_WRITE_LOG:
+ jam();
+ closeWriteLogLab(signal);
+ return;
+ break;
+ case LogFileRecord::CLOSING_EXEC_LOG:
+ jam();
+ closeExecLogLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSCLOSECONF()
+
+/* ************>> */
+/* FSCLOSEREF > */
+/* ************>> */
+void Dblqh::execFSCLOSEREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSCLOSEREF()
+
+/* ************>> */
+/* FSOPENCONF > */
+/* ************>> */
+void Dblqh::execFSOPENCONF(Signal* signal)
+{
+ jamEntry();
+ initFsopenconf(signal);
+ switch (logFilePtr.p->logFileStatus) {
+ case LogFileRecord::OPEN_SR_INVALIDATE_PAGES:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ readFileInInvalidate(signal);
+ return;
+ break;
+ case LogFileRecord::OPENING_INIT:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openFileInitLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FRONTPAGE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFrontpageLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_LAST_FILE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrLastFileLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_NEXT_FILE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrNextFileLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_SR_START:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecSrStartLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecSrNewMbyteLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_PHASE:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthPhaseLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_NEXT:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthNextLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPEN_SR_FOURTH_ZERO:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openSrFourthZeroLab(signal);
+ return;
+ break;
+ case LogFileRecord::OPENING_WRITE_LOG:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ return;
+ break;
+ case LogFileRecord::OPEN_EXEC_LOG:
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
+ openExecLogLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSOPENCONF()
+
+/* ************> */
+/* FSOPENREF > */
+/* ************> */
+void Dblqh::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execFSOPENREF()
+
+/* ************>> */
+/* FSREADCONF > */
+/* ************>> */
+void Dblqh::execFSREADCONF(Signal* signal)
+{
+ jamEntry();
+ initFsrwconf(signal);
+
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::READ_SR_LAST_MBYTE:
+ jam();
+ releaseLfo(signal);
+ readSrLastMbyteLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FRONTPAGE:
+ jam();
+ releaseLfo(signal);
+ readSrFrontpageLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_LAST_FILE:
+ jam();
+ releaseLfo(signal);
+ readSrLastFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_NEXT_FILE:
+ jam();
+ releaseLfo(signal);
+ readSrNextFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_SR:
+ jam();
+ readExecSrLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_LOG:
+ jam();
+ readExecLogLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ invalidateLogAfterLastGCI(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
+ jam();
+ releaseLfo(signal);
+ readSrFourthPhaseLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
+ jam();
+ releaseLfo(signal);
+ readSrFourthZeroLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSREADCONF()
+
+/* ************>> */
+/* FSREADREF > */
+/* ************>> */
+void Dblqh::execFSREADREF(Signal* signal)
+{
+ jamEntry();
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ terrorCode = signal->theData[1];
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::READ_SR_LAST_MBYTE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FRONTPAGE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_LAST_FILE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_NEXT_FILE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_SR:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_EXEC_LOG:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_PHASE:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_FOURTH_ZERO:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ return;
+}//Dblqh::execFSREADREF()
+
+/* *************** */
+/* FSWRITECONF > */
+/* *************** */
+void Dblqh::execFSWRITECONF(Signal* signal)
+{
+ jamEntry();
+ initFsrwconf(signal);
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ invalidateLogAfterLastGCI(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_PAGE_ZERO:
+ jam();
+ writePageZeroLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::LAST_WRITE_IN_FILE:
+ jam();
+ lastWriteInFileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::INIT_WRITE_AT_END:
+ jam();
+ initWriteEndLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::INIT_FIRST_PAGE:
+ jam();
+ initFirstPageLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_GCI_ZERO:
+ jam();
+ writeGciZeroLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_DIRTY:
+ jam();
+ writeDirtyLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::WRITE_INIT_MBYTE:
+ jam();
+ writeInitMbyteLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::ACTIVE_WRITE_LOG:
+ jam();
+ writeLogfileLab(signal);
+ return;
+ break;
+ case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
+ jam();
+ firstPageWriteLab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Dblqh::execFSWRITECONF()
+
+/* ************>> */
+/* FSWRITEREF > */
+/* ************>> */
+void Dblqh::execFSWRITEREF(Signal* signal)
+{
+ jamEntry();
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ terrorCode = signal->theData[1];
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_PAGE_ZERO:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::LAST_WRITE_IN_FILE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::INIT_WRITE_AT_END:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::INIT_FIRST_PAGE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_GCI_ZERO:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_DIRTY:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_INIT_MBYTE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::ACTIVE_WRITE_LOG:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ systemErrorLab(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+}//Dblqh::execFSWRITEREF()
+
+
+/* ========================================================================= */
+/* ======= INITIATE WHEN RECEIVING FSOPENCONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initFsopenconf(Signal* signal)
+{
+ logFilePtr.i = signal->theData[0];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logFilePtr.p->fileRef = signal->theData[1];
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.p->currentMbyte = 0;
+ logFilePtr.p->filePosition = 0;
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+}//Dblqh::initFsopenconf()
+
+/* ========================================================================= */
+/* ======= INITIATE WHEN RECEIVING FSREADCONF AND FSWRITECONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initFsrwconf(Signal* signal)
+{
+ lfoPtr.i = signal->theData[0];
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ logFilePtr.i = lfoPtr.p->logFileRec;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPagePtr.i = lfoPtr.p->firstLfoPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+}//Dblqh::initFsrwconf()
+
+/* ######################################################################### */
+/* NORMAL OPERATION MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS PART HANDLES THE NORMAL OPENING, CLOSING AND WRITING OF LOG FILES */
+/* DURING NORMAL OPERATION. */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS USED TO SUPERVISE THAT THE LOG RECORDS ARE NOT KEPT IN MAIN*/
+/* MEMORY FOR MORE THAN 1 SECOND TO ACHIEVE THE PROPER RELIABILITY. */
+/*---------------------------------------------------------------------------*/
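+/*---------------------------------------------------------------------------*/
+/* A hedged note on the timer scheme used below: writeSinglePage(), for */
+/* example, copies logTimer into logPartTimer when a disk write is issued, */
+/* while timeSup() increments logTimer on every supervision tick. Thus */
+/* logPartTimer != logTimer means that no write has been issued on this log */
+/* part since the previous tick and a forced write (or an MBYTE switch) is */
+/* required to keep log data from staying in main memory too long. */
+/*---------------------------------------------------------------------------*/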
+void Dblqh::timeSup(Signal* signal)
+{
+ LogPageRecordPtr origLogPagePtr;
+ Uint32 wordWritten;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPartPtr.p->logPartTimer != logPartPtr.p->logTimer) {
+ jam();
+/*--------------------------------------------------------------------------*/
+/* THIS LOG PART HAS NOT WRITTEN TO DISK DURING THE LAST SECOND. */
+/*--------------------------------------------------------------------------*/
+ switch (logPartPtr.p->logPartState) {
+ case LogPartRecord::FILE_CHANGE_PROBLEM:
+ jam();
+/*--------------------------------------------------------------------------*/
+/* THIS LOG PART HAS PROBLEMS IN CHANGING FILES MAKING IT IMPOSSIBLE */
+// TO WRITE TO THE FILE CURRENTLY. WE WILL COME BACK LATER AND SEE IF
+// THE PROBLEM HAS BEEN FIXED.
+/*--------------------------------------------------------------------------*/
+ case LogPartRecord::ACTIVE:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* AN OPERATION IS CURRENTLY ACTIVE IN WRITING THIS LOG PART. WE THUS CANNOT */
+/* WRITE ANYTHING TO DISK AT THIS MOMENT. WE WILL SEND A SIGNAL DELAYED FOR */
+/* 10 MS AND THEN TRY AGAIN. POSSIBLY THE LOG PART WILL HAVE BEEN WRITTEN */
+/* BY THEN, OR ELSE IT SHOULD BE FREE FOR WRITING AGAIN. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = ZTIME_SUPERVISION;
+ signal->theData[1] = logPartPtr.i;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ return;
+ break;
+ case LogPartRecord::IDLE:
+ case LogPartRecord::TAIL_PROBLEM:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IDLE AND NOT WRITTEN TO DISK IN A SECOND. ALSO WHEN WE HAVE A TAIL PROBLEM*/
+/* WE HAVE TO WRITE TO DISK AT TIMES. WE WILL FIRST CHECK WHETHER ANYTHING */
+/* AT ALL HAS BEEN WRITTEN TO THE PAGES BEFORE WRITING TO DISK. */
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO WRITE TO DISK IN ALL CASES SINCE THERE COULD BE INFORMATION */
+/* STILL IN THE LOG THAT WAS GENERATED BEFORE THE PREVIOUS TIME SUPERVISION */
+/* BUT AFTER THE LAST DISK WRITE. THIS PREVIOUSLY STOPPED ALL DISK WRITES */
+/* WHEN NO MORE LOG WRITES WERE PERFORMED (THIS HAPPENED WHEN LOG GOT FULL */
+/* AND AFTER LOADING THE INITIAL RECORDS IN INITIAL START). */
+/*---------------------------------------------------------------------------*/
+ if (((logFilePtr.p->currentFilepage + 1) & (ZPAGES_IN_MBYTE -1)) == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS IS THE LAST PAGE IN THIS MBYTE. WRITE NEXT LOG AND SWITCH TO NEXT */
+/* MBYTE. */
+/*---------------------------------------------------------------------------*/
+ changeMbyte(signal);
+ } else {
+/*---------------------------------------------------------------------------*/
+/* WRITE THE LOG PAGE TO DISK EVEN IF IT IS NOT FULL. KEEP PAGE AND WRITE A */
+/* COPY. THE ORIGINAL PAGE WILL BE WRITTEN AGAIN LATER ON. */
+/*---------------------------------------------------------------------------*/
+ wordWritten = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1;
+ origLogPagePtr.i = logPagePtr.i;
+ origLogPagePtr.p = logPagePtr.p;
+ seizeLogpage(signal);
+ MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[0],
+ &origLogPagePtr.p->logPageWord[0],
+ wordWritten + 1);
+ ndbrequire(wordWritten < ZPAGE_SIZE);
+ if (logFilePtr.p->noLogpagesInBuffer > 0) {
+ jam();
+ completedLogPage(signal, ZENFORCE_WRITE);
+/*---------------------------------------------------------------------------*/
+/*SINCE WE ARE ONLY WRITING PART OF THE LAST PAGE WE HAVE TO UPDATE THE WORD */
+/*WRITTEN TO REFLECT THE REAL LAST WORD WRITTEN. WE ALSO HAVE TO MOVE THE */
+/*FILE POSITION ONE STEP BACKWARDS SINCE WE ARE NOT WRITING THE LAST PAGE */
+/*COMPLETELY. IT WILL BE WRITTEN AGAIN. */
+/*---------------------------------------------------------------------------*/
+ lfoPtr.p->lfoWordWritten = wordWritten;
+ logFilePtr.p->filePosition = logFilePtr.p->filePosition - 1;
+ } else {
+ if (wordWritten == (ZPAGE_HEADER_SIZE - 1)) {
+/*---------------------------------------------------------------------------*/
+/*THIS IS POSSIBLE BUT VERY UNLIKELY. IF THE PAGE WAS COMPLETED AFTER THE LAST*/
+/*WRITE TO DISK THEN NO_LOG_PAGES_IN_BUFFER > 0 AND IF NOT WRITTEN SINCE LAST*/
+/*WRITE TO DISK THEN THE PREVIOUS PAGE MUST HAVE BEEN WRITTEN BY SOME */
+/*OPERATION AND THAT BECAME COMPLETELY FULL. IN ANY CASE WE NEED NOT WRITE AN*/
+/*EMPTY PAGE TO DISK. */
+/*---------------------------------------------------------------------------*/
+ jam();
+ releaseLogpage(signal);
+ } else {
+ jam();
+ writeSinglePage(signal, logFilePtr.p->currentFilepage, wordWritten);
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ }//if
+ }//if
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ logPartPtr.p->logTimer++;
+ return;
+}//Dblqh::timeSup()
+
+void Dblqh::writeLogfileLab(Signal* signal)
+{
+/*---------------------------------------------------------------------------*/
+/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
+/* WRITE. */
+/*---------------------------------------------------------------------------*/
+ switch (logFilePtr.p->fileChangeState) {
+ case LogFileRecord::NOT_ONGOING:
+ jam();
+ checkGcpCompleted(signal,
+ ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
+ lfoPtr.p->lfoWordWritten);
+ break;
+#if 0
+ case LogFileRecord::BOTH_WRITES_ONGOING:
+ jam();
+ ndbout_c("not crashing!!");
+ // Fall-through
+#endif
+ case LogFileRecord::WRITE_PAGE_ZERO_ONGOING:
+ case LogFileRecord::LAST_WRITE_ONGOING:
+ jam();
+ logFilePtr.p->lastPageWritten = (lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1;
+ logFilePtr.p->lastWordWritten = lfoPtr.p->lfoWordWritten;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+ return;
+}//Dblqh::writeLogfileLab()
+
+void Dblqh::closeWriteLogLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ return;
+}//Dblqh::closeWriteLogLab()
+
+/* ######################################################################### */
+/* FILE CHANGE MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/*THIS PART OF THE FILE MODULE HANDLES WHEN WE ARE CHANGING LOG FILE DURING */
+/*NORMAL OPERATION. WE HAVE TO BE CAREFUL WHEN WE ARE CHANGING LOG FILE SO */
+/*THAT WE DO NOT COMPLICATE THE SYSTEM RESTART PROCESS TOO MUCH. */
+/*THE IDEA IS THAT WE START BY WRITING THE LAST WRITE IN THE OLD FILE AND WE */
+/*ALSO WRITE THE FIRST PAGE OF THE NEW FILE CONCURRENT WITH THAT. THIS FIRST */
+/*PAGE IN THE NEW FILE DOES NOT CONTAIN ANY LOG RECORDS OTHER THAN A DESCRIPTOR*/
+/*CONTAINING INFORMATION ABOUT GCI'S NEEDED AT SYSTEM RESTART AND A NEXT LOG */
+/*RECORD. */
+/* */
+/*WHEN BOTH OF THOSE WRITES HAVE COMPLETED WE ALSO WRITE PAGE ZERO IN FILE */
+/*ZERO. THE ONLY INFORMATION WHICH IS INTERESTING HERE IS THE NEW FILE NUMBER*/
+/* */
+/*IF OPTIMISATIONS ARE NEEDED OF THE LOG HANDLING THEN IT IS POSSIBLE TO */
+/*AVOID WRITING THE FIRST PAGE OF THE NEW FILE IMMEDIATELY. THIS COMPLICATES */
+/*THE SYSTEM RESTART AND ONE HAS TO TAKE SPECIAL CARE WITH FILE ZERO. IT IS */
+/*HOWEVER NO LARGE PROBLEM TO CHANGE INTO THIS SCENARIO. AVOIDING THE WRITE */
+/*OF PAGE ZERO IS ALSO POSSIBLE BUT COMPLICATES THE DESIGN EVEN FURTHER. IT */
+/*GETS FAIRLY COMPLEX TO FIND THE END OF THE LOG. SOME SORT OF */
+/*BINARY SEARCH IS HOWEVER MOST LIKELY A GOOD METHODOLOGY FOR THIS. */
+/* ######################################################################### */
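+/* ------------------------------------------------------------------------- */
+/* A non-normative summary of the state machine implemented below: the last */
+/* write in the old file and the first page of the new file are issued */
+/* concurrently (fileChangeState = BOTH_WRITES_ONGOING). Whichever completes */
+/* first records which write is still outstanding (LAST_WRITE_ONGOING or */
+/* FIRST_WRITE_ONGOING). When both have completed, page zero of file 0 is */
+/* rewritten with the new file number (WRITE_PAGE_ZERO_ONGOING), unless the */
+/* new file is file 0 itself, in which case the change is immediately marked */
+/* NOT_ONGOING. */
+/* ------------------------------------------------------------------------- */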
+void Dblqh::firstPageWriteLab(Signal* signal)
+{
+ releaseLfo(signal);
+/*---------------------------------------------------------------------------*/
+/* RELEASE PAGE ZERO IF THE FILE IS NOT FILE 0. */
+/*---------------------------------------------------------------------------*/
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ if (fileNo != 0) {
+ jam();
+ releaseLogpage(signal);
+ }//if
+/*---------------------------------------------------------------------------*/
+/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN */
+/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
+/* LAST FILE WHERE LOGGING HAS STARTED. */
+/*---------------------------------------------------------------------------*/
+/* FIRST CHECK WHETHER THE LAST WRITE IN THE PREVIOUS FILE HAS COMPLETED */
+/*---------------------------------------------------------------------------*/
+ if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE LAST WRITE WAS STILL ONGOING. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::LAST_WRITE_ONGOING;
+ return;
+ } else {
+ jam();
+ ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::FIRST_WRITE_ONGOING);
+/*---------------------------------------------------------------------------*/
+/* WRITE TO PAGE 0 IN FILE 0 NOW. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
+ if (fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
+/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
+/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
+/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
+/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
+/* LOG PART. */
+/*---------------------------------------------------------------------------*/
+ Uint32 currLogFile = logFilePtr.i;
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ lfoPtr.p->logFileRec = currLogFile;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
+ return;
+ }//if
+ }//if
+}//Dblqh::firstPageWriteLab()
+
+void Dblqh::lastWriteInFileLab(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+/*---------------------------------------------------------------------------*/
+/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */
+/* WRITE. */
+/*---------------------------------------------------------------------------*/
+ checkGcpCompleted(signal,
+ ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1),
+ (ZPAGE_SIZE - 1));
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+/*---------------------------------------------------------------------------*/
+/* IF THE FILE IS NEITHER THE CURRENT FILE NOR THE NEXT FILE TO BE USED, WE */
+/* WILL CLOSE IT. */
+/*---------------------------------------------------------------------------*/
+ locLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.i != locLogFilePtr.i) {
+ if (logFilePtr.i != locLogFilePtr.p->nextLogFile) {
+ if (logFilePtr.p->fileNo != 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FILE IS NOT FILE ZERO EITHER. WE WILL NOT CLOSE FILE ZERO SINCE WE */
+/* USE IT TO KEEP TRACK OF THE CURRENT LOG FILE BY WRITING PAGE ZERO IN */
+/* FILE ZERO. */
+/*---------------------------------------------------------------------------*/
+/* WE WILL CLOSE THE FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_WRITE_LOG;
+ closeFile(signal, logFilePtr);
+ }//if
+ }//if
+ }//if
+/*---------------------------------------------------------------------------*/
+/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE 0 IN */
+/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */
+/* LAST FILE WHERE LOGGING HAS STARTED. */
+/*---------------------------------------------------------------------------*/
+/* FIRST CHECK WHETHER THE FIRST WRITE IN THE NEW FILE HAS COMPLETED */
+/* THIS STATE INFORMATION IS IN THE NEW LOG FILE AND THUS WE HAVE TO MOVE */
+/* THE LOG FILE POINTER TO THIS LOG FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FIRST WRITE WAS STILL ONGOING. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::FIRST_WRITE_ONGOING;
+ return;
+ } else {
+ ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::LAST_WRITE_ONGOING);
+/*---------------------------------------------------------------------------*/
+/* WRITE TO PAGE 0 IN FILE 0 NOW. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING;
+ Uint32 fileNo = logFilePtr.p->fileNo;
+ if (fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */
+/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */
+/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */
+/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */
+/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */
+/* LOG PART. */
+/*---------------------------------------------------------------------------*/
+ Uint32 currLogFile = logFilePtr.i;
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ lfoPtr.p->logFileRec = currLogFile;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO;
+ return;
+ }//if
+ }//if
+}//Dblqh::lastWriteInFileLab()
+
+void Dblqh::writePageZeroLab(Signal* signal)
+{
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+/*---------------------------------------------------------------------------*/
+/* PAGE WRITES TO THE CURRENT FILE COULD HAVE ARRIVED WHILE WE WERE */
+/* WAITING FOR THIS DISK WRITE TO COMPLETE. THEY COULD NOT CHECK FOR */
+/* COMPLETED GLOBAL CHECKPOINTS. THUS WE SHOULD DO THAT NOW INSTEAD. */
+/*---------------------------------------------------------------------------*/
+ checkGcpCompleted(signal,
+ logFilePtr.p->lastPageWritten,
+ logFilePtr.p->lastWordWritten);
+ releaseLfo(signal);
+ return;
+}//Dblqh::writePageZeroLab()
+
+/* ######################################################################### */
+/* INITIAL START MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/*THIS MODULE INITIALISES ALL THE LOG FILES THAT ARE NEEDED AT A SYSTEM */
+/*RESTART AND WHICH ARE USED DURING NORMAL OPERATIONS. IT CREATES THE FILES */
+/*AND SETS A PROPER SIZE OF THEM AND INITIALISES THE FIRST PAGE IN EACH FILE */
+/* ######################################################################### */
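+/* ------------------------------------------------------------------------- */
+/* A hedged outline of the per-file initialisation sequence below: */
+/*   openFileInitLab    - write the last page of the file (INIT_WRITE_AT_END), */
+/*                        presumably to set the full file size on disk. */
+/*   initWriteEndLab    - write the file header page (writeFileHeaderOpen). */
+/*   initFirstPageLab   - for file 0 also write page 1 with a completed GCI */
+/*                        record for GCI = 0 (WRITE_GCI_ZERO). */
+/*   writeInitMbyte     - write the first page of every remaining MBYTE. */
+/*   closingInitLab     - close the file and continue with the next one; when */
+/*                        all files of all log parts are done, */
+/*                        checkInitCompletedLab starts the fourth phase. */
+/* ------------------------------------------------------------------------- */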
+void Dblqh::openFileInitLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT;
+ seizeLogpage(signal);
+ writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END;
+ return;
+}//Dblqh::openFileInitLab()
+
+void Dblqh::initWriteEndLab(Signal* signal)
+{
+ releaseLfo(signal);
+ initLogpage(signal);
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* PAGE ZERO IN FILE ZERO MUST SET LOG LAP TO ONE SINCE IT HAS STARTED */
+/* WRITING TO THE LOG, ALSO GLOBAL CHECKPOINTS ARE SET TO ZERO. */
+/*---------------------------------------------------------------------------*/
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = 0;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 0;
+ logFilePtr.p->logMaxGciStarted[0] = 0;
+ logFilePtr.p->logMaxGciCompleted[0] = 0;
+ }//if
+/*---------------------------------------------------------------------------*/
+/* REUSE CODE FOR INITIALISATION OF FIRST PAGE IN ALL LOG FILES. */
+/*---------------------------------------------------------------------------*/
+ writeFileHeaderOpen(signal, ZINIT);
+ return;
+}//Dblqh::initWriteEndLab()
+
+void Dblqh::initFirstPageLab(Signal* signal)
+{
+ releaseLfo(signal);
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IN FILE ZERO WE WILL ALSO WRITE PAGE ONE, WHERE WE INSERT A COMPLETED */
+/* GCI RECORD FOR GCI = 0. */
+/*---------------------------------------------------------------------------*/
+ initLogpage(signal);
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE] = ZCOMPLETED_GCI_TYPE;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + 1] = 1;
+ writeSinglePage(signal, 1, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_GCI_ZERO;
+ return;
+ }//if
+ logFilePtr.p->currentMbyte = 1;
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::initFirstPageLab()
+
+void Dblqh::writeGciZeroLab(Signal* signal)
+{
+ releaseLfo(signal);
+ logFilePtr.p->currentMbyte = 1;
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::writeGciZeroLab()
+
+void Dblqh::writeInitMbyteLab(Signal* signal)
+{
+ releaseLfo(signal);
+ logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1;
+ if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) {
+ jam();
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT;
+ closeFile(signal, logFilePtr);
+ return;
+ }//if
+ writeInitMbyte(signal);
+ return;
+}//Dblqh::writeInitMbyteLab()
+
+void Dblqh::closingInitLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ if (logFilePtr.p->nextLogFile == logPartPtr.p->firstLogfile) {
+ jam();
+ checkInitCompletedLab(signal);
+ return;
+ } else {
+ jam();
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ openLogfileInit(signal);
+ }//if
+ return;
+}//Dblqh::closingInitLab()
+
+void Dblqh::checkInitCompletedLab(Signal* signal)
+{
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
+/*---------------------------------------------------------------------------*/
+/* WE HAVE NOW INITIALISED ALL FILES IN THIS LOG PART. WE CAN NOW SET THE */
+/* LOG LAP TO ONE SINCE WE WILL START WITH LOG LAP ONE. LOG LAP = ZERO */
+/* MEANS THIS PART OF THE LOG IS NOT WRITTEN YET. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logLap = 1;
+ logPartPtr.i = 0;
+CHECK_LOG_PARTS_LOOP:
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS PART HAS STILL NOT COMPLETED. WAIT FOR THIS TO OCCUR. */
+/*---------------------------------------------------------------------------*/
+ return;
+ }//if
+ if (logPartPtr.i == 3) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* ALL LOG PARTS ARE COMPLETED. NOW WE CAN CONTINUE WITH THE RESTART */
+/* PROCESSING. THE NEXT STEP IS TO PREPARE FOR EXECUTING OPERATIONS. THUS WE */
+/* NEED TO INITIALISE ALL NEEDED DATA AND TO OPEN FILE ZERO AND THE NEXT AND */
+/* TO SET THE CURRENT LOG PAGE TO BE PAGE 1 IN FILE ZERO. */
+/*---------------------------------------------------------------------------*/
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ signal->theData[0] = ZINIT_FOURTH;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//for
+ return;
+ } else {
+ jam();
+ logPartPtr.i = logPartPtr.i + 1;
+ goto CHECK_LOG_PARTS_LOOP;
+ }//if
+}//Dblqh::checkInitCompletedLab()
+
+/* ========================================================================= */
+/* ======= INITIATE LOG FILE OPERATION RECORD WHEN ALLOCATED ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initLfo(Signal* signal)
+{
+ lfoPtr.p->firstLfoPage = RNIL;
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+ lfoPtr.p->logFileRec = logFilePtr.i;
+ lfoPtr.p->noPagesRw = 0;
+ lfoPtr.p->lfoPageNo = ZNIL;
+}//Dblqh::initLfo()
+
+/* ========================================================================= */
+/* ======= INITIATE LOG FILE WHEN ALLOCATED ======= */
+/* */
+/* INPUT: TFILE_NO NUMBER OF THE FILE INITIATED */
+/* LOG_PART_PTR NUMBER OF LOG PART */
+/* SUBROUTINE SHORT NAME = IL */
+/* ========================================================================= */
+void Dblqh::initLogfile(Signal* signal, Uint32 fileNo)
+{
+ UintR tilTmp;
+ UintR tilIndex;
+
+ logFilePtr.p->currentFilepage = 0;
+ logFilePtr.p->currentLogpage = RNIL;
+ logFilePtr.p->fileName[0] = (UintR)-1;
+ logFilePtr.p->fileName[1] = (UintR)-1; /* = H'FFFFFFFF = -1 */
+ logFilePtr.p->fileName[2] = fileNo; /* Sfile_no */
+ tilTmp = 1; /* VERSION 1 OF FILE NAME */
+ tilTmp = (tilTmp << 8) + 1; /* FRAGMENT LOG => .FRAGLOG AS EXTENSION */
+ tilTmp = (tilTmp << 8) + (8 + logPartPtr.i); /* DIRECTORY = D(8+Part)/DBLQH */
+ tilTmp = (tilTmp << 8) + 255; /* IGNORE Pxx PART OF FILE NAME */
+ logFilePtr.p->fileName[3] = tilTmp;
+/* ========================================================================= */
+/* FILE NAME BECOMES /D2/DBLQH/Tpart_no/Sfile_no.FRAGLOG */
+/* ========================================================================= */
+ logFilePtr.p->fileNo = fileNo;
+ logFilePtr.p->filePosition = 0;
+ logFilePtr.p->firstLfo = RNIL;
+ logFilePtr.p->lastLfo = RNIL;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logFilePtr.p->logPartRec = logPartPtr.i;
+ logFilePtr.p->noLogpagesInBuffer = 0;
+ logFilePtr.p->firstFilledPage = RNIL;
+ logFilePtr.p->lastFilledPage = RNIL;
+ logFilePtr.p->lastPageWritten = 0;
+ logFilePtr.p->logPageZero = RNIL;
+ logFilePtr.p->currentMbyte = 0;
+ for (tilIndex = 0; tilIndex <= 15; tilIndex++) {
+ logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1;
+ logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1;
+ logFilePtr.p->logLastPrepRef[tilIndex] = 0;
+ }//for
+}//Dblqh::initLogfile()
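+/* ------------------------------------------------------------------------- */
+/* A worked example of the name packing above (illustration only): for log */
+/* part 2 the packed word is built as 1 -> 0x101 -> 0x1010A -> 0x01010AFF, */
+/* i.e. file name version 1, extension 1 (.FRAGLOG), directory 10 (D10/DBLQH) */
+/* and 255 meaning that the Pxx part of the file name is not used. */
+/* ------------------------------------------------------------------------- */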
+
+/* ========================================================================= */
+/* ======= INITIATE LOG PAGE WHEN ALLOCATED ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initLogpage(Signal* signal)
+{
+ TcConnectionrecPtr ilpTcConnectptr;
+
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = logPartPtr.p->logLap;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] =
+ logPartPtr.p->logPartNewestCompletedGCI;
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = cnewestGci;
+ logPagePtr.p->logPageWord[ZPOS_VERSION] = NDB_VERSION;
+ logPagePtr.p->logPageWord[ZPOS_NO_LOG_FILES] = logPartPtr.p->noLogFiles;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ ilpTcConnectptr.i = logPartPtr.p->firstLogTcrec;
+ if (ilpTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ilpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
+ (ilpTcConnectptr.p->logStartFileNo << 16) +
+ (ilpTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+ } else {
+ jam();
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] =
+ (logFilePtr.p->fileNo << 16) +
+ (logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+ }//if
+}//Dblqh::initLogpage()
+
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN LOG FILE FOR READ AND WRITE ------- */
+/* */
+/* SUBROUTINE SHORT NAME = OFR */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr)
+{
+ signal->theData[0] = cownref;
+ signal->theData[1] = olfLogFilePtr.i;
+ signal->theData[2] = olfLogFilePtr.p->fileName[0];
+ signal->theData[3] = olfLogFilePtr.p->fileName[1];
+ signal->theData[4] = olfLogFilePtr.p->fileName[2];
+ signal->theData[5] = olfLogFilePtr.p->fileName[3];
+ signal->theData[6] = ZOPEN_READ_WRITE;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dblqh::openFileRw()
+
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN LOG FILE DURING INITIAL START ------- */
+/* */
+/* SUBROUTINE SHORT NAME = OLI */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openLogfileInit(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::OPENING_INIT;
+ signal->theData[0] = cownref;
+ signal->theData[1] = logFilePtr.i;
+ signal->theData[2] = logFilePtr.p->fileName[0];
+ signal->theData[3] = logFilePtr.p->fileName[1];
+ signal->theData[4] = logFilePtr.p->fileName[2];
+ signal->theData[5] = logFilePtr.p->fileName[3];
+ signal->theData[6] = 0x302;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+}//Dblqh::openLogfileInit()
+
+/* OPEN FOR READ/WRITE, DO CREATE AND DO TRUNCATE FILE */
+/* ------------------------------------------------------------------------- */
+/* ------- OPEN NEXT LOG FILE ------- */
+/* */
+/* SUBROUTINE SHORT NAME = ONL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::openNextLogfile(Signal* signal)
+{
+ LogFileRecordPtr onlLogFilePtr;
+
+ if (logPartPtr.p->noLogFiles > 2) {
+ jam();
+/* -------------------------------------------------- */
+/* IF ONLY 1 OR 2 LOG FILES EXIST THEN THEY ARE */
+/* ALWAYS OPEN AND THUS IT IS NOT NECESSARY TO */
+/* OPEN THEM NOW. */
+/* -------------------------------------------------- */
+ onlLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(onlLogFilePtr, clogFileFileSize, logFileRecord);
+ if (onlLogFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
+ ndbrequire(onlLogFilePtr.p->fileNo == 0);
+ return;
+ }//if
+ onlLogFilePtr.p->logFileStatus = LogFileRecord::OPENING_WRITE_LOG;
+ signal->theData[0] = cownref;
+ signal->theData[1] = onlLogFilePtr.i;
+ signal->theData[2] = onlLogFilePtr.p->fileName[0];
+ signal->theData[3] = onlLogFilePtr.p->fileName[1];
+ signal->theData[4] = onlLogFilePtr.p->fileName[2];
+ signal->theData[5] = onlLogFilePtr.p->fileName[3];
+ signal->theData[6] = 2;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ }//if
+}//Dblqh::openNextLogfile()
+
+ /* OPEN FOR READ/WRITE, DON'T CREATE AND DON'T TRUNCATE FILE */
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE LFO RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLfo(Signal* signal)
+{
+#ifdef VM_TRACE
+ // Check that lfo record isn't already in free list
+ LogFileOperationRecordPtr TlfoPtr;
+ TlfoPtr.i = cfirstfreeLfo;
+ while (TlfoPtr.i != RNIL){
+ ptrCheckGuard(TlfoPtr, clfoFileSize, logFileOperationRecord);
+ ndbrequire(TlfoPtr.i != lfoPtr.i);
+ TlfoPtr.i = TlfoPtr.p->nextLfo;
+ }
+#endif
+ lfoPtr.p->nextLfo = cfirstfreeLfo;
+ lfoPtr.p->lfoTimer = 0;
+ cfirstfreeLfo = lfoPtr.i;
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+}//Dblqh::releaseLfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE ALL LOG PAGES CONNECTED TO A LFO RECORD ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RLP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLfoPages(Signal* signal)
+{
+ LogPageRecordPtr rlpLogPagePtr;
+
+ logPagePtr.i = lfoPtr.p->firstLfoPage;
+RLP_LOOP:
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ rlpLogPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ releaseLogpage(signal);
+ if (rlpLogPagePtr.i != RNIL) {
+ jam();
+ logPagePtr.i = rlpLogPagePtr.i;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ goto RLP_LOOP;
+ }//if
+ lfoPtr.p->firstLfoPage = RNIL;
+}//Dblqh::releaseLfoPages()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RELEASE LOG PAGE ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::releaseLogpage(Signal* signal)
+{
+#ifdef VM_TRACE
+ // Check that log page isn't already in free list
+ LogPageRecordPtr TlogPagePtr;
+ TlogPagePtr.i = cfirstfreeLogPage;
+ while (TlogPagePtr.i != RNIL){
+ ptrCheckGuard(TlogPagePtr, clogPageFileSize, logPageRecord);
+ ndbrequire(TlogPagePtr.i != logPagePtr.i);
+ TlogPagePtr.i = TlogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ }
+#endif
+
+ cnoOfLogPages++;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage;
+ cfirstfreeLogPage = logPagePtr.i;
+}//Dblqh::releaseLogpage()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LFO RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLfo(Signal* signal)
+{
+ lfoPtr.i = cfirstfreeLfo;
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ cfirstfreeLfo = lfoPtr.p->nextLfo;
+ lfoPtr.p->nextLfo = RNIL;
+ lfoPtr.p->lfoTimer = cLqhTimeOutCount;
+}//Dblqh::seizeLfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LOG FILE RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLogfile(Signal* signal)
+{
+ logFilePtr.i = cfirstfreeLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+/* ------------------------------------------------------------------------- */
+/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_FILE_PTR = RNIL */
+/* ------------------------------------------------------------------------- */
+ cfirstfreeLogFile = logFilePtr.p->nextLogFile;
+ logFilePtr.p->nextLogFile = RNIL;
+}//Dblqh::seizeLogfile()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE LOG PAGE RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizeLogpage(Signal* signal)
+{
+ cnoOfLogPages--;
+ logPagePtr.i = cfirstfreeLogPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+/* ------------------------------------------------------------------------- */
+/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_PAGE_PTR = RNIL */
+/* ------------------------------------------------------------------------- */
+ cfirstfreeLogPage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+}//Dblqh::seizeLogpage()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE FILE DESCRIPTOR INFORMATION ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WFD */
+// Pointer handling:
+// logFilePtr in
+// logPartPtr in
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeFileDescriptor(Signal* signal)
+{
+ TcConnectionrecPtr wfdTcConnectptr;
+ UintR twfdFileNo;
+ UintR twfdMbyte;
+
+/* -------------------------------------------------- */
+/* START BY WRITING TO LOG FILE RECORD */
+/* -------------------------------------------------- */
+ arrGuard(logFilePtr.p->currentMbyte, 16);
+ logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
+ logPartPtr.p->logPartNewestCompletedGCI;
+ logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci;
+ wfdTcConnectptr.i = logPartPtr.p->firstLogTcrec;
+ if (wfdTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(wfdTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ twfdFileNo = wfdTcConnectptr.p->logStartFileNo;
+ twfdMbyte = wfdTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ (twfdFileNo << 16) + twfdMbyte;
+ } else {
+ jam();
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ (logFilePtr.p->fileNo << 16) + logFilePtr.p->currentMbyte;
+ }//if
+}//Dblqh::writeFileDescriptor()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE THE HEADER PAGE OF A NEW FILE ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WMO */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
+{
+ LogFileRecordPtr wmoLogFilePtr;
+ UintR twmoNoLogDescriptors;
+ UintR twmoLoop;
+ UintR twmoIndex;
+
+/* -------------------------------------------------- */
+/* WRITE HEADER INFORMATION IN THE NEW FILE. */
+/* -------------------------------------------------- */
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE;
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
+ logFilePtr.p->fileNo;
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ twmoNoLogDescriptors = logPartPtr.p->noLogFiles;
+ }//if
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] =
+ twmoNoLogDescriptors;
+ wmoLogFilePtr.i = logFilePtr.i;
+ twmoLoop = 0;
+WMO_LOOP:
+ jam();
+ if (twmoLoop < twmoNoLogDescriptors) {
+ jam();
+ ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord);
+ for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) {
+ jam();
+ arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] =
+ wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) +
+ twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] =
+ wmoLogFilePtr.p->logMaxGciStarted[twmoIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) +
+ twmoIndex, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] =
+ wmoLogFilePtr.p->logLastPrepRef[twmoIndex];
+ }//for
+ wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile;
+ twmoLoop = twmoLoop + 1;
+ goto WMO_LOOP;
+ }//if
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (ZFD_PART_SIZE * twmoNoLogDescriptors);
+ arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
+ ZNEXT_LOG_RECORD_TYPE;
+/* ------------------------------------------------------- */
+/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */
+/* LOG FILE. THIS HAS SPECIAL SIGNIFICANCE TO FIND */
+/* THE END OF THE LOG AT SYSTEM RESTART. */
+/* ------------------------------------------------------- */
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1);
+ if (wmoType == ZINIT) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::INIT_FIRST_PAGE;
+ } else {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE;
+ }//if
+ logFilePtr.p->filePosition = 1;
+ if (wmoType == ZNORMAL) {
+ jam();
+/* -------------------------------------------------- */
+/* ALLOCATE A NEW PAGE SINCE THE CURRENT IS */
+/* WRITTEN. */
+/* -------------------------------------------------- */
+ seizeLogpage(signal);
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentFilepage + 1;
+ }//if
+}//Dblqh::writeFileHeaderOpen()
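+/* ------------------------------------------------------------------------- */
+/* A non-normative picture of the header page produced above: */
+/*   [page header][FD header][descriptor 0 .. descriptor n-1][ZNEXT_LOG_RECORD_TYPE] */
+/* where each descriptor covers one log file and consists of three arrays of */
+/* ZNO_MBYTES_IN_FILE words: logMaxGciCompleted, logMaxGciStarted and */
+/* logLastPrepRef, one entry per MBYTE of that file. */
+/* ------------------------------------------------------------------------- */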
+
+/* -------------------------------------------------- */
+/* THE NEW FILE POSITION WILL ALWAYS BE 1 SINCE */
+/* WE JUST WROTE THE FIRST PAGE IN THE LOG FILE */
+/* -------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE A MBYTE HEADER DURING INITIAL START ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WIM */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeInitMbyte(Signal* signal)
+{
+ initLogpage(signal);
+ writeSinglePage(signal, logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_INIT_MBYTE;
+}//Dblqh::writeInitMbyte()
+
+/* ------------------------------------------------------------------------- */
+/* ------- WRITE A SINGLE PAGE INTO A FILE ------- */
+/* */
+/* INPUT: TWSP_PAGE_NO THE PAGE NUMBER WRITTEN */
+/* SUBROUTINE SHORT NAME: WSP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo, Uint32 wordWritten)
+{
+ seizeLfo(signal);
+ initLfo(signal);
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+
+ // Calculate checksum for page
+ logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
+
+ lfoPtr.p->lfoPageNo = pageNo;
+ lfoPtr.p->lfoWordWritten = wordWritten;
+ lfoPtr.p->noPagesRw = 1;
+/* -------------------------------------------------- */
+/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
+/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
+/* -------------------------------------------------- */
+ logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1; /* ONE PAGE WRITTEN */
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = pageNo;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dblqh::writeSinglePage()
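+/* ------------------------------------------------------------------------- */
+/* The FSWRITEREQ built above is, as far as can be read from this block, */
+/* formatted as: [0] file reference, [1] reply reference, [2] user pointer */
+/* (the LFO record), [3] operation format (ZLIST_OF_PAIRS_SYNCH), [4] the */
+/* page array variable (ZVAR_NO_LOG_PAGE_WORD), [5] number of pages, then */
+/* one (memory page, file page) pair per page written. */
+/* ------------------------------------------------------------------------- */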
+
+/* ##########################################################################
+ * SYSTEM RESTART PHASE ONE MODULE
+ * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
+ *
+ * THIS MODULE CONTAINS THE CODE FOR THE FIRST PHASE OF THE SYSTEM RESTART.
+ * THE AIM OF THIS PHASE IS TO FIND THE END OF THE LOG AND TO FIND
+ * INFORMATION ABOUT WHERE GLOBAL CHECKPOINTS ARE COMPLETED AND STARTED
+ * IN THE LOG. THIS INFORMATION IS NEEDED TO START PHASE THREE OF
+ * THE SYSTEM RESTART.
+ * ########################################################################## */
+/* --------------------------------------------------------------------------
+ * A SYSTEM RESTART OR NODE RESTART IS ONGOING. WE HAVE NOW OPENED FILE 0
+ * NOW WE NEED TO READ PAGE 0 TO FIND WHICH LOG FILE WAS OPEN AT
+ * CRASH TIME.
+ * -------------------------------------------------------------------------- */
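+/* --------------------------------------------------------------------------
+ * A hedged outline of the scan performed below: page 0 of file 0 gives the
+ * number of the log file that was last written (readSrFrontpageLab); page 0
+ * of that file gives the log lap and the GCI information for the most recent
+ * files (readSrLastFileLab); the first page of every MBYTE in the last file
+ * is then read and its log lap compared with the current one to locate the
+ * last MBYTE written before the crash (readSrLastMbyteLab); finally page 0 of
+ * the remaining, older files is read to collect their GCI information
+ * (readSrNextFileLab).
+ * -------------------------------------------------------------------------- */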
+void Dblqh::openSrFrontpageLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FRONTPAGE;
+ return;
+}//Dblqh::openSrFrontpageLab()
+
+/* -------------------------------------------------------------------------
+ * WE HAVE NOW READ PAGE 0 IN FILE 0. CHECK THE LAST OPEN FILE. ACTUALLY THE
+ * LAST OPEN FILE COULD BE THE NEXT AFTER THAT. CHECK THAT FIRST. WHEN THE
+ * LAST WAS FOUND WE CAN FIND ALL THE NEEDED INFORMATION WHERE TO START AND
+ * STOP READING THE LOG.
+ * -------------------------------------------------------------------------- */
+void Dblqh::readSrFrontpageLab(Signal* signal)
+{
+ Uint32 fileNo = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO];
+ if (fileNo == 0) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * FILE 0 WAS ALSO THE LAST FILE, SO WE DO NOT NEED TO READ IT AGAIN.
+ * ---------------------------------------------------------------------- */
+ readSrLastFileLab(signal);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * CLOSE FILE 0 SO THAT WE HAVE CLOSED ALL FILES WHEN STARTING TO READ
+ * THE FRAGMENT LOG. ALSO RELEASE PAGE ZERO.
+ * ------------------------------------------------------------------------ */
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_LAST_FILE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::readSrFrontpageLab()
+
+void Dblqh::openSrLastFileLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_FILE;
+ return;
+}//Dblqh::openSrLastFileLab()
+
+void Dblqh::readSrLastFileLab(Signal* signal)
+{
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ } else {
+ jam();
+ initGciInLogFileRec(signal, logPartPtr.p->noLogFiles);
+ }//if
+ releaseLogpage(signal);
+ /* ------------------------------------------------------------------------
+ * NOW WE HAVE FOUND THE LAST LOG FILE. WE ALSO NEED TO FIND THE LAST
+ * MBYTE THAT WAS LAST WRITTEN BEFORE THE SYSTEM CRASH.
+ * ------------------------------------------------------------------------ */
+ logPartPtr.p->lastLogfile = logFilePtr.i;
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
+ logFilePtr.p->currentMbyte = 0;
+ return;
+}//Dblqh::readSrLastFileLab()
+
+void Dblqh::readSrLastMbyteLab(Signal* signal)
+{
+ if (logPartPtr.p->lastMbyte == ZNIL) {
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] < logPartPtr.p->logLap) {
+ jam();
+ logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
+ }//if
+ }//if
+ arrGuard(logFilePtr.p->currentMbyte, 16);
+ logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED];
+ logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED];
+ logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
+ logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF];
+ releaseLogpage(signal);
+ if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ logFilePtr.p->currentMbyte++;
+ readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE;
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE LOG WAS IN THE LAST MBYTE WHEN THE CRASH OCCURRED SINCE ALL
+ * LOG LAPS ARE EQUAL TO THE CURRENT LOG LAP.
+ * ---------------------------------------------------------------------- */
+ if (logPartPtr.p->lastMbyte == ZNIL) {
+ jam();
+ logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1;
+ }//if
+ }//if
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
+ if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ Uint32 fileNo;
+ if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ fileNo =
+ (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
+ ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ if (fileNo == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE
+ * MOMENT.
+ * -------------------------------------------------------------------- */
+ fileNo = 1;
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ } else {
+ jam();
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+ }//if
+ /* ------------------------------------------------------------------------
+ * THERE WAS NO NEED TO READ PAGE ZERO IN ANY MORE FILES.
+ * WE NOW HAVE ALL THE INFORMATION WE NEED ABOUT THE GCIs.
+ * NOW JUST WAIT FOR THE CLOSE OPERATIONS TO COMPLETE.
+ * ------------------------------------------------------------------------ */
+ return;
+}//Dblqh::readSrLastMbyteLab()
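+
+/* --------------------------------------------------------------------------
+ * A MINIMAL STANDALONE SKETCH OF THE FILE NUMBER ARITHMETIC USED ABOVE WHEN
+ * STEPPING BACK THROUGH THE CIRCULAR SET OF LOG FILES. THE HELPER NAME AND
+ * PARAMETER NAMES ARE PURELY ILLUSTRATIVE AND NOT PART OF THE BLOCK; THE
+ * REAL CODE USES ZMAX_LOG_FILES_IN_PAGE_ZERO AND logPartPtr.p->noLogFiles
+ * AND AVOIDS REUSING FILE 0 WHILE IT IS PROBABLY STILL CLOSING.
+ * -------------------------------------------------------------------------- */
+static Uint32 sketchPrevBatchFileNo(Uint32 currentFileNo,
+ Uint32 noLogFiles,
+ Uint32 maxFilesInPageZero)
+{
+ Uint32 fileNo;
+ if (currentFileNo >= maxFilesInPageZero) {
+ fileNo = currentFileNo - maxFilesInPageZero; // SIMPLE STEP BACKWARDS
+ } else {
+ fileNo = (noLogFiles + currentFileNo) - maxFilesInPageZero; // WRAP AROUND
+ }
+ if (fileNo == 0) {
+ fileNo = 1; // FILE 0 IS PROBABLY STILL CLOSING, USE FILE 1 INSTEAD
+ }
+ return fileNo;
+}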
+
+void Dblqh::openSrNextFileLab(Signal* signal)
+{
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_NEXT_FILE;
+ return;
+}//Dblqh::openSrNextFileLab()
+
+void Dblqh::readSrNextFileLab(Signal* signal)
+{
+ if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ } else {
+ jam();
+ initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles);
+ }//if
+ releaseLogpage(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
+ closeFile(signal, logFilePtr);
+ if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ Uint32 fileNo;
+ if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ jam();
+ fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ } else {
+ jam();
+ fileNo =
+ (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
+ ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ if (fileNo == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE MOMENT.
+ * -------------------------------------------------------------------- */
+ fileNo = 1;
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ } else {
+ jam();
+ logPartPtr.p->srRemainingFiles =
+ logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ }//if
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE;
+ openFileRw(signal, locLogFilePtr);
+ }//if
+ /* ------------------------------------------------------------------------
+ * THERE WAS NO NEED TO READ PAGE ZERO IN ANY MORE FILES.
+ * WE NOW HAVE ALL THE INFORMATION WE NEED ABOUT THE GCIs.
+ * NOW JUST WAIT FOR THE CLOSE OPERATIONS TO COMPLETE.
+ * ------------------------------------------------------------------------ */
+ return;
+}//Dblqh::readSrNextFileLab()
+
+void Dblqh::closingSrLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ do {
+ jam();
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->logFileStatus != LogFileRecord::CLOSED) {
+ jam();
+ /* --------------------------------------------------------------------
+ * EXIT AND WAIT FOR REMAINING LOG FILES TO COMPLETE THEIR WORK.
+ * -------------------------------------------------------------------- */
+ return;
+ }//if
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ } while (logFilePtr.i != logPartPtr.p->firstLogfile);
+ /* ------------------------------------------------------------------------
+ * ALL FILES IN THIS PART HAVE BEEN CLOSED. THIS INDICATES THAT THE FIRST
+ * PHASE OF THE SYSTEM RESTART HAS BEEN CONCLUDED FOR THIS LOG PART.
+ * CHECK IF ALL OTHER LOG PARTS ARE ALSO COMPLETED.
+ * ------------------------------------------------------------------------ */
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED;
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) {
+ jam();
+ /* --------------------------------------------------------------------
+ * EXIT AND WAIT FOR THE REST OF THE LOG PARTS TO COMPLETE.
+ * -------------------------------------------------------------------- */
+ return;
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * THE FIRST PHASE HAS BEEN COMPLETED.
+ * ------------------------------------------------------------------------ */
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE1_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::closingSrLab()
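+
+/* --------------------------------------------------------------------------
+ * THE COMPLETION CHECK ABOVE FOLLOWS A PATTERN USED THROUGHOUT THIS BLOCK:
+ * WALK A CIRCULAR INTRUSIVE LIST FROM ITS FIRST ELEMENT AND STOP AS SOON AS
+ * ONE ELEMENT IS NOT YET DONE. A MINIMAL SKETCH WITH A HYPOTHETICAL NODE
+ * TYPE STANDING IN FOR LogFileRecord; NOT PART OF THE BLOCK ITSELF.
+ * -------------------------------------------------------------------------- */
+struct SketchLogFileNode {
+ bool closed;
+ SketchLogFileNode* nextLogFile; // CIRCULAR: LAST NODE POINTS BACK TO FIRST
+};
+
+static bool sketchAllFilesClosed(SketchLogFileNode* firstLogFile)
+{
+ SketchLogFileNode* node = firstLogFile;
+ do {
+ if (!node->closed) {
+ return false; // EXIT AND WAIT FOR THE REMAINING LOG FILES
+ }
+ node = node->nextLogFile;
+ } while (node != firstLogFile);
+ return true; // ALL FILES IN THIS LOG PART ARE CLOSED
+}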
+
+/* ##########################################################################
+ * ####### SYSTEM RESTART PHASE TWO MODULE #######
+ *
+ * THIS MODULE HANDLES THE SYSTEM RESTART WHERE LQH CONTROLS TUP AND ACC TO
+ * ENSURE THAT THEY HAVE KNOWLEDGE OF ALL FRAGMENTS AND HAVE DONE THE NEEDED
+ * READING OF DATA FROM FILE AND EXECUTION OF LOCAL LOGS. THIS PROCESS
+ * EXECUTES CONCURRENTLY WITH PHASE ONE OF THE SYSTEM RESTART. THIS PHASE
+ * FINDS THE INFORMATION ABOUT THE FRAGMENT LOG NEEDED TO EXECUTE THE FRAGMENT
+ * LOG.
+ * WHEN TUP AND ACC HAVE PREPARED ALL FRAGMENTS THEN LQH ORDERS THOSE LQH'S
+ * THAT ARE RESPONSIBLE FOR EXECUTING THE FRAGMENT LOGS TO DO SO. IT IS POSSIBLE
+ * THAT ANOTHER NODE EXECUTES THE LOG FOR A FRAGMENT RESIDING AT THIS NODE.
+ * ########################################################################## */
+/* ***************>> */
+/* START_FRAGREQ > */
+/* ***************>> */
+void Dblqh::execSTART_FRAGREQ(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ jamEntry();
+
+ tabptr.i = startFragReq->tableId;
+ Uint32 fragId = startFragReq->fragId;
+
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (!getFragmentrec(signal, fragId)) {
+ startFragRefLab(signal);
+ return;
+ }//if
+ tabptr.p->tableStatus = Tablerec::TABLE_DEFINED;
+
+ initFragrecSr(signal);
+ if (startFragReq->lcpNo == ZNIL) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THERE WAS NO LOCAL CHECKPOINT AVAILABLE FOR THIS FRAGMENT, SO THERE
+ * IS NOTHING TO READ IN. WE HAVE ALREADY ADDED THE FRAGMENT AS AN
+ * EMPTY FRAGMENT AT THIS POINT. THUS WE CAN SIMPLY EXIT AND THE
+ * FRAGMENT WILL PARTICIPATE IN THE EXECUTION OF THE LOG.
+ * PUT THE FRAGMENT ON THE LIST OF COMPLETED FRAGMENTS FOR LOG EXECUTION.
+ * ---------------------------------------------------------------------- */
+ fragptr.p->nextFrag = cfirstCompletedFragSr;
+ cfirstCompletedFragSr = fragptr.i;
+ return;
+ }//if
+ if (cfirstWaitFragSr == RNIL) {
+ jam();
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ initLcpSr(signal, startFragReq->lcpNo,
+ startFragReq->lcpId, tabptr.i,
+ fragId, fragptr.i);
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
+ return;
+ }//if
+ }//if
+ fragptr.p->nextFrag = cfirstWaitFragSr;
+ cfirstWaitFragSr = fragptr.i;
+}//Dblqh::execSTART_FRAGREQ()
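+
+/* --------------------------------------------------------------------------
+ * FRAGMENTS ARE QUEUED ABOVE BY HEAD INSERTION INTO INDEX BASED INTRUSIVE
+ * LISTS (cfirstWaitFragSr AND cfirstCompletedFragSr). A MINIMAL SKETCH OF
+ * THAT IDIOM, USING A HYPOTHETICAL RECORD ARRAY AND RNIL AS END MARKER; THE
+ * NAMES BELOW ARE ILLUSTRATIVE AND NOT PART OF THE BLOCK.
+ * -------------------------------------------------------------------------- */
+struct SketchFragRec {
+ Uint32 nextFrag; // INDEX OF NEXT FRAGMENT, RNIL TERMINATES THE LIST
+};
+
+static void sketchPushFrag(SketchFragRec* records, Uint32& firstIndex, Uint32 fragIndex)
+{
+ records[fragIndex].nextFrag = firstIndex; // LINK IN FRONT OF CURRENT HEAD
+ firstIndex = fragIndex; // NEW FRAGMENT BECOMES THE LIST HEAD
+}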
+
+void Dblqh::startFragRefLab(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ BlockReference userRef = startFragReq->userRef;
+ Uint32 userPtr = startFragReq->userPtr;
+ signal->theData[0] = userPtr;
+ signal->theData[1] = terrorCode;
+ signal->theData[2] = cownNodeid;
+ sendSignal(userRef, GSN_START_FRAGREF, signal, 3, JBB);
+ return;
+}//Dblqh::startFragRefLab()
+
+/* ***************>> */
+/* SR_FRAGIDCONF > */
+/* ***************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_PTR:LCP_STATE = SR_WAIT_FRAGID
+ * -------------------------------------------------------------------------- */
+void Dblqh::execSR_FRAGIDCONF(Signal* signal)
+{
+ SrFragidConf * const srFragidConf = (SrFragidConf *)&signal->theData[0];
+ jamEntry();
+
+ lcpPtr.i = srFragidConf->lcpPtr;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_SR_WAIT_FRAGID);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECKING OF THE TNO_LOCFRAG VALUE. AN OUT-OF-BOUNDS VALUE LEADS
+ * TO AN INDEX OUT OF RANGE, WHICH CAUSES A SYSTEM RESTART AS DESIRED.
+ * ------------------------------------------------------------------------ */
+ lcpPtr.p->lcpAccptr = srFragidConf->accPtr;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->accFragptr[0] = srFragidConf->fragPtr[0];
+ fragptr.p->accFragptr[1] = srFragidConf->fragPtr[1];
+ Uint32 noLocFrag = srFragidConf->noLocFrag;
+ ndbrequire(noLocFrag == 2);
+ Uint32 fragid[2];
+ Uint32 i;
+ for (i = 0; i < noLocFrag; i++) {
+ fragid[i] = srFragidConf->fragId[i];
+ }//for
+
+ for (i = 0; i < noLocFrag; i++) {
+ jam();
+ Uint32 fragId = fragid[i];
+ /* ----------------------------------------------------------------------
+ * THERE IS NO ERROR CHECKING ON PURPOSE. IT IS POSSIBLE TO CALCULATE HOW
+ * MANY LOCAL LCP RECORDS THERE SHOULD BE. IT SHOULD NEVER HAPPEN THAT
+ * NONE IS FREE. IF NONE IS FREE THE RESULT IS A POINTER OUT OF RANGE,
+ * WHICH IS AN ERROR IN ITSELF. THIS REUSES THE ERROR HANDLING IN THE
+ * AXE VM.
+ * ---------------------------------------------------------------------- */
+ seizeLcpLoc(signal);
+ initLcpLocAcc(signal, fragId);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_STARTED;
+ signal->theData[0] = lcpPtr.p->lcpAccptr;
+ signal->theData[1] = lcpLocptr.i;
+ signal->theData[2] = lcpLocptr.p->locFragid;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED;
+ sendSignal(fragptr.p->accBlockref, GSN_ACC_SRREQ, signal, 4, JBB);
+ seizeLcpLoc(signal);
+ initLcpLocTup(signal, fragId);
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_STARTED;
+ signal->theData[0] = lcpLocptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[3] = lcpLocptr.p->locFragid;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUP_SRREQ, signal, 5, JBB);
+ }//for
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_STARTED;
+ return;
+}//Dblqh::execSR_FRAGIDCONF()
+
+/* ***************> */
+/* SR_FRAGIDREF > */
+/* ***************> */
+void Dblqh::execSR_FRAGIDREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execSR_FRAGIDREF()
+
+/* ************>> */
+/* ACC_SRCONF > */
+/* ************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_ACC_STARTED
+ * -------------------------------------------------------------------------- */
+void Dblqh::execACC_SRCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (lcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ------------------------------------------------------------------------ */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_ACC_COMPLETED;
+ srCompletedLab(signal);
+ return;
+}//Dblqh::execACC_SRCONF()
+
+/* ************> */
+/* ACC_SRREF > */
+/* ************> */
+void Dblqh::execACC_SRREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execACC_SRREF()
+
+/* ************>> */
+/* TUP_SRCONF > */
+/* ************>> */
+/* --------------------------------------------------------------------------
+ * PRECONDITION: LCP_LOCPTR:LCP_LOCSTATE = SR_TUP_STARTED
+ * -------------------------------------------------------------------------- */
+void Dblqh::execTUP_SRCONF(Signal* signal)
+{
+ jamEntry();
+ lcpLocptr.i = signal->theData[0];
+ ptrCheckGuard(lcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ Uint32 tupFragPtr = signal->theData[1];
+ ndbrequire(lcpLocptr.p->lcpLocstate == LcpLocRecord::SR_TUP_STARTED);
+
+ lcpPtr.i = lcpLocptr.p->masterLcpRec;
+ ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord);
+ /* ------------------------------------------------------------------------
+ * NO ERROR CHECK ON USING VALUE IN MASTER_LCP_REC. ERROR IN THIS REFERENCE
+ * WILL CAUSE POINTER OUT OF RANGE WHICH CAUSES A SYSTEM RESTART.
+ * ------------------------------------------------------------------------ */
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::SR_TUP_COMPLETED;
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (lcpLocptr.i == lcpPtr.p->firstLcpLocTup) {
+ jam();
+ fragptr.p->tupFragptr[1] = tupFragPtr;
+ } else {
+ jam();
+ fragptr.p->tupFragptr[0] = tupFragPtr;
+ }//if
+ srCompletedLab(signal);
+ return;
+}//Dblqh::execTUP_SRCONF()
+
+void Dblqh::srCompletedLab(Signal* signal)
+{
+ checkSrCompleted(signal);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_SR_COMPLETED) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THE SYSTEM RESTART OF THIS FRAGMENT HAS BEEN COMPLETED. IT IS NOW
+ * TIME TO START A SYSTEM RESTART ON THE NEXT FRAGMENT OR CONTINUE
+ * WITH THE NEXT STEP OF THE SYSTEM RESTART. THIS STEP IS TO EXECUTE
+ * THE FRAGMENT LOGS.
+ * ----------------------------------------------------------------------
+ * WE RELEASE THE LOCAL LCP RECORDS.
+ * --------------------------------------------------------------------- */
+ releaseLocalLcps(signal);
+ /* ----------------------------------------------------------------------
+ * PUT FRAGMENT ON LIST OF FRAGMENTS WHICH HAVE BEEN STARTED AS PART OF
+ * THE SYSTEM RESTART. THEY ARE NOW WAITING TO EXECUTE THE FRAGMENT LOG.
+ * --------------------------------------------------------------------- */
+ fragptr.i = lcpPtr.p->currentFragment.fragPtrI;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->nextFrag = cfirstCompletedFragSr;
+ cfirstCompletedFragSr = fragptr.i;
+ if (cfirstWaitFragSr != RNIL) {
+ jam();
+ /* --------------------------------------------------------------------
+ * ANOTHER FRAGMENT IS WAITING FOR SYSTEM RESTART. RESTART THIS
+ * FRAGMENT AS WELL.
+ * -------------------------------------------------------------------- */
+ fragptr.i = cfirstWaitFragSr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ cfirstWaitFragSr = fragptr.p->nextFrag;
+ /* --------------------------------------------------------------------
+ * RETRIEVE DATA FROM THE FRAGMENT RECORD.
+ * -------------------------------------------------------------------- */
+ ndbrequire(fragptr.p->srChkpnr < MAX_LCP_STORED);
+ initLcpSr(signal,
+ fragptr.p->srChkpnr,
+ fragptr.p->lcpId[fragptr.p->srChkpnr],
+ fragptr.p->tabRef,
+ fragptr.p->fragId,
+ fragptr.i);
+ signal->theData[0] = lcpPtr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo;
+ signal->theData[3] = lcpPtr.p->currentFragment.lcpFragOrd.tableId;
+ signal->theData[4] = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId;
+ sendSignal(fragptr.p->accBlockref, GSN_SR_FRAGIDREQ, signal, 5, JBB);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * NO MORE FRAGMENTS ARE WAITING FOR SYSTEM RESTART.
+ * -------------------------------------------------------------------- */
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ if (cstartRecReq == ZTRUE) {
+ jam();
+ /* ----------------------------------------------------------------
+ * WE HAVE ALSO RECEIVED AN INDICATION THAT NO MORE FRAGMENTS
+ * NEED RESTART.
+ * NOW IT IS TIME TO START EXECUTING THE UNDO LOG.
+ * ----------------------------------------------------------------
+ * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START
+ * EXECUTING THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE
+ * FRAGMENT LOGS CAN BE EXECUTED.
+ * ---------------------------------------------------------------- */
+ csrExecUndoLogState = EULS_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------
+ * WE HAVE NOT RECEIVED ALL FRAGMENTS YET, OR AT LEAST WE HAVE
+ * NOT RECEIVED THE START_RECREQ SIGNAL. EXIT AND WAIT
+ * FOR MORE.
+ * ---------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//if
+ /*---------------*/
+ /* ELSE */
+ /*-------------------------------------------------------------------------
+ * THE SYSTEM RESTART ON THIS FRAGMENT HAS NOT BEEN COMPLETED,
+ * EXIT AND WAIT FOR MORE SIGNALS
+ *-------------------------------------------------------------------------
+ * DO NOTHING, EXIT IS EXECUTED BELOW
+ *------------------------------------------------------------------------- */
+ return;
+}//Dblqh::srCompletedLab()
+
+/* ************> */
+/* TUP_SRREF > */
+/* ************> */
+void Dblqh::execTUP_SRREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execTUP_SRREF()
+
+/* ***************> */
+/* START_RECREQ > */
+/* ***************> */
+void Dblqh::execSTART_RECREQ(Signal* signal)
+{
+ CRASH_INSERTION(5027);
+
+ jamEntry();
+ StartRecReq * const req = (StartRecReq*)&signal->theData[0];
+ cmasterDihBlockref = req->senderRef;
+
+ crestartOldestGci = req->keepGci;
+ crestartNewestGci = req->lastCompletedGci;
+ cnewestGci = req->newestGci;
+
+ ndbrequire(req->receivingNodeId == cownNodeid);
+
+ cnewestCompletedGci = cnewestGci;
+ cstartRecReq = ZTRUE;
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+ }//for
+ /* ------------------------------------------------------------------------
+ * WE HAVE TO SET THE OLDEST AND THE NEWEST GLOBAL CHECKPOINT IDENTITY
+ * THAT WILL SURVIVE THIS SYSTEM RESTART. THIS IS NEEDED SO THAT WE CAN
+ * SET THE LOG HEAD AND LOG TAIL PROPERLY BEFORE STARTING THE SYSTEM AGAIN.
+ * WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED
+ * WITH A PROPER GCI.
+ *------------------------------------------------------------------------ */
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE2_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){
+ jam();
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+ return;
+ }//if
+ if (cfirstWaitFragSr == RNIL) {
+ /* ----------------------------------------------------------------------
+ * THERE ARE NO FRAGMENTS WAITING TO BE RESTARTED.
+ * --------------------------------------------------------------------- */
+ lcpPtr.i = 0;
+ ptrAss(lcpPtr, lcpRecord);
+ if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE ARE NO FRAGMENTS THAT ARE CURRENTLY PERFORMING THEIR
+ * SYSTEM RESTART.
+ * --------------------------------------------------------------------
+ * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START EXECUTING
+ * THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE FRAGMENT LOGS
+ * CAN BE EXECUTED.
+ * ------------------------------------------------------------------- */
+ csrExecUndoLogState = EULS_STARTED;
+ signal->theData[0] = caccBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(caccBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ signal->theData[0] = ctupBlockref;
+ signal->theData[1] = cownref;
+ sendSignal(ctupBlockref, GSN_START_RECREQ, signal, 2, JBB);
+ }//if
+ }//if
+ /* -----------------------------------------------------------------------
+ * EXIT AND WAIT FOR COMPLETION OF ALL FRAGMENTS.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::execSTART_RECREQ()
+
+/* ***************>> */
+/* START_RECCONF > */
+/* ***************>> */
+void Dblqh::execSTART_RECCONF(Signal* signal)
+{
+ jamEntry();
+ BlockReference userRef = signal->theData[0];
+ if (userRef == caccBlockref) {
+ if (csrExecUndoLogState == EULS_STARTED) {
+ jam();
+ csrExecUndoLogState = EULS_ACC_COMPLETED;
+ } else {
+ ndbrequire(csrExecUndoLogState == EULS_TUP_COMPLETED);
+ jam();
+ csrExecUndoLogState = EULS_COMPLETED;
+ /* --------------------------------------------------------------------
+ * START THE FIRST PHASE OF EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ } else {
+ ndbrequire(userRef == ctupBlockref);
+ if (csrExecUndoLogState == EULS_STARTED) {
+ jam();
+ csrExecUndoLogState = EULS_TUP_COMPLETED;
+ } else {
+ ndbrequire(csrExecUndoLogState == EULS_ACC_COMPLETED);
+ jam();
+ csrExecUndoLogState = EULS_COMPLETED;
+ /* --------------------------------------------------------------------
+ * START THE FIRST PHASE OF EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ }//if
+ return;
+}//Dblqh::execSTART_RECCONF()
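+
+/* --------------------------------------------------------------------------
+ * THE csrExecUndoLogState HANDLING ABOVE IS A TWO PARTY COMPLETION TRACKER:
+ * WHICHEVER OF ACC AND TUP CONFIRMS FIRST MOVES THE STATE TO A HALF DONE
+ * VALUE AND THE SECOND CONFIRMATION COMPLETES IT. A MINIMAL SKETCH WITH A
+ * HYPOTHETICAL ENUM MIRRORING THE EULS_* STATES; NOT PART OF THE BLOCK.
+ * -------------------------------------------------------------------------- */
+enum SketchUndoLogState {
+ SKETCH_EULS_STARTED,
+ SKETCH_EULS_ACC_COMPLETED,
+ SKETCH_EULS_TUP_COMPLETED,
+ SKETCH_EULS_COMPLETED
+};
+
+static SketchUndoLogState sketchOnUndoLogConf(SketchUndoLogState state, bool fromAcc)
+{
+ if (state == SKETCH_EULS_STARTED) {
+ return fromAcc ? SKETCH_EULS_ACC_COMPLETED : SKETCH_EULS_TUP_COMPLETED;
+ }
+ return SKETCH_EULS_COMPLETED; // THE OTHER BLOCK HAD ALREADY COMPLETED
+}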
+
+/* ***************> */
+/* START_RECREF > */
+/* ***************> */
+void Dblqh::execSTART_RECREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}//Dblqh::execSTART_RECREF()
+
+/* ***************>> */
+/* START_EXEC_SR > */
+/* ***************>> */
+void Dblqh::execSTART_EXEC_SR(Signal* signal)
+{
+ FragrecordPtr prevFragptr;
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ prevFragptr.i = signal->theData[1];
+ if (fragptr.i == RNIL) {
+ jam();
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ /* ----------------------------------------------------------------------
+ * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON.
+ * SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL
+ * NOT REQUEST ANY MORE FRAGMENTS TO EXECUTE THE FRAGMENT LOG ON.
+ * ----------------------------------------------------------------------
+ * WE NEED TO SEND THOSE SIGNALS EVEN IF WE HAVE NOT REQUESTED
+ * THAT ANY FRAGMENTS PARTICIPATE IN THIS PHASE.
+ * --------------------------------------------------------------------- */
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ ndbrequire(cnodeData[i] < MAX_NDB_NODES);
+ BlockReference ref = calcLqhBlockRef(cnodeData[i]);
+ signal->theData[0] = cownNodeid;
+ sendSignal(ref, GSN_EXEC_SRREQ, signal, 1, JBB);
+ }//if
+ }//for
+ } else {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->srNoLognodes > csrPhasesCompleted) {
+ jam();
+ Uint32 index = csrPhasesCompleted;
+ arrGuard(index, 4);
+ BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]);
+ fragptr.p->srStatus = Fragrecord::SS_STARTED;
+ /* --------------------------------------------------------------------
+ * SINCE WE CAN HAVE SEVERAL LQH NODES PER FRAGMENT WE CALCULATE
+ * THE LQH POINTER IN SUCH A WAY THAT WE CAN DEDUCE WHICH OF THE
+ * LQH NODES HAS RESPONDED WHEN EXEC_FRAGCONF IS RECEIVED.
+ * ------------------------------------------------------------------- */
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ execFragReq->userPtr = fragptr.i;
+ execFragReq->userRef = cownref;
+ execFragReq->tableId = fragptr.p->tabRef;
+ execFragReq->fragId = fragptr.p->fragId;
+ execFragReq->startGci = fragptr.p->srStartGci[index];
+ execFragReq->lastGci = fragptr.p->srLastGci[index];
+ sendSignal(ref, GSN_EXEC_FRAGREQ, signal, ExecFragReq::SignalLength, JBB);
+ prevFragptr.i = fragptr.i;
+ fragptr.i = fragptr.p->nextFrag;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * THIS FRAGMENT IS NOW FINISHED WITH THE SYSTEM RESTART. IT DOES
+ * NOT NEED TO PARTICIPATE IN ANY MORE PHASES. REMOVE IT FROM THE
+ * LIST OF COMPLETED FRAGMENTS TO EXECUTE THE LOG ON.
+ * ALSO SEND START_FRAGCONF TO DIH AND SET THE STATE TO ACTIVE ON THE
+ * FRAGMENT.
+ * ------------------------------------------------------------------- */
+ Uint32 next = fragptr.p->nextFrag;
+ if (prevFragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(prevFragptr, cfragrecFileSize, fragrecord);
+ prevFragptr.p->nextFrag = next;
+ } else {
+ jam();
+ cfirstCompletedFragSr = next;
+ }//if
+
+ /**
+ * Put fragment on list which has completed REDO log
+ */
+ fragptr.p->nextFrag = c_redo_log_complete_frags;
+ c_redo_log_complete_frags = fragptr.i;
+
+ fragptr.p->fragStatus = Fragrecord::FSACTIVE;
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ signal->theData[0] = fragptr.p->srUserptr;
+ signal->theData[1] = cownNodeid;
+ sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB);
+ /* --------------------------------------------------------------------
+ * WE HAVE TO ENSURE THAT THIS FRAGMENT IS NOT PUT BACK ON THE LIST BY
+ * MISTAKE. WE DO THIS BY ALSO REMOVING IT AS PREVIOUS IN START_EXEC_SR.
+ * THIS IS PERFORMED BY KEEPING PREV_FRAGPTR UNCHANGED WHILE MOVING
+ * FRAGPTR TO THE NEXT FRAGMENT IN THE LIST.
+ * ------------------------------------------------------------------- */
+ fragptr.i = next;
+ }//if
+ signal->theData[0] = fragptr.i;
+ signal->theData[1] = prevFragptr.i;
+ sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
+ }//if
+ return;
+}//Dblqh::execSTART_EXEC_SR()
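+
+/* --------------------------------------------------------------------------
+ * THE FUNCTION ABOVE WALKS THE COMPLETED FRAGMENT LIST ONE ELEMENT PER
+ * SIGNAL BY RESENDING START_EXEC_SR TO ITSELF WITH THE PAIR (CURRENT,
+ * PREVIOUS). A MINIMAL SKETCH OF THE UNLINK STEP USED WHEN A FRAGMENT IS
+ * FINISHED; THE NAMES ARE HYPOTHETICAL. NOTE THAT PREVIOUS IS KEPT
+ * UNCHANGED AFTER A REMOVAL SO THE LINKS STAY CONSISTENT.
+ * -------------------------------------------------------------------------- */
+static void sketchUnlinkCurrentFrag(Uint32* nextLinks,
+ Uint32& firstIndex,
+ Uint32 prevIndex,
+ Uint32 currentIndex)
+{
+ Uint32 next = nextLinks[currentIndex];
+ if (prevIndex != RNIL) {
+ nextLinks[prevIndex] = next; // BYPASS THE REMOVED FRAGMENT
+ } else {
+ firstIndex = next; // THE REMOVED FRAGMENT WAS THE LIST HEAD
+ }
+}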
+
+/* ***************> */
+/* EXEC_FRAGREQ > */
+/* ***************> */
+/* --------------------------------------------------------------------------
+ * THIS SIGNAL IS USED TO REQUEST THAT A FRAGMENT PARTICIPATES IN EXECUTING
+ * THE LOG IN THIS NODE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execEXEC_FRAGREQ(Signal* signal)
+{
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ jamEntry();
+ tabptr.i = execFragReq->tableId;
+ Uint32 fragId = execFragReq->fragId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (!getFragmentrec(signal, fragId)) {
+ jam();
+ if (!insertFragrec(signal, fragId)) {
+ jam();
+ sendExecFragRefLab(signal);
+ return;
+ }//if
+ initFragrec(signal, tabptr.i, fragId, ZLOG_NODE);
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
+ } else {
+ jam();
+ if (fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER) {
+ jam();
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE_REMOVE_AFTER;
+ } else {
+ jam();
+ }//if
+ }//if
+ ndbrequire(fragptr.p->execSrNoReplicas < 4);
+ fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef;
+ fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr;
+ fragptr.p->execSrStartGci[fragptr.p->execSrNoReplicas] = execFragReq->startGci;
+ fragptr.p->execSrLastGci[fragptr.p->execSrNoReplicas] = execFragReq->lastGci;
+ fragptr.p->execSrStatus = Fragrecord::ACTIVE;
+ fragptr.p->execSrNoReplicas++;
+ cnoFragmentsExecSr++;
+ return;
+}//Dblqh::execEXEC_FRAGREQ()
+
+void Dblqh::sendExecFragRefLab(Signal* signal)
+{
+ ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0];
+ BlockReference retRef = execFragReq->userRef;
+ Uint32 retPtr = execFragReq->userPtr;
+
+ signal->theData[0] = retPtr;
+ signal->theData[1] = terrorCode;
+ sendSignal(retRef, GSN_EXEC_FRAGREF, signal, 2, JBB);
+ return;
+}//Dblqh::sendExecFragRefLab()
+
+/* ***************>> */
+/* EXEC_FRAGCONF > */
+/* ***************>> */
+void Dblqh::execEXEC_FRAGCONF(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->srStatus = Fragrecord::SS_COMPLETED;
+ return;
+}//Dblqh::execEXEC_FRAGCONF()
+
+/* ***************> */
+/* EXEC_FRAGREF > */
+/* ***************> */
+void Dblqh::execEXEC_FRAGREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ systemErrorLab(signal);
+ return;
+}//Dblqh::execEXEC_FRAGREF()
+
+/* *************** */
+/* EXEC_SRCONF > */
+/* *************** */
+void Dblqh::execEXEC_SRCONF(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ arrGuard(nodeId, MAX_NDB_NODES);
+ cnodeExecSrState[nodeId] = ZEXEC_SR_COMPLETED;
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ nodeId = cnodeData[i];
+ arrGuard(nodeId, MAX_NDB_NODES);
+ if (cnodeExecSrState[nodeId] != ZEXEC_SR_COMPLETED) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ALL NODES HAVE NOT REPORTED COMPLETION OF EXECUTING FRAGMENT
+ * LOGS YET.
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * CLEAR NODE SYSTEM RESTART EXECUTION STATE TO PREPARE FOR NEXT PHASE OF
+ * LOG EXECUTION.
+ * ----------------------------------------------------------------------- */
+ for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
+ cnodeExecSrState[nodeId] = ZSTART_SR;
+ }//for
+ /* ------------------------------------------------------------------------
+ * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE
+ * NEXT PHASE.
+ * ----------------------------------------------------------------------- */
+ fragptr.i = cfirstCompletedFragSr;
+ if (fragptr.i == RNIL) {
+ jam();
+ execSrCompletedLab(signal);
+ return;
+ }//if
+ do {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ ndbrequire(fragptr.p->srStatus == Fragrecord::SS_COMPLETED);
+ fragptr.i = fragptr.p->nextFrag;
+ } while (fragptr.i != RNIL);
+ execSrCompletedLab(signal);
+ return;
+}//Dblqh::execEXEC_SRCONF()
+
+void Dblqh::execSrCompletedLab(Signal* signal)
+{
+ csrPhasesCompleted++;
+ /* ------------------------------------------------------------------------
+ * ALL FRAGMENTS WERE COMPLETED. THIS PHASE IS COMPLETED. IT IS NOW TIME TO
+ * START THE NEXT PHASE.
+ * ----------------------------------------------------------------------- */
+ if (csrPhasesCompleted >= 4) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THIS WAS THE LAST PHASE. WE HAVE NOW COMPLETED THE EXECUTION OF THE
+ * FRAGMENT LOGS IN ALL NODES. BEFORE WE SEND START_RECCONF TO THE
+ * MASTER DIH TO INDICATE A COMPLETED SYSTEM RESTART IT IS NECESSARY
+ * TO FIND THE HEAD AND THE TAIL OF THE LOG SO THAT LOGGING CAN
+ * CONTINUE WHEN NEW OPERATIONS START TO COME AGAIN.
+ *
+ * THE FIRST STEP IS TO FIND THE HEAD AND TAIL MBYTE OF EACH LOG PART.
+ * TO DO THIS WE REUSE THE CONTINUEB SIGNAL SR_LOG_LIMITS. THEN WE
+ * HAVE TO FIND THE ACTUAL PAGE NUMBER AND PAGE INDEX WHERE TO
+ * CONTINUE WRITING THE LOG AFTER THE SYSTEM RESTART.
+ * --------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
+ logPartPtr.p->logLastGci = crestartNewestGci;
+ logPartPtr.p->logStartGci = crestartOldestGci;
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
+ if (logPartPtr.p->headFileNo == ZNIL) {
+ jam();
+ /* -----------------------------------------------------------------
+ * IF WE HAVEN'T FOUND ANY HEAD OF THE LOG THEN WE HAVE A SERIOUS
+ * PROBLEM. THIS SHOULD NOT OCCUR. IF IT OCCURS ANYWAY THEN WE
+ * HAVE TO FIND A CURE FOR THIS PROBLEM.
+ * ----------------------------------------------------------------- */
+ systemErrorLab(signal);
+ return;
+ }//if
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logPartPtr.p->lastLogfile;
+ signal->theData[3] = logPartPtr.p->lastMbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ }//for
+ return;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THERE ARE YET MORE PHASES TO RESTART.
+ * WE MUST INITIALISE DATA FOR THE NEXT PHASE AND SEND THE START SIGNAL.
+ * --------------------------------------------------------------------- */
+ startExecSr(signal);
+ }//if
+ return;
+}//Dblqh::execSrCompletedLab()
+
+/* ************>> */
+/* EXEC_SRREQ > */
+/* ************>> */
+void Dblqh::execEXEC_SRREQ(Signal* signal)
+{
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ ndbrequire(nodeId < MAX_NDB_NODES);
+ cnodeSrState[nodeId] = ZEXEC_SR_COMPLETED;
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ nodeId = cnodeData[i];
+ if (cnodeSrState[nodeId] != ZEXEC_SR_COMPLETED) {
+ jam();
+ /* ------------------------------------------------------------------
+ * ALL NODES HAVE NOT REPORTED COMPLETION OF SENDING EXEC_FRAGREQ YET.
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * CLEAR NODE SYSTEM RESTART STATE TO PREPARE FOR NEXT PHASE OF LOG
+ * EXECUTION
+ * ----------------------------------------------------------------------- */
+ for (nodeId = 0; nodeId < MAX_NDB_NODES; nodeId++) {
+ cnodeSrState[nodeId] = ZSTART_SR;
+ }//for
+ if (csrPhasesCompleted != 0) {
+ /* ----------------------------------------------------------------------
+ * THE FIRST PHASE MUST ALWAYS EXECUTE THE LOG.
+ * --------------------------------------------------------------------- */
+ if (cnoFragmentsExecSr == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE WERE NO FRAGMENTS THAT NEEDED TO EXECUTE THE LOG IN THIS PHASE.
+ * ------------------------------------------------------------------- */
+ srPhase3Comp(signal);
+ return;
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------
+ * NOW ALL NODES HAVE SENT ALL EXEC_FRAGREQ. NOW WE CAN START EXECUTING THE
+ * LOG FROM THE MINIMUM GCI NEEDED UNTIL THE MAXIMUM GCI NEEDED.
+ *
+ * WE MUST FIRST CHECK IF THE FIRST PHASE OF THE SYSTEM RESTART HAS BEEN
+ * COMPLETED. THIS HANDLING IS PERFORMED IN THE FILE SYSTEM MODULE
+ * ----------------------------------------------------------------------- */
+ signal->theData[0] = ZSR_PHASE3_START;
+ signal->theData[1] = ZSR_PHASE2_COMPLETED;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::execEXEC_SRREQ()
+
+/* ######################################################################### */
+/* SYSTEM RESTART PHASE THREE MODULE */
+/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */
+/* */
+/* THIS MODULE IS CONCERNED WITH EXECUTING THE FRAGMENT LOG. IT ALSO */
+/* CONTAINS THE SIGNAL RECEPTIONS LQHKEYCONF AND LQHKEYREF SINCE LQHKEYREQ */
+/* IS USED TO EXECUTE THE LOG RECORDS. */
+/* */
+/* BEFORE IT STARTS IT HAS BEEN DECIDED WHERE TO START AND WHERE TO STOP */
+/* READING THE FRAGMENT LOG BY USING THE INFORMATION ABOUT GCI DISCOVERED IN */
+/* PHASE ONE OF THE SYSTEM RESTART. */
+/* ######################################################################### */
+/*---------------------------------------------------------------------------*/
+/* PHASE THREE OF THE SYSTEM RESTART CAN NOW START. ONE OF THE PHASES HAS */
+/* COMPLETED. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::srPhase3Start(Signal* signal)
+{
+ UintR tsrPhaseStarted;
+
+ jamEntry();
+ tsrPhaseStarted = signal->theData[0];
+ if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) {
+ jam();
+ csrPhaseStarted = tsrPhaseStarted;
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ ndbrequire(cinitialStartOngoing == ZTRUE);
+ cinitialStartOngoing = ZFALSE;
+ checkStartCompletedLab(signal);
+ }//if
+ return;
+ }//if
+ ndbrequire(csrPhaseStarted != tsrPhaseStarted);
+ ndbrequire(csrPhaseStarted != ZSR_BOTH_PHASES_STARTED);
+
+ csrPhaseStarted = ZSR_BOTH_PHASES_STARTED;
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_STARTED;
+ logPartPtr.p->logStartGci = (UintR)-1;
+ if (csrPhasesCompleted == 0) {
+ jam();
+ /* --------------------------------------------------------------------
+ * IN THE FIRST PHASE WE MUST ENSURE THAT EXECUTION REACHES THE END OF THE LOG.
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->logLastGci = crestartNewestGci;
+ } else {
+ jam();
+ logPartPtr.p->logLastGci = 2;
+ }//if
+ }//for
+ if (cstartType == NodeState::ST_NODE_RESTART) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * FOR A NODE RESTART WE HAVE NO FRAGMENTS DEFINED YET.
+ * THUS WE CAN SKIP THAT PART.
+ * --------------------------------------------------------------------- */
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = RNIL;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = 0;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ return;
+}//Dblqh::srPhase3Start()
+
+/* --------------------------------------------------------------------------
+ * WE NOW NEED TO FIND THE LIMITS WITHIN WHICH TO EXECUTE
+ * THE FRAGMENT LOG.
+ * ------------------------------------------------------------------------- */
+void Dblqh::srGciLimits(Signal* signal)
+{
+ LogPartRecordPtr tmpLogPartPtr;
+
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ Uint32 loopCount = 0;
+ logPartPtr.i = 0;
+ ptrAss(logPartPtr, logPartRecord);
+ while (fragptr.i < cfragrecFileSize) {
+ jam();
+ ptrAss(fragptr, fragrecord);
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ jam();
+ ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
+ for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
+ jam();
+ if (fragptr.p->execSrStartGci[i] < logPartPtr.p->logStartGci) {
+ jam();
+ logPartPtr.p->logStartGci = fragptr.p->execSrStartGci[i];
+ }//if
+ if (fragptr.p->execSrLastGci[i] > logPartPtr.p->logLastGci) {
+ jam();
+ logPartPtr.p->logLastGci = fragptr.p->execSrLastGci[i];
+ }//if
+ }//for
+ }//if
+ loopCount++;
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSR_GCI_LIMITS;
+ signal->theData[1] = fragptr.i + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ fragptr.i++;
+ }//if
+ }//while
+ if (logPartPtr.p->logStartGci == (UintR)-1) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE WERE NO FRAGMENTS TO INSTALL. WE WILL EXECUTE AS LITTLE OF
+ * THE LOG AS POSSIBLE, JUST ENOUGH TO REACH THE END OF THE LOG.
+ * WE DO THIS BY STARTING AT THE STOP GCI.
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->logStartGci = logPartPtr.p->logLastGci;
+ }//if
+ for (tmpLogPartPtr.i = 1; tmpLogPartPtr.i < 4; tmpLogPartPtr.i++) {
+ ptrAss(tmpLogPartPtr, logPartRecord);
+ tmpLogPartPtr.p->logStartGci = logPartPtr.p->logStartGci;
+ tmpLogPartPtr.p->logLastGci = logPartPtr.p->logLastGci;
+ }//for
+ for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logPartPtr.p->lastLogfile;
+ signal->theData[3] = logPartPtr.p->lastMbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ }//for
+}//Dblqh::srGciLimits()
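+
+/* --------------------------------------------------------------------------
+ * srGciLimits USES THE BLOCK'S USUAL REAL TIME BREAK PATTERN: DO A SMALL
+ * BOUNDED BATCH OF WORK AND THEN SEND CONTINUEB TO ITSELF WITH THE NEXT
+ * INDEX SO THAT OTHER SIGNALS CAN BE SERVICED. A MINIMAL SKETCH OF THAT
+ * SHAPE WITH HYPOTHETICAL NAMES; THE BATCH SIZE USED ABOVE IS ROUGHLY 20
+ * FRAGMENT RECORDS PER SIGNAL.
+ * -------------------------------------------------------------------------- */
+static Uint32 sketchScanBatch(Uint32 startIndex, Uint32 totalRecords, Uint32 batchSize)
+{
+ Uint32 index = startIndex;
+ Uint32 done = 0;
+ while (index < totalRecords && done < batchSize) {
+ // ... EXAMINE RECORD index HERE ...
+ index++;
+ done++;
+ }
+ return index; // IF < totalRecords THE CALLER RESENDS CONTINUEB WITH IT
+}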
+
+/* --------------------------------------------------------------------------
+ * IT IS NOW TIME TO FIND WHERE TO START EXECUTING THE LOG.
+ * THIS SIGNAL IS SENT FOR EACH LOG PART AND STARTS THE EXECUTION
+ * OF THE LOG FOR THIS PART.
+ *-------------------------------------------------------------------------- */
+void Dblqh::srLogLimits(Signal* signal)
+{
+ Uint32 tlastPrepRef;
+ Uint32 tmbyte;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = signal->theData[1];
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ tmbyte = signal->theData[2];
+ Uint32 loopCount = 0;
+ /* ------------------------------------------------------------------------
+ * WE ARE SEARCHING FOR THE START AND STOP MBYTE OF THE LOG THAT IS TO BE
+ * EXECUTED.
+ * ----------------------------------------------------------------------- */
+ while(true) {
+ ndbrequire(tmbyte < 16);
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
+ if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST
+ * MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI
+ * HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAS
+ * TO BE EXECUTED.
+ * ------------------------------------------------------------------- */
+ logPartPtr.p->stopLogfile = logFilePtr.i;
+ logPartPtr.p->stopMbyte = tmbyte;
+ logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_START;
+ }//if
+ }//if
+ /* ------------------------------------------------------------------------
+ * AS LONG AS WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR
+ * THE START MBYTE. THE REASON IS THE FOLLOWING CHAIN OF INEQUALITIES:
+ * MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI
+ * THUS MAX_GCI_STARTED >= START_GCI, SO THE START CONDITION
+ * MAX_GCI_STARTED < START_GCI CANNOT BE TRUE UNTIL THE STOP MBYTE IS FOUND.
+ * ----------------------------------------------------------------------- */
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START) {
+ if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci) {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG.
+ * WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE
+ * PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE.
+ * ------------------------------------------------------------------- */
+ tlastPrepRef = logFilePtr.p->logLastPrepRef[tmbyte];
+ logPartPtr.p->startMbyte = tlastPrepRef & 65535;
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, tlastPrepRef >> 16, logPartPtr, &locLogFilePtr);
+ logPartPtr.p->startLogfile = locLogFilePtr.i;
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ }//if
+ }//if
+ if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
+ if (tmbyte == 0) {
+ jam();
+ tmbyte = ZNO_MBYTES_IN_FILE - 1;
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ } else {
+ jam();
+ tmbyte--;
+ }//if
+ if (logPartPtr.p->lastLogfile == logFilePtr.i) {
+ ndbrequire(logPartPtr.p->lastMbyte != tmbyte);
+ }//if
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSR_LOG_LIMITS;
+ signal->theData[1] = logPartPtr.i;
+ signal->theData[2] = logFilePtr.i;
+ signal->theData[3] = tmbyte;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }//if
+ loopCount++;
+ } else {
+ jam();
+ break;
+ }//if
+ }//while
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START
+ * EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO
+ * START EXECUTING THE LOG.
+ * ----------------------------------------------------------------------- */
+ if (logPartPtr.p->logPartState == LogPartRecord::SR_THIRD_PHASE_STARTED) {
+ jam();
+ logFilePtr.i = logPartPtr.p->startLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_START;
+ openFileRw(signal, logFilePtr);
+ } else {
+ jam();
+ ndbrequire(logPartPtr.p->logPartState == LogPartRecord::SR_FOURTH_PHASE_STARTED);
+ /* --------------------------------------------------------------------
+ * WE HAVE NOW FOUND THE TAIL MBYTE IN THE TAIL FILE.
+ * SET THOSE PARAMETERS IN THE LOG PART.
+ * WE HAVE ALSO FOUND THE HEAD MBYTE. WE STILL HAVE TO SEARCH
+ * FOR THE PAGE NUMBER AND PAGE INDEX WHERE TO SET THE HEAD.
+ * ------------------------------------------------------------------- */
+ logFilePtr.i = logPartPtr.p->startLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->logTailFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->logTailMbyte = logPartPtr.p->startMbyte;
+ /* --------------------------------------------------------------------
+ * THE HEAD WAS ACTUALLY FOUND DURING EXECUTION OF THE LOG, SO WE USE
+ * THAT INFO HERE RATHER THAN THE MBYTE WE FOUND TO BE THE HEAD.
+ * ------------------------------------------------------------------- */
+ LogFileRecordPtr locLogFilePtr;
+ findLogfile(signal, logPartPtr.p->headFileNo, logPartPtr, &locLogFilePtr);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
+ openFileRw(signal, locLogFilePtr);
+ }//if
+ return;
+}//Dblqh::srLogLimits()
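+
+/* --------------------------------------------------------------------------
+ * THE PREPARE REFERENCE READ ABOVE PACKS TWO VALUES INTO ONE 32 BIT WORD:
+ * THE FILE NUMBER IN THE UPPER 16 BITS AND THE MBYTE WITHIN THAT FILE IN
+ * THE LOWER 16 BITS (SEE tlastPrepRef >> 16 AND tlastPrepRef & 65535).
+ * A MINIMAL SKETCH OF THE PACKING AND UNPACKING WITH HYPOTHETICAL HELPER
+ * NAMES; NOT PART OF THE BLOCK.
+ * -------------------------------------------------------------------------- */
+static Uint32 sketchPackPrepRef(Uint32 fileNo, Uint32 mbyte)
+{
+ return (fileNo << 16) | (mbyte & 0xFFFF);
+}
+
+static void sketchUnpackPrepRef(Uint32 prepRef, Uint32& fileNo, Uint32& mbyte)
+{
+ fileNo = prepRef >> 16; // FILE NUMBER IN THE UPPER 16 BITS
+ mbyte = prepRef & 65535; // MBYTE WITHIN THE FILE IN THE LOWER 16 BITS
+}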
+
+void Dblqh::openExecSrStartLab(Signal* signal)
+{
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ logFilePtr.p->currentMbyte = logPartPtr.p->startMbyte;
+ /* ------------------------------------------------------------------------
+ * WE NEED A TC CONNECT RECORD TO HANDLE EXECUTION OF LOG RECORDS.
+ * ------------------------------------------------------------------------ */
+ seizeTcrec();
+ logPartPtr.p->logTcConrec = tcConnectptr.i;
+ /* ------------------------------------------------------------------------
+ * THE FIRST LOG RECORD TO EXECUTE IS ALWAYS AT A NEW MBYTE.
+ * SET THE NUMBER OF PAGES IN THE MAIN MEMORY BUFFER TO ZERO AS AN INITIAL
+ * VALUE. THIS VALUE IS UPDATED IN THE SUBROUTINE READ_EXEC_SR, WHICH ALSO
+ * ENSURES THAT PAGES ARE RELEASED.
+ * ----------------------------------------------------------------------- */
+ logPartPtr.p->mmBufferSize = 0;
+ readExecSrNewMbyte(signal);
+ return;
+}//Dblqh::openExecSrStartLab()
+
+/* ---------------------------------------------------------------------------
+ * WE WILL ALWAYS ENSURE THAT WE HAVE AT LEAST 16 KBYTE OF LOG PAGES WHEN WE
+ * START READING A LOG RECORD. THE ONLY EXCEPTION IS WHEN WE COME CLOSE TO A
+ * MBYTE BOUNDARY. SINCE WE KNOW THAT LOG RECORDS ARE NEVER WRITTEN ACROSS A
+ * MBYTE BOUNDARY THIS IS NOT A PROBLEM.
+ *
+ * WE START BY READING 64 KBYTE BEFORE STARTING TO EXECUTE THE LOG RECORDS.
+ * WHEN WE COME BELOW 64 KBYTE WE READ ANOTHER SET OF LOG PAGES. WHEN WE
+ * GO BELOW 16 KBYTE WE WAIT UNTIL THE READ PAGES HAVE ENTERED THE BLOCK.
+ * ------------------------------------------------------------------------- */
+/* --------------------------------------------------------------------------
+ * NEW PAGES FROM LOG FILE DURING EXECUTION OF LOG HAS ARRIVED.
+ * ------------------------------------------------------------------------- */
+void Dblqh::readExecSrLab(Signal* signal)
+{
+ buildLinkedLogPageList(signal);
+ /* ------------------------------------------------------------------------
+ * WE NEED TO SET THE CURRENT PAGE INDEX OF THE FIRST PAGE SINCE IT CAN BE
+ * USED IMMEDIATELY WITHOUT ANY OTHER INITIALISATION. THE REST OF THE PAGES
+ * WILL BE INITIALISED BY READ_LOGWORD.
+ * ----------------------------------------------------------------------- */
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ if (logPartPtr.p->logExecState ==
+ LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THIS IS THE FIRST READ DURING THE EXECUTION OF THIS MBYTE. SET THE
+ * NEW CURRENT LOG PAGE TO THE FIRST OF THESE PAGES. CHANGE
+ * LOG_EXEC_STATE TO ENSURE THAT WE START EXECUTION OF THE LOG.
+ * --------------------------------------------------------------------- */
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte *
+ ZPAGES_IN_MBYTE;
+ logPartPtr.p->prevFilepage = logFilePtr.p->currentFilepage;
+ logFilePtr.p->currentLogpage = lfoPtr.p->firstLfoPage;
+ logPartPtr.p->prevLogpage = logFilePtr.p->currentLogpage;
+ }//if
+ moveToPageRef(signal);
+ releaseLfo(signal);
+ /* ------------------------------------------------------------------------
+ * NOW WE HAVE COMPLETED THE RECEPTION OF THESE PAGES.
+ * NOW CHECK IF WE NEED TO READ MORE PAGES.
+ * ----------------------------------------------------------------------- */
+ checkReadExecSr(signal);
+ if (logPartPtr.p->logExecState == LogPartRecord::LES_EXEC_LOG) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ return;
+}//Dblqh::readExecSrLab()
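+
+/* --------------------------------------------------------------------------
+ * A MINIMAL SKETCH OF THE READ AHEAD POLICY DESCRIBED ABOVE, EXPRESSED IN
+ * PAGES RATHER THAN KBYTES. THE PARAMETER NAMES AND THE ENUM ARE
+ * HYPOTHETICAL STAND-INS; THE BLOCK'S OWN LOW WATER MARK IS
+ * ZMIN_READ_BUFFER_SIZE AND THE REFILL IS HANDLED BY readExecSr AND
+ * checkReadExecSr.
+ * -------------------------------------------------------------------------- */
+enum SketchReadAction {
+ SKETCH_EXECUTE, // ENOUGH PAGES BUFFERED, JUST EXECUTE LOG RECORDS
+ SKETCH_READ_MORE, // KEEP EXECUTING BUT ISSUE ANOTHER READ
+ SKETCH_WAIT_FOR_READ // TOO LITTLE LEFT, WAIT FOR OUTSTANDING READS
+};
+
+static SketchReadAction sketchReadAheadAction(Uint32 pagesBuffered,
+ Uint32 waitLevelPages, // ROUGHLY THE 16 KBYTE LEVEL
+ Uint32 refillLevelPages) // ROUGHLY THE 64 KBYTE LEVEL
+{
+ if (pagesBuffered < waitLevelPages) {
+ return SKETCH_WAIT_FOR_READ;
+ }
+ if (pagesBuffered < refillLevelPages) {
+ return SKETCH_READ_MORE;
+ }
+ return SKETCH_EXECUTE;
+}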
+
+void Dblqh::openExecSrNewMbyteLab(Signal* signal)
+{
+ readExecSrNewMbyte(signal);
+ return;
+}//Dblqh::openExecSrNewMbyteLab()
+
+void Dblqh::closeExecSrLab(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ locLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::closeExecSrLab()
+
+void Dblqh::writeDirtyLab(Signal* signal)
+{
+ releaseLfo(signal);
+ signal->theData[0] = logPartPtr.i;
+ execSr(signal);
+ return;
+}//Dblqh::writeDirtyLab()
+
+/* --------------------------------------------------------------------------
+ * EXECUTE A LOG RECORD WITHIN THE CURRENT MBYTE.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execSr(Signal* signal)
+{
+ LogFileRecordPtr nextLogFilePtr;
+ LogPageRecordPtr tmpLogPagePtr;
+ Uint32 logWord;
+
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+
+ do {
+ jam();
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logPartPtr.p->prevLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ if (logPagePtr.p->logPageWord[ZPOS_DIRTY] == ZDIRTY) {
+ jam();
+ switch (logPartPtr.p->logExecState) {
+ case LogPartRecord::LES_EXEC_LOG_COMPLETED:
+ case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
+ case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
+ jam();
+ /* ------------------------------------------------------------------
+ * IN THIS CASE WE HAVE COMPLETED EXECUTION OF THE CURRENT LOG PAGE
+ * AND CAN WRITE IT TO DISK SINCE IT IS DIRTY.
+ * ----------------------------------------------------------------- */
+ writeDirty(signal);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG:
+ jam();
+ /* --------------------------------------------------------------------
+ * IN THIS CASE WE ONLY WRITE THE PAGE TO DISK IF WE HAVE COMPLETED
+ * EXECUTION OF LOG RECORDS BELONGING TO THIS LOG PAGE.
+ * ------------------------------------------------------------------- */
+ if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
+ jam();
+ writeDirty(signal);
+ return;
+ }//if
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ }//if
+ if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) {
+ jam();
+ logPartPtr.p->prevLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPartPtr.p->prevFilepage++;
+ continue;
+ }//if
+ switch (logPartPtr.p->logExecState) {
+ case LogPartRecord::LES_EXEC_LOG_COMPLETED:
+ jam();
+ releaseMmPages(signal);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR_COMPLETED;
+ closeFile(signal, logFilePtr);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE:
+ jam();
+ logFilePtr.p->currentMbyte++;
+ readExecSrNewMbyte(signal);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG_NEW_FILE:
+ jam();
+ nextLogFilePtr.i = logFilePtr.p->nextLogFile;
+ logPartPtr.p->currentLogfile = nextLogFilePtr.i;
+ ptrCheckGuard(nextLogFilePtr, clogFileFileSize, logFileRecord);
+ nextLogFilePtr.p->currentMbyte = 0;
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR;
+ closeFile(signal, logFilePtr);
+ return;
+ break;
+ case LogPartRecord::LES_EXEC_LOG:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPartPtr.p->savePageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if (logPartPtr.p->execSrPagesRead < ZMIN_READ_BUFFER_SIZE) {
+ /* --------------------------------------------------------------------
+ * THERE WERE LESS THAN 16 KBYTE OF LOG PAGES REMAINING. WE WAIT UNTIL
+ * THE NEXT 64 KBYTE HAS ARRIVED BEFORE WE CONTINUE.
+ * ------------------------------------------------------------------- */
+ if ((logPartPtr.p->execSrPagesRead +
+ logPartPtr.p->execSrPagesExecuted) < ZPAGES_IN_MBYTE) {
+ jam();
+ /* ------------------------------------------------------------------
+ * WE ONLY STOP AND WAIT IF THERE ARE MORE PAGES TO READ. IF THERE
+ * ARE NOT, THEN IT IS THE END OF THE MBYTE AND WE CONTINUE. THERE IS
+ * NO RISK THAT A LOG RECORD WE FIND WILL NOT HAVE BEEN READ AT THIS
+ * TIME SINCE LOG RECORDS NEVER SPAN A MBYTE BOUNDARY.
+ * ----------------------------------------------------------------- */
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
+ return;
+ }//if
+ }//if
+ logWord = readLogword(signal);
+ switch (logWord) {
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZPREP_OP_TYPE:
+ {
+ logWord = readLogword(signal);
+ stepAhead(signal, logWord - 2);
+ break;
+ }
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZINVALID_COMMIT_TYPE:
+ jam();
+ stepAhead(signal, ZCOMMIT_LOG_SIZE - 1);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZCOMMIT_TYPE:
+ {
+ CommitLogRecord commitLogRecord;
+ jam();
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ readCommitLog(signal, &commitLogRecord);
+ if (tcConnectptr.p->gci > crestartNewestGci) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS LOG RECORD MUST BE IGNORED. IT IS PART OF A GLOBAL CHECKPOINT WHICH */
+/* WILL BE INVALIDATED BY THE SYSTEM RESTART. IF NOT INVALIDATED IT MIGHT BE */
+/* EXECUTED IN A FUTURE SYSTEM RESTART. */
+/*---------------------------------------------------------------------------*/
+ tmpLogPagePtr.i = logPartPtr.p->prevLogpage;
+ ptrCheckGuard(tmpLogPagePtr, clogPageFileSize, logPageRecord);
+ arrGuard(logPartPtr.p->savePageIndex, ZPAGE_SIZE);
+ tmpLogPagePtr.p->logPageWord[logPartPtr.p->savePageIndex] =
+ ZINVALID_COMMIT_TYPE;
+ tmpLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZDIRTY;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* CHECK IF I AM SUPPOSED TO EXECUTE THIS LOG RECORD. IF I AM THEN SAVE PAGE */
+/* INDEX IN CURRENT LOG PAGE SINCE IT WILL BE OVERWRITTEN WHEN EXECUTING THE */
+/* LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->execSrExecuteIndex = 0;
+ Uint32 result = checkIfExecLog(signal);
+ if (result == ZOK) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IN A NODE RESTART WE WILL NEVER END UP HERE SINCE NO FRAGMENTS HAVE BEEN */
+/* DEFINED YET. THUS NO EXTRA CHECKING FOR NODE RESTART IS NECESSARY. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->savePageIndex =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ tcConnectptr.p->fragmentptr = fragptr.i;
+ findPageRef(signal, &commitLogRecord);
+ logPartPtr.p->execSrLogPageIndex = commitLogRecord.startPageIndex;
+ if (logPagePtr.i != RNIL) {
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = commitLogRecord.startPageIndex;
+ logPartPtr.p->execSrLogPage = logPagePtr.i;
+ execLogRecord(signal);
+ return;
+ }//if
+ logPartPtr.p->execSrStartPageNo = commitLogRecord.startPageNo;
+ logPartPtr.p->execSrStopPageNo = commitLogRecord.stopPageNo;
+ findLogfile(signal, commitLogRecord.fileNo, logPartPtr, &logFilePtr);
+ logPartPtr.p->execSrExecLogFile = logFilePtr.i;
+ if (logFilePtr.i == logPartPtr.p->currentLogfile) {
+ jam();
+ readExecLog(signal);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
+ return;
+ } else {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THE FILE IS CURRENTLY NOT OPEN. WE MUST OPEN IT BEFORE WE CAN READ FROM */
+/* THE FILE. */
+/*---------------------------------------------------------------------------*/
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_LOG;
+ openFileRw(signal, logFilePtr);
+ return;
+ }//if
+ }//if
+ }//if
+ break;
+ }
+/* ========================================================================= */
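+/* AN ABORT LOG RECORD. THERE IS NOTHING TO EXECUTE, SIMPLY STEP PAST IT.    */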
+/* ========================================================================= */
+ case ZABORT_TYPE:
+ jam();
+ stepAhead(signal, ZABORT_LOG_SIZE - 1);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZFD_TYPE:
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS IS THE FIRST ITEM WE ENCOUNTER IN A NEW FILE. AT THIS MOMENT WE SHALL*/
+/* SIMPLY BYPASS IT. IT HAS NO SIGNIFICANCE WHEN EXECUTING THE LOG. IT HAS   */
+/* ITS SIGNIFICANCE WHEN FINDING THE START AND THE END OF THE LOG.           */
+/* WE HARDCODE THE PAGE INDEX SINCE THIS SHOULD NEVER BE FOUND AT ANY OTHER */
+/* PLACE THAN IN THE FIRST PAGE OF A NEW FILE IN THE FIRST POSITION AFTER THE*/
+/* HEADER. */
+/*---------------------------------------------------------------------------*/
+ ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] ==
+ (ZPAGE_HEADER_SIZE + ZPOS_NO_FD));
+ {
+ Uint32 noFdDescriptors =
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (noFdDescriptors * ZFD_PART_SIZE);
+ }
+ break;
+/* ========================================================================= */
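+/* THE REST OF THIS PAGE CONTAINS NO LOG RECORDS. STEP AHEAD TO THE NEXT     */
+/* PAGE.                                                                     */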
+/* ========================================================================= */
+ case ZNEXT_LOG_RECORD_TYPE:
+ jam();
+ stepAhead(signal, ZPAGE_SIZE - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]);
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZNEXT_MBYTE_TYPE:
+/*---------------------------------------------------------------------------*/
+/* WE WILL SKIP A PART OF THE LOG FILE. ACTUALLY THE NEXT POINTER IS TO */
+/* A NEW MBYTE. THEREFORE WE WILL START UP A NEW MBYTE. THIS NEW MBYTE IS */
+/* HOWEVER ONLY STARTED IF IT IS NOT AFTER THE STOP MBYTE. */
+/* IF WE HAVE REACHED THE END OF THE STOP MBYTE THEN THE EXECUTION OF THE LOG*/
+/* IS COMPLETED. */
+/*---------------------------------------------------------------------------*/
+ if (logPartPtr.p->currentLogfile == logPartPtr.p->stopLogfile) {
+ if (logFilePtr.p->currentMbyte == logPartPtr.p->stopMbyte) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* THIS WAS THE LAST MBYTE TO EXECUTE IN THIS LOG PART. WE SHOULD HAVE FOUND */
+/* A COMPLETED GCI RECORD OF THE LAST GCI BEFORE THIS. FOR SOME REASON THIS */
+/* RECORD WAS NOT AVAILABLE ON THE LOG. CRASH THE SYSTEM, A VERY SERIOUS */
+/* ERROR WHICH WE MUST REALLY WORK HARD TO AVOID. */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = RNIL;
+ signal->theData[1] = logPartPtr.i;
+ Uint32 tmp = logFilePtr.p->fileName[3];
+ tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
+ signal->theData[2] = tmp;
+ signal->theData[3] = logFilePtr.p->fileNo;
+ signal->theData[4] = logFilePtr.p->currentFilepage;
+ signal->theData[5] = logFilePtr.p->currentMbyte;
+ signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ sendSignal(cownref, GSN_DEBUG_SIG, signal, 7, JBA);
+ return;
+ }//if
+ }//if
+/*---------------------------------------------------------------------------*/
+/* START EXECUTION OF A NEW MBYTE IN THE LOG. */
+/*---------------------------------------------------------------------------*/
+ if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE;
+ } else {
+ ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1));
+ jam();
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_FILE;
+ }//if
+ break;
+/* ========================================================================= */
+/* ========================================================================= */
+ case ZCOMPLETED_GCI_TYPE:
+ jam();
+ logWord = readLogword(signal);
+ if (logWord == logPartPtr.p->logLastGci) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/* IF IT IS THE LAST GCI TO LIVE AFTER SYSTEM RESTART THEN WE RECORD THE NEXT*/
+/* WORD AS THE NEW HEADER OF THE LOG FILE. OTHERWISE WE SIMPLY IGNORE THIS */
+/* LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+ if (csrPhasesCompleted == 0) {
+ jam();
+/*---------------------------------------------------------------------------*/
+/*WE ONLY RECORD THE HEAD OF THE LOG IN THE FIRST LOG ROUND OF LOG EXECUTION.*/
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->headFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage;
+ logPartPtr.p->headPageIndex =
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ }//if
+/*---------------------------------------------------------------------------*/
+/* THERE IS NO NEED TO EXECUTE PAST THIS LINE SINCE THERE WILL ONLY BE LOG  */
+/* RECORDS THAT WILL BE OF NO INTEREST. THUS CLOSE THE FILE AND START THE */
+/* NEXT PHASE OF THE SYSTEM RESTART. */
+/*---------------------------------------------------------------------------*/
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_COMPLETED;
+ }//if
+ break;
+ default:
+ jam();
+/* ========================================================================= */
+/* ========================================================================= */
+/*---------------------------------------------------------------------------*/
+/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = RNIL;
+ signal->theData[1] = logPartPtr.i;
+ Uint32 tmp = logFilePtr.p->fileName[3];
+ tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX.
+ signal->theData[2] = tmp;
+ signal->theData[3] = logFilePtr.p->fileNo;
+ signal->theData[4] = logFilePtr.p->currentMbyte;
+ signal->theData[5] = logFilePtr.p->currentFilepage;
+ signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ signal->theData[7] = logWord;
+ sendSignal(cownref, GSN_DEBUG_SIG, signal, 8, JBA);
+ return;
+ break;
+ }//switch
+/*---------------------------------------------------------------------------*/
+// We continue to execute log records until we find a proper one to execute or
+// until we reach a new page.
+/*---------------------------------------------------------------------------*/
+ } while (1);
+}//Dblqh::execSr()
+
+/*---------------------------------------------------------------------------*/
+/* THIS SIGNAL IS ONLY RECEIVED TO BE CAPTURED IN THE SIGNAL LOG. IT IS */
+/* ALSO USED TO CRASH THE SYSTEM AFTER SENDING A SIGNAL TO THE LOG. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execDEBUG_SIG(Signal* signal)
+{
+/*
+2.5 TEMPORARY VARIABLES
+-----------------------
+*/
+ UintR tdebug;
+
+ jamEntry();
+ logPagePtr.i = signal->theData[0];
+ tdebug = logPagePtr.p->logPageWord[0];
+
+ char buf[100];
+ BaseString::snprintf(buf, 100,
+ "Error while reading REDO log.\n"
+ "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d",
+ signal->theData[2], signal->theData[3], signal->theData[4],
+ signal->theData[5], signal->theData[6], signal->theData[7]);
+
+ progError(__LINE__, ERR_SR_REDOLOG, buf);
+
+ return;
+}//Dblqh::execDEBUG_SIG()
+
+/*---------------------------------------------------------------------------*/
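+/* A LOG FILE THAT WAS USED TO EXECUTE LOG RECORDS FROM ANOTHER FILE HAS     */
+/* BEEN CLOSED. CONTINUE EXECUTION OF THE FRAGMENT LOG VIA A CONTINUEB SIGNAL*/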
+/*---------------------------------------------------------------------------*/
+void Dblqh::closeExecLogLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logFilePtr.p->logPartRec;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//Dblqh::closeExecLogLab()
+
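+/*---------------------------------------------------------------------------*/
+/* THE LOG FILE CONTAINING THE LOG RECORD TO EXECUTE HAS BEEN OPENED.        */
+/* START READING THE PAGES THAT HOLD THE LOG RECORD.                         */
+/*---------------------------------------------------------------------------*/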
+void Dblqh::openExecLogLab(Signal* signal)
+{
+ readExecLog(signal);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG;
+ return;
+}//Dblqh::openExecLogLab()
+
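+/*---------------------------------------------------------------------------*/
+/* THE PAGES HOLDING THE LOG RECORD HAVE BEEN READ FROM FILE. LINK THEM      */
+/* TOGETHER AND EXECUTE THE LOG RECORD.                                      */
+/*---------------------------------------------------------------------------*/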
+void Dblqh::readExecLogLab(Signal* signal)
+{
+ buildLinkedLogPageList(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOGREC_FROM_FILE;
+ logPartPtr.p->execSrLfoRec = lfoPtr.i;
+ logPartPtr.p->execSrLogPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ logPartPtr.p->execSrLogPageIndex;
+ execLogRecord(signal);
+ return;
+}//Dblqh::readExecLogLab()
+
+/*---------------------------------------------------------------------------*/
+/* THIS CODE IS USED TO EXECUTE A LOG RECORD WHEN ITS DATA HAS BEEN LOCATED */
+/* AND TRANSFERRED INTO MEMORY. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::execLogRecord(Signal* signal)
+{
+ jamEntry();
+
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ // Read a log record and prepare it for execution
+ readLogHeader(signal);
+ readKey(signal);
+ readAttrinfo(signal);
+ initReqinfoExecSr(signal);
+ arrGuard(logPartPtr.p->execSrExecuteIndex, 4);
+ BlockReference ref = fragptr.p->execSrBlockref[logPartPtr.p->execSrExecuteIndex];
+ tcConnectptr.p->nextReplica = refToNode(ref);
+ tcConnectptr.p->connectState = TcConnectionrec::LOG_CONNECTED;
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ packLqhkeyreqLab(signal);
+ return;
+}//Dblqh::execLogRecord()
+
+//----------------------------------------------------------------------------
+// This function invalidates log pages after the last GCI record in a
+// system/node restart. This is to ensure that the end of the log is
+// consistent. This function is executed last in start phase 3.
+// RT 450. EDTJAMO.
+//----------------------------------------------------------------------------
+void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
+
+ jam();
+ if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG_INVALIDATE) {
+ jam();
+ systemError(signal);
+ }
+
+ if (logFilePtr.p->fileNo != logPartPtr.p->invalidateFileNo) {
+ jam();
+ systemError(signal);
+ }
+
+ switch (lfoPtr.p->lfoState) {
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+ releaseLfo(signal);
+ releaseLogpage(signal);
+ if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
+ // We continue in this file.
+ logPartPtr.p->invalidatePageNo++;
+ } else {
+ // We continue in the next file.
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ // Page 0 is used for file descriptors.
+ logPartPtr.p->invalidatePageNo = 1;
+ if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
+ openFileRw(signal, logFilePtr);
+ return;
+ break;
+ }
+ }
+ // Read a page from the log file.
+ readFileInInvalidate(signal);
+ return;
+ break;
+
+ case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
+ jam();
+ releaseLfo(signal);
+ // Check if this page must be invalidated.
+ // If the log lap number on a page after the head of the log is the same
+ // as the current log lap number we must invalidate this page. Otherwise it
+ // could be impossible to find the end of the log in a later system/node
+ // restart.
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) {
+ // This page must be invalidated.
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 0;
+ // Contact NDBFS. Real time break.
+ writeSinglePage(signal, logPartPtr.p->invalidatePageNo, ZPAGE_SIZE - 1);
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+ } else {
+ // We are done with invalidating. Finish start phase 3.4.
+ exitFromInvalidate(signal);
+ }
+ return;
+ break;
+
+ default:
+ jam();
+ systemError(signal);
+ return;
+ break;
+ }
+
+ return;
+}//Dblqh::invalidateLogAfterLastGCI
+
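+//----------------------------------------------------------------------------
+// Read the next candidate page for invalidation from the current log file.
+//----------------------------------------------------------------------------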
+void Dblqh::readFileInInvalidate(Signal* signal) {
+ jam();
+ // Contact NDBFS. Real time break.
+ readSinglePage(signal, logPartPtr.p->invalidatePageNo);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
+}
+
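+//----------------------------------------------------------------------------
+// Invalidation is complete. Close any log file opened during invalidation,
+// except the current file and the one after it, then report that this log
+// part has completed phase four of the restart.
+//----------------------------------------------------------------------------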
+void Dblqh::exitFromInvalidate(Signal* signal) {
+ jam();
+ // Close files if necessary. Current file and the next file should be
+ // left open.
+ if (logFilePtr.i != logPartPtr.p->currentLogfile) {
+ LogFileRecordPtr currentLogFilePtr;
+ LogFileRecordPtr nextAfterCurrentLogFilePtr;
+
+ currentLogFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(currentLogFilePtr, clogFileFileSize, logFileRecord);
+
+ nextAfterCurrentLogFilePtr.i = currentLogFilePtr.p->nextLogFile;
+
+ if (logFilePtr.i != nextAfterCurrentLogFilePtr.i) {
+ // This file should be closed.
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
+ closeFile(signal, logFilePtr);
+ // Return from this function and wait for close confirm. Then come back
+ // and test the previous file for closing.
+ return;
+ }
+ }
+
+ // We are done with closing files, send completed signal and exit this phase.
+ signal->theData[0] = ZSR_FOURTH_COMP;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}
+
+
+/*---------------------------------------------------------------------------*/
+/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */
+/* READ FROM DISK FOR THIS PARTICULAR OPERATION. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::completedLab(Signal* signal)
+{
+ Uint32 result = returnExecLog(signal);
+/*---------------------------------------------------------------------------*/
+/* ENTER COMPLETED WITH */
+/* LQH_CONNECTPTR */
+/*---------------------------------------------------------------------------*/
+ if (result == ZOK) {
+ jam();
+ execLogRecord(signal);
+ return;
+ } else if (result == ZNOT_OK) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /*empty*/;
+ }//if
+/*---------------------------------------------------------------------------*/
+/* WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE PROCEEDING IN */
+/* RARE CASES. */
+/*---------------------------------------------------------------------------*/
+ return;
+}//Dblqh::completedLab()
+
+/*---------------------------------------------------------------------------*/
+/* EXECUTION OF LOG RECORD WAS NOT SUCCESSFUL. CHECK IF IT IS OK ANYWAY, */
+/* THEN EXECUTE THE NEXT LOG RECORD. */
+/*---------------------------------------------------------------------------*/
+void Dblqh::logLqhkeyrefLab(Signal* signal)
+{
+ Uint32 result = returnExecLog(signal);
+ switch (tcConnectptr.p->operation) {
+ case ZUPDATE:
+ case ZDELETE:
+ jam();
+ ndbrequire(terrorCode == ZNO_TUPLE_FOUND);
+ break;
+ case ZINSERT:
+ jam();
+ ndbrequire(terrorCode == ZTUPLE_ALREADY_EXIST);
+ break;
+ default:
+ ndbrequire(false);
+ return;
+ break;
+ }//switch
+ if (result == ZOK) {
+ jam();
+ execLogRecord(signal);
+ return;
+ } else if (result == ZNOT_OK) {
+ jam();
+ signal->theData[0] = ZEXEC_SR;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /*empty*/;
+ }//if
+ /* ------------------------------------------------------------------------
+ * WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE
+ * PROCEEDING IN RARE CASES.
+ * ----------------------------------------------------------------------- */
+ return;
+}//Dblqh::logLqhkeyrefLab()
+
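+/* --------------------------------------------------------------------------
+ * THE LOG FILE HAS BEEN CLOSED AFTER COMPLETING THE EXECUTION OF THE
+ * FRAGMENT LOG FOR THIS LOG PART. REPORT THAT THIS LOG PART HAS COMPLETED
+ * ITS PART OF THE LOG EXECUTION.
+ * ------------------------------------------------------------------------- */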
+void Dblqh::closeExecSrCompletedLab(Signal* signal)
+{
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
+ signal->theData[0] = logFilePtr.p->logPartRec;
+ execLogComp(signal);
+ return;
+}//Dblqh::closeExecSrCompletedLab()
+
+/* --------------------------------------------------------------------------
+ * ONE OF THE LOG PARTS HAS COMPLETED EXECUTING THE LOG. CHECK IF ALL LOG
+ * PARTS ARE COMPLETED. IF SO START SENDING EXEC_FRAGCONF AND EXEC_SRCONF.
+ * ------------------------------------------------------------------------- */
+void Dblqh::execLogComp(Signal* signal)
+{
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_COMPLETED;
+ /* ------------------------------------------------------------------------
+ * WE MUST RELEASE THE TC CONNECT RECORD HERE SO THAT IT CAN BE REUSED.
+ * ----------------------------------------------------------------------- */
+ tcConnectptr.i = logPartPtr.p->logTcConrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ releaseTcrecLog(signal, tcConnectptr);
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_COMPLETED) {
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+ * THIS LOG PART WAS NOT COMPLETED YET. EXIT AND WAIT FOR IT
+ * TO COMPLETE
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * ALL LOG PARTS HAVE COMPLETED THE EXECUTION OF THE LOG. WE CAN NOW START
+ * SENDING THE EXEC_FRAGCONF SIGNALS TO ALL INVOLVED FRAGMENTS.
+ * ----------------------------------------------------------------------- */
+ if (cstartType != NodeState::ST_NODE_RESTART) {
+ jam();
+ signal->theData[0] = ZSEND_EXEC_CONF;
+ signal->theData[1] = 0;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * FOR NODE RESTART WE CAN SKIP A NUMBER OF STEPS SINCE WE HAVE NO
+ * FRAGMENTS DEFINED AT THIS POINT. OBVIOUSLY WE WILL NOT NEED TO
+ * EXECUTE ANY MORE LOG STEPS EITHER AND THUS WE CAN IMMEDIATELY
+ * START FINDING THE END AND THE START OF THE LOG.
+ * --------------------------------------------------------------------- */
+ csrPhasesCompleted = 3;
+ execSrCompletedLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::execLogComp()
+
+/* --------------------------------------------------------------------------
+ * GO THROUGH THE FRAGMENT RECORDS TO DEDUCE WHICH ONES SHALL BE SENT
+ * EXEC_FRAGCONF AFTER COMPLETING THE EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendExecConf(Signal* signal)
+{
+ jamEntry();
+ fragptr.i = signal->theData[0];
+ Uint32 loopCount = 0;
+ while (fragptr.i < cfragrecFileSize) {
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ jam();
+ ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4);
+ for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) {
+ jam();
+ signal->theData[0] = fragptr.p->execSrUserptr[i];
+ sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF,
+ signal, 1, JBB);
+ }//for
+ if (fragptr.p->execSrStatus == Fragrecord::ACTIVE) {
+ jam();
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ } else {
+ ndbrequire(fragptr.p->execSrStatus == Fragrecord::ACTIVE_REMOVE_AFTER);
+ jam();
+ Uint32 fragId = fragptr.p->fragId;
+ tabptr.i = fragptr.p->tabRef;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ deleteFragrec(fragId);
+ }//if
+ fragptr.p->execSrNoReplicas = 0;
+ }//if
+ loopCount++;
+ if (loopCount > 20) {
+ jam();
+ signal->theData[0] = ZSEND_EXEC_CONF;
+ signal->theData[1] = fragptr.i + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ } else {
+ jam();
+ fragptr.i++;
+ }//if
+ }//while
+ /* ----------------------------------------------------------------------
+ * WE HAVE NOW SENT ALL EXEC_FRAGCONF. NOW IT IS TIME TO SEND
+ * EXEC_SRCONF TO ALL NODES.
+ * --------------------------------------------------------------------- */
+ srPhase3Comp(signal);
+}//Dblqh::sendExecConf()
+
+/* --------------------------------------------------------------------------
+ * PHASE 3 HAS NOW COMPLETED. INFORM ALL OTHER NODES OF THIS EVENT.
+ * ------------------------------------------------------------------------- */
+void Dblqh::srPhase3Comp(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(cnoOfNodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < cnoOfNodes; i++) {
+ jam();
+ if (cnodeStatus[i] == ZNODE_UP) {
+ jam();
+ ndbrequire(cnodeData[i] < MAX_NDB_NODES);
+ BlockReference ref = calcLqhBlockRef(cnodeData[i]);
+ signal->theData[0] = cownNodeid;
+ sendSignal(ref, GSN_EXEC_SRCONF, signal, 1, JBB);
+ }//if
+ }//for
+ return;
+}//Dblqh::srPhase3Comp()
+
+/* ##########################################################################
+ * SYSTEM RESTART PHASE FOUR MODULE
+ * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING.
+ *
+ * THIS MODULE SETS UP THE HEAD AND TAIL POINTERS OF THE LOG PARTS IN THE
+ * FRAGMENT LOG. WHEN IT IS COMPLETED IT REPORTS TO THE MASTER DIH THAT
+ * IT HAS COMPLETED THE PART OF THE SYSTEM RESTART WHERE THE DATABASE IS
+ * LOADED.
+ * IT ALSO OPENS THE CURRENT LOG FILE AND THE NEXT AND SETS UP THE FIRST
+ * LOG PAGE WHERE NEW LOG DATA IS TO BE INSERTED WHEN THE SYSTEM STARTS
+ * AGAIN.
+ *
+ * THIS PART IS ACTUALLY EXECUTED FOR ALL RESTART TYPES.
+ * ######################################################################### */
+void Dblqh::initFourth(Signal* signal)
+{
+ LogFileRecordPtr locLogFilePtr;
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ crestartNewestGci = 1;
+ crestartOldestGci = 1;
+ /* ------------------------------------------------------------------------
+ * INITIALISE LOG PART AND LOG FILES AS NEEDED.
+ * ----------------------------------------------------------------------- */
+ logPartPtr.p->headFileNo = 0;
+ logPartPtr.p->headPageNo = 1;
+ logPartPtr.p->headPageIndex = ZPAGE_HEADER_SIZE + 2;
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED;
+ logPartPtr.p->logTailFileNo = 0;
+ logPartPtr.p->logTailMbyte = 0;
+ locLogFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE;
+ openFileRw(signal, locLogFilePtr);
+ return;
+}//Dblqh::initFourth()
+
+void Dblqh::openSrFourthPhaseLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+ * WE HAVE NOW OPENED THE HEAD LOG FILE WE WILL NOW START READING IT
+ * FROM THE HEAD MBYTE TO FIND THE NEW HEAD OF THE LOG.
+ * ----------------------------------------------------------------------- */
+ readSinglePage(signal, logPartPtr.p->headPageNo);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_PHASE;
+ return;
+}//Dblqh::openSrFourthPhaseLab()
+
+void Dblqh::readSrFourthPhaseLab(Signal* signal)
+{
+ if(c_diskless){
+ jam();
+ logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1;
+ }
+
+ /* ------------------------------------------------------------------------
+ * INITIALISE ALL LOG PART INFO AND LOG FILE INFO THAT IS NEEDED TO
+ * START UP THE SYSTEM.
+ * ------------------------------------------------------------------------
+ * INITIALISE THE NEWEST GLOBAL CHECKPOINT IDENTITY AND THE NEWEST
+ * COMPLETED GLOBAL CHECKPOINT IDENTITY AS THE NEWEST THAT WAS RESTARTED.
+ * ------------------------------------------------------------------------
+ * INITIALISE THE HEAD PAGE INDEX IN THIS PAGE.
+ * ASSIGN IT AS THE CURRENT LOGPAGE.
+ * ASSIGN THE FILE AS THE CURRENT LOG FILE.
+ * ASSIGN THE CURRENT FILE NUMBER FROM THE CURRENT LOG FILE AND THE NEXT
+ * FILE NUMBER FROM THE NEXT LOG FILE.
+ * ASSIGN THE CURRENT FILEPAGE FROM HEAD PAGE NUMBER.
+ * ASSIGN THE CURRENT MBYTE BY DIVIDING PAGE NUMBER BY 128.
+ * INITIALISE LOG LAP TO BE THE LOG LAP AS FOUND IN THE HEAD PAGE.
+ * WE HAVE TO CALCULATE THE NUMBER OF REMAINING WORDS IN THIS MBYTE.
+ * ----------------------------------------------------------------------- */
+ cnewestGci = crestartNewestGci;
+ cnewestCompletedGci = crestartNewestGci;
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+ logPartPtr.p->currentLogfile = logFilePtr.i;
+ logFilePtr.p->filePosition = logPartPtr.p->headPageNo;
+ logFilePtr.p->currentMbyte =
+ logPartPtr.p->headPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE;
+ logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ logFilePtr.p->currentFilepage = logPartPtr.p->headPageNo;
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+
+ initLogpage(signal);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->headPageIndex;
+ logFilePtr.p->remainingWordsInMbyte =
+ ((
+ ((logFilePtr.p->currentMbyte + 1) * ZPAGES_IN_MBYTE) -
+ logFilePtr.p->currentFilepage) *
+ (ZPAGE_SIZE - ZPAGE_HEADER_SIZE)) -
+ (logPartPtr.p->headPageIndex - ZPAGE_HEADER_SIZE);
+ /* ------------------------------------------------------------------------
+ * THE NEXT STEP IS TO OPEN THE NEXT LOG FILE (IF THERE IS ONE).
+ * ----------------------------------------------------------------------- */
+ if (logFilePtr.p->nextLogFile != logFilePtr.i) {
+ LogFileRecordPtr locLogFilePtr;
+ jam();
+ locLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_NEXT;
+ openFileRw(signal, locLogFilePtr);
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * THIS CAN ONLY OCCUR IF WE HAVE ONLY ONE LOG FILE. THIS LOG FILE MUST
+ * BE LOG FILE ZERO AND THAT IS THE FILE WE CURRENTLY HAVE READ.
+ * THUS WE CAN CONTINUE IMMEDIATELY TO READ PAGE ZERO IN FILE ZERO.
+ * --------------------------------------------------------------------- */
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+ }//if
+ return;
+}//Dblqh::readSrFourthPhaseLab()
+
+void Dblqh::openSrFourthNextLab(Signal* signal)
+{
+ /* ------------------------------------------------------------------------
+ * WE MUST ALSO HAVE FILE 0 OPEN ALL THE TIME.
+ * ----------------------------------------------------------------------- */
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ if (logFilePtr.p->logFileStatus == LogFileRecord::OPEN) {
+ jam();
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+ } else {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_ZERO;
+ openFileRw(signal, logFilePtr);
+ }//if
+ return;
+}//Dblqh::openSrFourthNextLab()
+
+void Dblqh::openSrFourthZeroLab(Signal* signal)
+{
+ openSrFourthZeroSkipInitLab(signal);
+ return;
+}//Dblqh::openSrFourthZeroLab()
+
+void Dblqh::openSrFourthZeroSkipInitLab(Signal* signal)
+{
+ if (logFilePtr.i == logPartPtr.p->currentLogfile) {
+ if (logFilePtr.p->currentFilepage == 0) {
+ jam();
+ /* -------------------------------------------------------------------
+ * THE HEADER PAGE IN THE LOG IS PAGE ZERO IN FILE ZERO.
+ * THIS SHOULD NEVER OCCUR.
+ * ------------------------------------------------------------------- */
+ systemErrorLab(signal);
+ return;
+ }//if
+ }//if
+ readSinglePage(signal, 0);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_ZERO;
+ return;
+}//Dblqh::openSrFourthZeroSkipInitLab()
+
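+/* --------------------------------------------------------------------------
+ * PAGE ZERO OF LOG FILE ZERO HAS BEEN READ. SAVE A REFERENCE TO IT AND
+ * START INVALIDATING LOG PAGES AFTER THE HEAD OF THE LOG.
+ * ------------------------------------------------------------------------- */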
+void Dblqh::readSrFourthZeroLab(Signal* signal)
+{
+ logFilePtr.p->logPageZero = logPagePtr.i;
+ // --------------------------------------------------------------------
+ // This is moved to invalidateLogAfterLastGCI(), RT453.
+ // signal->theData[0] = ZSR_FOURTH_COMP;
+ // signal->theData[1] = logPartPtr.i;
+ // sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ // --------------------------------------------------------------------
+
+ // Need to invalidate log pages after the head of the log. RT 453. EDTJAMO.
+ // Set the start of the invalidation.
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo;
+ logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
+
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE;
+ seizeLfo(signal);
+ initLfo(signal);
+ // The state here is a little confusing, but simulates that we return
+ // to invalidateLogAfterLastGCI() from an invalidate write and are ready
+ // to read a page from file.
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+
+ invalidateLogAfterLastGCI(signal);
+ return;
+}//Dblqh::readSrFourthZeroLab()
+
+/* --------------------------------------------------------------------------
+ * ONE OF THE LOG PARTS HAS COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
+ * CHECK IF ALL LOG PARTS ARE COMPLETED. IF SO SEND START_RECCONF
+ * ------------------------------------------------------------------------- */
+void Dblqh::srFourthComp(Signal* signal)
+{
+ jamEntry();
+ logPartPtr.i = signal->theData[0];
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_COMPLETED;
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ jam();
+ ptrAss(logPartPtr, logPartRecord);
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_COMPLETED) {
+ if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ } else {
+ jam();
+ /* ------------------------------------------------------------------
+ * THIS LOG PART WAS NOT COMPLETED YET.
+ * EXIT AND WAIT FOR IT TO COMPLETE
+ * ----------------------------------------------------------------- */
+ return;
+ }//if
+ }//if
+ }//for
+ /* ------------------------------------------------------------------------
+ * ALL LOG PARTS HAVE COMPLETED PHASE FOUR OF THE SYSTEM RESTART.
+ * WE CAN NOW SEND START_RECCONF TO THE MASTER DIH IF IT WAS A
+ * SYSTEM RESTART. OTHERWISE WE WILL CONTINUE WITH AN INITIAL START.
+ * SET LOG PART STATE TO IDLE TO
+ * INDICATE THAT NOTHING IS GOING ON IN THE LOG PART.
+ * ----------------------------------------------------------------------- */
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->logPartState = LogPartRecord::IDLE;
+ }//for
+
+ if ((cstartType == NodeState::ST_INITIAL_START) ||
+ (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+
+ ndbrequire(cinitialStartOngoing == ZTRUE);
+ cinitialStartOngoing = ZFALSE;
+
+ checkStartCompletedLab(signal);
+ return;
+ } else if ((cstartType == NodeState::ST_NODE_RESTART) ||
+ (cstartType == NodeState::ST_SYSTEM_RESTART)) {
+ jam();
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+
+ if(cstartType == NodeState::ST_SYSTEM_RESTART){
+ fragptr.i = c_redo_log_complete_frags;
+ while(fragptr.i != RNIL){
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = fragptr.p->tabRef;
+ signal->theData[1] = fragptr.p->fragId;
+ sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+ fragptr.i = fragptr.p->nextFrag;
+ }
+ }
+ } else {
+ ndbrequire(false);
+ }//if
+ return;
+}//Dblqh::srFourthComp()
+
+/* ######################################################################### */
+/* ####### ERROR MODULE ####### */
+/* */
+/* ######################################################################### */
+void Dblqh::warningHandlerLab(Signal* signal)
+{
+ systemErrorLab(signal);
+ return;
+}//Dblqh::warningHandlerLab()
+
+/*---------------------------------------------------------------------------*/
+/* AN ERROR OCCURRED THAT WE WILL NOT TREAT AS A SYSTEM ERROR. MOST OFTEN IT */
+/* WAS CAUSED BY AN ERRONEOUS SIGNAL SENT BY ANOTHER NODE. WE DO NOT WISH TO */
+/* CRASH BECAUSE OF FAULTS IN OTHER NODES. THUS WE ONLY REPORT A WARNING. */
+/* THIS IS CURRENTLY NOT IMPLEMENTED AND FOR THE MOMENT WE GENERATE A SYSTEM */
+/* ERROR SINCE WE WANT TO FIND FAULTS AS QUICKLY AS POSSIBLE IN A TEST PHASE.*/
+/* IN A LATER PHASE WE WILL CHANGE THIS TO BE A WARNING MESSAGE INSTEAD. */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+/* THIS TYPE OF ERROR SHOULD NOT GENERATE A SYSTEM ERROR IN A PRODUCT */
+/* RELEASE. THIS IS A TEMPORARY SOLUTION DURING TEST PHASE TO QUICKLY */
+/* FIND ERRORS. NORMALLY THIS SHOULD GENERATE A WARNING MESSAGE ONTO */
+/* SOME ERROR LOGGER. THIS WILL LATER BE IMPLEMENTED BY SOME SIGNAL. */
+/*---------------------------------------------------------------------------*/
+/* ------ SYSTEM ERROR SITUATIONS ------- */
+/* IN SITUATIONS WHERE THE STATE IS ERRONEOUS OR IF THE ERROR OCCURS IN */
+/* THE COMMIT, COMPLETE OR ABORT PHASE, WE PERFORM A CRASH OF THE AXE VM*/
+/*---------------------------------------------------------------------------*/
+
+void Dblqh::systemErrorLab(Signal* signal)
+{
+ progError(0, 0);
+/*************************************************************************>*/
+/* WE WANT TO INVOKE AN IMMEDIATE ERROR HERE SO WE GET THAT BY */
+/* INSERTING A CERTAIN POINTER OUT OF RANGE. */
+/*************************************************************************>*/
+}//Dblqh::systemErrorLab()
+
+/* ------- ERROR SITUATIONS ------- */
+
+void Dblqh::aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+{
+ ndbrequire(tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE);
+ if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) {
+ jam();
+/*************************************************************************>*/
+/* TRANSACTION ABORT IS ONGOING. IT CAN STILL BE A PART OF AN */
+/* OPERATION THAT SHOULD CONTINUE SINCE THE TUPLE HAS NOT ARRIVED */
+/* YET. THIS IS POSSIBLE IF ACTIVE CREATION OF THE FRAGMENT IS */
+/* ONGOING. */
+/*************************************************************************>*/
+ if (tcConnectptr.p->activeCreat == ZTRUE) {
+ jam();
+/*************************************************************************>*/
+/* ONGOING ABORTS DURING ACTIVE CREATION MUST SAVE THE ATTRIBUTE INFO*/
+/* SO THAT IT CAN BE SENT TO THE NEXT NODE IN THE COMMIT CHAIN. THIS */
+/* IS NEEDED SINCE ALL ABORTS DURING CREATION OF A FRAGMENT ARE NOT */
+/* REALLY ERRORS. A MISSING TUPLE TO BE UPDATED SIMPLY MEANS THAT */
+/* IT HASN'T BEEN TRANSFERRED TO THE NEW REPLICA YET. */
+/*************************************************************************>*/
+/*************************************************************************>*/
+/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
+/* ACTIVE CREATION TO FALSE. THIS WILL ENSURE THAT THE ABORT IS */
+/* COMPLETED. */
+/*************************************************************************>*/
+ if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
+ jam();
+ if (tcConnectptr.p->transactionState ==
+ TcConnectionrec::WAIT_AI_AFTER_ABORT) {
+ if (tcConnectptr.p->currTupAiLen == tcConnectptr.p->totReclenAi) {
+ jam();
+/*************************************************************************>*/
+/* WE WERE WAITING FOR MORE ATTRIBUTE INFO AFTER A SUCCESSFUL ABORT */
+/* IN ACTIVE CREATION STATE. THE TRANSACTION SHOULD CONTINUE AS IF */
+/* IT WAS COMMITTED. NOW ALL INFO HAS ARRIVED AND WE CAN CONTINUE */
+/* WITH NORMAL PROCESSING AS IF THE TRANSACTION WAS PREPARED. */
+/* SINCE THE FRAGMENT IS UNDER CREATION WE KNOW THAT LOGGING IS */
+/* DISABLED. WE STILL HAVE TO CATER FOR DIRTY OPERATION OR NOT. */
+/*************************************************************************>*/
+ tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE;
+ rwConcludedAiLab(signal);
+ return;
+ } else {
+ ndbrequire(tcConnectptr.p->currTupAiLen < tcConnectptr.p->totReclenAi);
+ jam();
+ return; /* STILL WAITING FOR MORE ATTRIBUTE INFO */
+ }//if
+ }//if
+ } else {
+ jam();
+/*************************************************************************>*/
+/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */
+/* ACTIVE CREATION TO ABORT. THIS WILL ENSURE THAT THE ABORT IS */
+/* COMPLETED AND THAT THE ERROR CODE IS PROPERLY SET */
+/*************************************************************************>*/
+ tcConnectptr.p->errorCode = terrorCode;
+ tcConnectptr.p->activeCreat = ZFALSE;
+ if (tcConnectptr.p->transactionState ==
+ TcConnectionrec::WAIT_AI_AFTER_ABORT) {
+ jam();
+/*************************************************************************>*/
+/* ABORT IS ALREADY COMPLETED. WE NEED TO RESTART IT FROM WHERE IT */
+/* WAS INTERRUPTED. */
+/*************************************************************************>*/
+ continueAbortLab(signal);
+ return;
+ } else {
+ jam();
+ return;
+/*************************************************************************>*/
+// Abort is ongoing. It will complete since we set the activeCreat = ZFALSE
+/*************************************************************************>*/
+ }//if
+ }//if
+ }//if
+ }//if
+/*************************************************************************>*/
+/* THE TRANSACTION HAS BEEN ABORTED. THUS IGNORE ALL SIGNALS BELONGING TO IT.*/
+/*************************************************************************>*/
+ return;
+}//Dblqh::aiStateErrorCheckLab()
+
+void Dblqh::takeOverErrorLab(Signal* signal)
+{
+ terrorCode = ZTAKE_OVER_ERROR;
+ abortErrorLab(signal);
+ return;
+}//Dblqh::takeOverErrorLab()
+
+/* ##########################################################################
+ * TEST MODULE
+ * ######################################################################### */
+#ifdef VM_TRACE
+void Dblqh::execTESTSIG(Signal* signal)
+{
+ jamEntry();
+ Uint32 userpointer = signal->theData[0];
+ BlockReference userblockref = signal->theData[1];
+ Uint32 testcase = signal->theData[2];
+
+ signal->theData[0] = userpointer;
+ signal->theData[1] = cownref;
+ signal->theData[2] = testcase;
+ sendSignal(userblockref, GSN_TESTSIG, signal, 25, JBB);
+ return;
+}//Dblqh::execTESTSIG()
+
+/* *************** */
+/* MEMCHECKREQ > */
+/* *************** */
+/* ************************************************************************>>
+ * THIS SIGNAL IS PURELY FOR TESTING PURPOSES. IT CHECKS THE FREE LIST
+ * AND REPORTS THE NUMBER OF FREE RECORDS.
+ * THIS CAN BE DONE TO ENSURE THAT NO RECORDS HAVE BEEN LOST.
+ * ************************************************************************> */
+void Dblqh::execMEMCHECKREQ(Signal* signal)
+{
+ Uint32* dataPtr = &signal->theData[0];
+ jamEntry();
+ BlockReference userblockref = signal->theData[0];
+ Uint32 index = 0;
+ for (Uint32 i = 0; i < 7; i++)
+ dataPtr[i] = 0;
+ addfragptr.i = cfirstfreeAddfragrec;
+ while (addfragptr.i != RNIL) {
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ addfragptr.i = addfragptr.p->nextAddfragrec;
+ dataPtr[index]++;
+ }//while
+ index++;
+ attrinbufptr.i = cfirstfreeAttrinbuf;
+ while (attrinbufptr.i != RNIL) {
+ ptrCheckGuard(attrinbufptr, cattrinbufFileSize, attrbuf);
+ attrinbufptr.i = attrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ dataPtr[index]++;
+ }//while
+ index++;
+ databufptr.i = cfirstfreeDatabuf;
+ while (databufptr.i != RNIL) {
+ ptrCheckGuard(databufptr, cdatabufFileSize, databuf);
+ databufptr.i = databufptr.p->nextDatabuf;
+ dataPtr[index]++;
+ }//while
+ index++;
+ fragptr.i = cfirstfreeFragrec;
+ while (fragptr.i != RNIL) {
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.i = fragptr.p->nextFrag;
+ dataPtr[index]++;
+ }//while
+ index++;
+ for (tabptr.i = 0;
+ tabptr.i < ctabrecFileSize;
+ tabptr.i++) {
+ ptrAss(tabptr, tablerec);
+ if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED) {
+ dataPtr[index]++;
+ }//if
+ }//for
+ index++;
+ tcConnectptr.i = cfirstfreeTcConrec;
+ while (tcConnectptr.i != RNIL) {
+ ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ tcConnectptr.i = tcConnectptr.p->nextTcConnectrec;
+ dataPtr[index]++;
+ }//while
+ sendSignal(userblockref, GSN_MEMCHECKCONF, signal, 10, JBB);
+ return;
+}//Dblqh::execMEMCHECKREQ()
+
+#endif
+
+/* ************************************************************************* */
+/* ************************* STATEMENT BLOCKS ****************************** */
+/* ************************************************************************* */
+/* ========================================================================= */
+/* ====== BUILD LINKED LIST OF LOG PAGES AFTER RECEIVING FSREADCONF ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::buildLinkedLogPageList(Signal* signal)
+{
+ LogPageRecordPtr bllLogPagePtr;
+
+ arrGuard(lfoPtr.p->noPagesRw - 1, 16);
+ arrGuard(lfoPtr.p->noPagesRw, 16);
+ for (UintR tbllIndex = 0; tbllIndex < lfoPtr.p->noPagesRw; tbllIndex++) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * BUILD LINKED LIST BUT ALSO ENSURE THAT PAGE IS NOT SEEN AS DIRTY
+ * INITIALLY.
+ * --------------------------------------------------------------------- */
+ bllLogPagePtr.i = lfoPtr.p->logPageArray[tbllIndex];
+ ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
+
+// #if VM_TRACE
+// // Check logPage checksum before modifying it
+// Uint32 calcCheckSum = calcPageCheckSum(bllLogPagePtr);
+// Uint32 checkSum = bllLogPagePtr.p->logPageWord[ZPOS_CHECKSUM];
+// if (checkSum != calcCheckSum) {
+// ndbout << "Redolog: Checksum failure." << endl;
+// progError(__LINE__, ERR_NDBREQUIRE, "Redolog: Checksum failure.");
+// }
+// #endif
+
+ bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] =
+ lfoPtr.p->logPageArray[tbllIndex + 1];
+ bllLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
+ }//for
+ bllLogPagePtr.i = lfoPtr.p->logPageArray[lfoPtr.p->noPagesRw - 1];
+ ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord);
+ bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+}//Dblqh::buildLinkedLogPageList()
+
+/* =========================================================================
+ * ======= CHANGE TO NEXT MBYTE IN LOG =======
+ *
+ * ========================================================================= */
+void Dblqh::changeMbyte(Signal* signal)
+{
+ writeNextLog(signal);
+ writeFileDescriptor(signal);
+}//Dblqh::changeMbyte()
+
+/* ========================================================================= */
+/* ====== CHECK IF THIS COMMIT LOG RECORD IS TO BE EXECUTED ======= */
+/* */
+/* SUBROUTINE SHORT NAME = CEL */
+/* ========================================================================= */
+Uint32 Dblqh::checkIfExecLog(Signal* signal)
+{
+ tabptr.i = tcConnectptr.p->tableref;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ if (getFragmentrec(signal, tcConnectptr.p->fragmentid) &&
+ (tabptr.p->schemaVersion == tcConnectptr.p->schemaVersion)) {
+ if (fragptr.p->execSrStatus != Fragrecord::IDLE) {
+ if (fragptr.p->execSrNoReplicas > logPartPtr.p->execSrExecuteIndex) {
+ ndbrequire((fragptr.p->execSrNoReplicas - 1) < 4);
+ for (Uint32 i = logPartPtr.p->execSrExecuteIndex;
+ i < fragptr.p->execSrNoReplicas;
+ i++) {
+ jam();
+ if (tcConnectptr.p->gci >= fragptr.p->execSrStartGci[i]) {
+ if (tcConnectptr.p->gci <= fragptr.p->execSrLastGci[i]) {
+ jam();
+ logPartPtr.p->execSrExecuteIndex = i;
+ return ZOK;
+ }//if
+ }//if
+ }//for
+ }//if
+ }//if
+ }//if
+ return ZNOT_OK;
+}//Dblqh::checkIfExecLog()
+
+/* ========================================================================= */
+/* == CHECK IF THERE IS LESS THAN 192 KBYTE IN THE BUFFER PLUS INCOMING === */
+/* READS ALREADY STARTED. IF THAT IS THE CASE THEN START ANOTHER READ IF */
+/* THERE ARE MORE PAGES IN THIS MBYTE. */
+/* */
+/* ========================================================================= */
+void Dblqh::checkReadExecSr(Signal* signal)
+{
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ logPartPtr.p->execSrPagesRead = logPartPtr.p->execSrPagesRead + 8;
+ logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading - 8;
+ if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesReading) <
+ ZREAD_AHEAD_SIZE) {
+ jam();
+ /* ----------------------------------------------------------------------
+ * WE HAVE LESS THAN 64 KBYTES OF LOG PAGES REMAINING IN MEMORY OR ON
+ * THEIR WAY TO MAIN MEMORY. READ IN 8 MORE PAGES.
+ * --------------------------------------------------------------------- */
+ if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesExecuted) <
+ ZPAGES_IN_MBYTE) {
+ jam();
+ /* --------------------------------------------------------------------
+ * THERE ARE MORE PAGES TO READ IN THIS MBYTE. READ THOSE FIRST.
+ * IF >= ZPAGES_IN_MBYTE THEN THERE ARE NO MORE PAGES TO READ. THUS
+ * WE PROCEED WITH EXECUTION OF THE LOG.
+ * ------------------------------------------------------------------- */
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR;
+ }//if
+ }//if
+}//Dblqh::checkReadExecSr()
+
+/* ========================================================================= */
+/* ==== CHECK IF START OF NEW FRAGMENT IS COMPLETED AND WE CAN ======= */
+/* ==== GET THE START GCI ======= */
+/* */
+/* SUBROUTINE SHORT NAME = CTC */
+/* ========================================================================= */
+void Dblqh::checkScanTcCompleted(Signal* signal)
+{
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ fragptr.p->activeTcCounter = fragptr.p->activeTcCounter - 1;
+ if (fragptr.p->activeTcCounter == 0) {
+ jam();
+ fragptr.p->startGci = cnewestGci + 1;
+ tabptr.i = tcConnectptr.p->tableref;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+ sendCopyActiveConf(signal, tcConnectptr.p->tableref);
+ }//if
+}//Dblqh::checkScanTcCompleted()
+
+/* ==========================================================================
+ * === CHECK IF ALL PARTS OF A SYSTEM RESTART ON A FRAGMENT ARE COMPLETED ===
+ *
+ * SUBROUTINE SHORT NAME = CSC
+ * ========================================================================= */
+void Dblqh::checkSrCompleted(Signal* signal)
+{
+ LcpLocRecordPtr cscLcpLocptr;
+
+ terrorCode = ZOK;
+ ptrGuard(lcpPtr);
+ cscLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
+CSC_ACC_DOWHILE:
+ ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_COMPLETED) {
+ jam();
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_ACC_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
+ if (cscLcpLocptr.i != RNIL) {
+ jam();
+ goto CSC_ACC_DOWHILE;
+ }//if
+ cscLcpLocptr.i = lcpPtr.p->firstLcpLocTup;
+CSC_TUP_DOWHILE:
+ ptrCheckGuard(cscLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_COMPLETED) {
+ jam();
+ if (cscLcpLocptr.p->lcpLocstate != LcpLocRecord::SR_TUP_STARTED) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ return;
+ }//if
+ cscLcpLocptr.i = cscLcpLocptr.p->nextLcpLoc;
+ if (cscLcpLocptr.i != RNIL) {
+ jam();
+ goto CSC_TUP_DOWHILE;
+ }//if
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_COMPLETED;
+}//Dblqh::checkSrCompleted()
+
+/* ------------------------------------------------------------------------- */
+/* ------ CLOSE A FILE DURING EXECUTION OF FRAGMENT LOG ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::closeFile(Signal* signal, LogFileRecordPtr clfLogFilePtr)
+{
+ signal->theData[0] = clfLogFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = clfLogFilePtr.i;
+ signal->theData[3] = ZCLOSE_NO_DELETE;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dblqh::closeFile()
+
+
+/* ---------------------------------------------------------------- */
+/* ---------------- A LOG PAGE HAS BEEN COMPLETED ----------------- */
+/* */
+/* SUBROUTINE SHORT NAME = CLP */
+// Input Pointers:
+// logFilePtr
+// logPagePtr
+// logPartPtr
+// Defines lfoPtr
+/* ---------------------------------------------------------------- */
+void Dblqh::completedLogPage(Signal* signal, Uint32 clpType)
+{
+ LogPageRecordPtr clpLogPagePtr;
+ LogPageRecordPtr wlpLogPagePtr;
+ UintR twlpNoPages;
+ UintR twlpType;
+
+ if (logFilePtr.p->firstFilledPage == RNIL) {
+ jam();
+ logFilePtr.p->firstFilledPage = logPagePtr.i;
+ } else {
+ jam();
+ clpLogPagePtr.i = logFilePtr.p->lastFilledPage;
+ ptrCheckGuard(clpLogPagePtr, clogPageFileSize, logPageRecord);
+ clpLogPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i;
+ }//if
+ logFilePtr.p->lastFilledPage = logPagePtr.i;
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+ logFilePtr.p->noLogpagesInBuffer = logFilePtr.p->noLogpagesInBuffer + 1;
+ if (logFilePtr.p->noLogpagesInBuffer != ZMAX_PAGES_WRITTEN) {
+ if (clpType != ZLAST_WRITE_IN_FILE) {
+ if (clpType != ZENFORCE_WRITE) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//if
+ twlpType = clpType;
+/* ------------------------------------------------------------------------- */
+/* ------ WRITE A SET OF LOG PAGES TO DISK ------- */
+/* */
+/* SUBROUTINE SHORT NAME: WLP */
+/* ------------------------------------------------------------------------- */
+ seizeLfo(signal);
+ initLfo(signal);
+ Uint32* dataPtr = &signal->theData[6];
+ twlpNoPages = 0;
+ wlpLogPagePtr.i = logFilePtr.p->firstFilledPage;
+ do {
+ dataPtr[twlpNoPages] = wlpLogPagePtr.i;
+ twlpNoPages++;
+ ptrCheckGuard(wlpLogPagePtr, clogPageFileSize, logPageRecord);
+
+ // Calculate checksum for page
+ wlpLogPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(wlpLogPagePtr);
+ wlpLogPagePtr.i = wlpLogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ } while (wlpLogPagePtr.i != RNIL);
+ ndbrequire(twlpNoPages < 9);
+ dataPtr[twlpNoPages] = logFilePtr.p->filePosition;
+/* -------------------------------------------------- */
+/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */
+/* LOG RECORD HAS BEEN SENT AT THIS TIME. */
+/* -------------------------------------------------- */
+ logPartPtr.p->logPartTimer = logPartPtr.p->logTimer;
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ logFilePtr.p->logFilePagesToDiskWithoutSynch += twlpNoPages;
+ if (twlpType == ZLAST_WRITE_IN_FILE) {
+ jam();
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
+ } else if (logFilePtr.p->logFilePagesToDiskWithoutSynch >
+ MAX_REDO_PAGES_WITHOUT_SYNCH) {
+ jam();
+ logFilePtr.p->logFilePagesToDiskWithoutSynch = 0;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH;
+ } else {
+ jam();
+ signal->theData[3] = ZLIST_OF_MEM_PAGES;
+ }//if
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = twlpNoPages;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+ if (twlpType == ZNORMAL) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ } else if (twlpType == ZLAST_WRITE_IN_FILE) {
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::LAST_WRITE_IN_FILE;
+ } else {
+ ndbrequire(twlpType == ZENFORCE_WRITE);
+ jam();
+ lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
+ }//if
+ /* ----------------------------------------------------------------------- */
+ /* ------ MOVE PAGES FROM LOG FILE TO LFO RECORD ------- */
+ /* */
+ /* ----------------------------------------------------------------------- */
+ /* -------------------------------------------------- */
+ /* MOVE PAGES TO LFO RECORD AND REMOVE THEM */
+ /* FROM LOG FILE RECORD. */
+ /* -------------------------------------------------- */
+ lfoPtr.p->firstLfoPage = logFilePtr.p->firstFilledPage;
+ logFilePtr.p->firstFilledPage = RNIL;
+ logFilePtr.p->lastFilledPage = RNIL;
+ logFilePtr.p->noLogpagesInBuffer = 0;
+
+ lfoPtr.p->noPagesRw = twlpNoPages;
+ lfoPtr.p->lfoPageNo = logFilePtr.p->filePosition;
+ lfoPtr.p->lfoWordWritten = ZPAGE_SIZE - 1;
+ logFilePtr.p->filePosition += twlpNoPages;
+}//Dblqh::completedLogPage()
+
+/* ---------------------------------------------------------------- */
+/* ---------------- DELETE FRAGMENT RECORD ------------------------ */
+/* */
+/* SUBROUTINE SHORT NAME = DFR */
+/* ---------------------------------------------------------------- */
+void Dblqh::deleteFragrec(Uint32 fragId)
+{
+ Uint32 indexFound= RNIL;
+ fragptr.i = RNIL;
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == fragId) {
+ fragptr.i = tabptr.p->fragrec[i];
+ indexFound = i;
+ break;
+ }//if
+ }//for
+ if (fragptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ tabptr.p->fragid[indexFound] = ZNIL;
+ tabptr.p->fragrec[indexFound] = RNIL;
+ releaseFragrec();
+ }//if
+}//Dblqh::deleteFragrec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- FIND LOG FILE RECORD GIVEN FILE NUMBER ------- */
+/* */
+/* INPUT: TFLF_FILE_NO THE FILE NUMBER */
+/* FLF_LOG_PART_PTR THE LOG PART RECORD */
+/* OUTPUT: FLF_LOG_FILE_PTR THE FOUND LOG FILE RECORD */
+/* SUBROUTINE SHORT NAME = FLF */
+/* ------------------------------------------------------------------------- */
+void Dblqh::findLogfile(Signal* signal,
+ Uint32 fileNo,
+ LogPartRecordPtr flfLogPartPtr,
+ LogFileRecordPtr* parLogFilePtr)
+{
+ LogFileRecordPtr locLogFilePtr;
+ locLogFilePtr.i = flfLogPartPtr.p->firstLogfile;
+ Uint32 loopCount = 0;
+ while (true) {
+ ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord);
+ if (locLogFilePtr.p->fileNo == fileNo) {
+ jam();
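+ // Log files in a part are linked in ascending file number order starting at
+ // file 0, so the position in the list must equal the file number.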
+ ndbrequire(loopCount == fileNo);
+ parLogFilePtr->i = locLogFilePtr.i;
+ parLogFilePtr->p = locLogFilePtr.p;
+ return;
+ }//if
+ locLogFilePtr.i = locLogFilePtr.p->nextLogFile;
+ loopCount++;
+ ndbrequire(loopCount < flfLogPartPtr.p->noLogFiles);
+ }//while
+}//Dblqh::findLogfile()
+
+/* ------------------------------------------------------------------------- */
+/* ------ FIND PAGE REFERENCE IN MEMORY BUFFER AT LOG EXECUTION ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::findPageRef(Signal* signal, CommitLogRecord* commitLogRecord)
+{
+ UintR tfprIndex;
+
+ logPagePtr.i = RNIL;
+ if (ERROR_INSERTED(5020)) {
+ // Force system to read page from disk
+ return;
+ }
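+ // Search the page reference records backwards from the most recently added
+ // one; each record covers eight consecutive pages of one log file.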
+ pageRefPtr.i = logPartPtr.p->lastPageRef;
+ do {
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ if (commitLogRecord->fileNo == pageRefPtr.p->prFileNo) {
+ if (commitLogRecord->startPageNo >= pageRefPtr.p->prPageNo) {
+ if (commitLogRecord->startPageNo < (Uint16) (pageRefPtr.p->prPageNo + 8)) {
+ jam();
+ tfprIndex = commitLogRecord->startPageNo - pageRefPtr.p->prPageNo;
+ logPagePtr.i = pageRefPtr.p->pageRef[tfprIndex];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ return;
+ }//if
+ }//if
+ }//if
+ pageRefPtr.i = pageRefPtr.p->prPrev;
+ } while (pageRefPtr.i != RNIL);
+}//Dblqh::findPageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------ GET FIRST OPERATION QUEUED FOR LOGGING ------- */
+/* */
+/* SUBROUTINE SHORT NAME = GFL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::getFirstInLogQueue(Signal* signal)
+{
+ TcConnectionrecPtr gflTcConnectptr;
+/* -------------------------------------------------- */
+/* GET THE FIRST FROM THE LOG QUEUE AND REMOVE */
+/* IT FROM THE QUEUE. */
+/* -------------------------------------------------- */
+ gflTcConnectptr.i = logPartPtr.p->firstLogQueue;
+ ptrCheckGuard(gflTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ logPartPtr.p->firstLogQueue = gflTcConnectptr.p->nextTcLogQueue;
+ if (logPartPtr.p->firstLogQueue == RNIL) {
+ jam();
+ logPartPtr.p->lastLogQueue = RNIL;
+ }//if
+}//Dblqh::getFirstInLogQueue()
+
+/* ---------------------------------------------------------------- */
+/* ---------------- GET FRAGMENT RECORD --------------------------- */
+/* INPUT: FRAGID FRAGMENT ID TO LOOK FOR */
+/* TABPTR POINTER TO TABLE RECORD */
+/* SUBROUTINE SHORT NAME = GFR */
+/* ---------------------------------------------------------------- */
+bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
+{
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == fragId) {
+ fragptr.i = tabptr.p->fragrec[i];
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dblqh::getFragmentrec()
+
+/* ========================================================================= */
+/* ====== INITIATE ADD FRAGMENT RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseAddfragrec(Signal* signal)
+{
+ if (caddfragrecFileSize != 0) {
+ for (addfragptr.i = 0; addfragptr.i < caddfragrecFileSize; addfragptr.i++) {
+ ptrAss(addfragptr, addFragRecord);
+ addfragptr.p->addfragStatus = AddFragRecord::FREE;
+ addfragptr.p->nextAddfragrec = addfragptr.i + 1;
+ }//for
+ addfragptr.i = caddfragrecFileSize - 1;
+ ptrAss(addfragptr, addFragRecord);
+ addfragptr.p->nextAddfragrec = RNIL;
+ cfirstfreeAddfragrec = 0;
+ } else {
+ jam();
+ cfirstfreeAddfragrec = RNIL;
+ }//if
+}//Dblqh::initialiseAddfragrec()
+
+/* ========================================================================= */
+/* ====== INITIATE ATTRIBUTE IN AND OUT DATA BUFFER ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseAttrbuf(Signal* signal)
+{
+ if (cattrinbufFileSize != 0) {
+ for (attrinbufptr.i = 0;
+ attrinbufptr.i < cattrinbufFileSize;
+ attrinbufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(attrinbufptr, attrbuf);
+ attrinbufptr.p->attrbuf[ZINBUF_NEXT] = attrinbufptr.i + 1;
+ }//for
+ /* NEXT ATTRINBUF */
+ attrinbufptr.i = cattrinbufFileSize - 1;
+ ptrAss(attrinbufptr, attrbuf);
+ attrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRINBUF */
+ cfirstfreeAttrinbuf = 0;
+ } else {
+ jam();
+ cfirstfreeAttrinbuf = RNIL;
+ }//if
+}//Dblqh::initialiseAttrbuf()
+
+/* ========================================================================= */
+/* ====== INITIATE DATA BUFFER ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseDatabuf(Signal* signal)
+{
+ if (cdatabufFileSize != 0) {
+ for (databufptr.i = 0; databufptr.i < cdatabufFileSize; databufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(databufptr, databuf);
+ databufptr.p->nextDatabuf = databufptr.i + 1;
+ }//for
+ databufptr.i = cdatabufFileSize - 1;
+ ptrAss(databufptr, databuf);
+ databufptr.p->nextDatabuf = RNIL;
+ cfirstfreeDatabuf = 0;
+ } else {
+ jam();
+ cfirstfreeDatabuf = RNIL;
+ }//if
+}//Dblqh::initialiseDatabuf()
+
+/* ========================================================================= */
+/* ====== INITIATE FRAGMENT RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseFragrec(Signal* signal)
+{
+ if (cfragrecFileSize != 0) {
+ for (fragptr.i = 0; fragptr.i < cfragrecFileSize; fragptr.i++) {
+ refresh_watch_dog();
+ ptrAss(fragptr, fragrecord);
+ fragptr.p->fragStatus = Fragrecord::FREE;
+ fragptr.p->fragActiveStatus = ZFALSE;
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ fragptr.p->nextFrag = fragptr.i + 1;
+ }//for
+ fragptr.i = cfragrecFileSize - 1;
+ ptrAss(fragptr, fragrecord);
+ fragptr.p->nextFrag = RNIL;
+ cfirstfreeFragrec = 0;
+ } else {
+ jam();
+ cfirstfreeFragrec = RNIL;
+ }//if
+}//Dblqh::initialiseFragrec()
+
+/* ========================================================================= */
+/* ====== INITIATE GLOBAL CHECKPOINT RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseGcprec(Signal* signal)
+{
+ UintR tigpIndex;
+
+ if (cgcprecFileSize != 0) {
+ for (gcpPtr.i = 0; gcpPtr.i < cgcprecFileSize; gcpPtr.i++) {
+ ptrAss(gcpPtr, gcpRecord);
+ for (tigpIndex = 0; tigpIndex <= 3; tigpIndex++) {
+ gcpPtr.p->gcpLogPartState[tigpIndex] = ZIDLE;
+ gcpPtr.p->gcpSyncReady[tigpIndex] = ZFALSE;
+ }//for
+ }//for
+ }//if
+}//Dblqh::initialiseGcprec()
+
+/* ========================================================================= */
+/* ====== INITIATE LCP RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLcpRec(Signal* signal)
+{
+ if (clcpFileSize != 0) {
+ for (lcpPtr.i = 0; lcpPtr.i < clcpFileSize; lcpPtr.i++) {
+ ptrAss(lcpPtr, lcpRecord);
+ lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+ lcpPtr.p->firstLcpLocTup = RNIL;
+ lcpPtr.p->reportEmpty = false;
+ lcpPtr.p->lastFragmentFlag = false;
+ }//for
+ }//if
+}//Dblqh::initialiseLcpRec()
+
+/* ========================================================================= */
+/* ====== INITIATE LCP LOCAL RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLcpLocrec(Signal* signal)
+{
+ if (clcpLocrecFileSize != 0) {
+ for (lcpLocptr.i = 0; lcpLocptr.i < clcpLocrecFileSize; lcpLocptr.i++) {
+ ptrAss(lcpLocptr, lcpLocRecord);
+ lcpLocptr.p->nextLcpLoc = lcpLocptr.i + 1;
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->masterLcpRec = RNIL;
+ lcpLocptr.p->waitingBlock = LcpLocRecord::NONE;
+ }//for
+ lcpLocptr.i = clcpLocrecFileSize - 1;
+ ptrAss(lcpLocptr, lcpLocRecord);
+ lcpLocptr.p->nextLcpLoc = RNIL;
+ cfirstfreeLcpLoc = 0;
+ } else {
+ jam();
+ cfirstfreeLcpLoc = RNIL;
+ }//if
+}//Dblqh::initialiseLcpLocrec()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG FILE OPERATION RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLfo(Signal* signal)
+{
+ if (clfoFileSize != 0) {
+ for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) {
+ ptrAss(lfoPtr, logFileOperationRecord);
+ lfoPtr.p->lfoState = LogFileOperationRecord::IDLE;
+ lfoPtr.p->lfoTimer = 0;
+ lfoPtr.p->nextLfo = lfoPtr.i + 1;
+ }//for
+ lfoPtr.i = clfoFileSize - 1;
+ ptrAss(lfoPtr, logFileOperationRecord);
+ lfoPtr.p->nextLfo = RNIL;
+ cfirstfreeLfo = 0;
+ } else {
+ jam();
+ cfirstfreeLfo = RNIL;
+ }//if
+}//Dblqh::initialiseLfo()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG FILE RECORD ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLogFile(Signal* signal)
+{
+ if (clogFileFileSize != 0) {
+ for (logFilePtr.i = 0; logFilePtr.i < clogFileFileSize; logFilePtr.i++) {
+ ptrAss(logFilePtr, logFileRecord);
+ logFilePtr.p->nextLogFile = logFilePtr.i + 1;
+ logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE;
+ }//for
+ logFilePtr.i = clogFileFileSize - 1;
+ ptrAss(logFilePtr, logFileRecord);
+ logFilePtr.p->nextLogFile = RNIL;
+ cfirstfreeLogFile = 0;
+ } else {
+ jam();
+ cfirstfreeLogFile = RNIL;
+ }//if
+}//Dblqh::initialiseLogFile()
+
+/* ========================================================================= */
+/* ====== INITIATE LOG PAGES ======= */
+/* */
+/* ========================================================================= */
+void Dblqh::initialiseLogPage(Signal* signal)
+{
+ if (clogPageFileSize != 0) {
+ for (logPagePtr.i = 0; logPagePtr.i < clogPageFileSize; logPagePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(logPagePtr, logPageRecord);
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i + 1;
+ }//for
+ logPagePtr.i = clogPageFileSize - 1;
+ ptrAss(logPagePtr, logPageRecord);
+ logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL;
+ cfirstfreeLogPage = 0;
+ } else {
+ jam();
+ cfirstfreeLogPage = RNIL;
+ }//if
+ cnoOfLogPages = clogPageFileSize;
+}//Dblqh::initialiseLogPage()
+
+/* =========================================================================
+ * ====== INITIATE LOG PART RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseLogPart(Signal* signal)
+{
+ for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) {
+ ptrAss(logPartPtr, logPartRecord);
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ logPartPtr.p->LogLqhKeyReqSent = ZFALSE;
+ logPartPtr.p->logPartNewestCompletedGCI = (UintR)-1;
+ }//for
+}//Dblqh::initialiseLogPart()
+
+void Dblqh::initialisePageRef(Signal* signal)
+{
+ if (cpageRefFileSize != 0) {
+ for (pageRefPtr.i = 0;
+ pageRefPtr.i < cpageRefFileSize;
+ pageRefPtr.i++) {
+ ptrAss(pageRefPtr, pageRefRecord);
+ pageRefPtr.p->prNext = pageRefPtr.i + 1;
+ }//for
+ pageRefPtr.i = cpageRefFileSize - 1;
+ ptrAss(pageRefPtr, pageRefRecord);
+ pageRefPtr.p->prNext = RNIL;
+ cfirstfreePageRef = 0;
+ } else {
+ jam();
+ cfirstfreePageRef = RNIL;
+ }//if
+}//Dblqh::initialisePageRef()
+
+/* ==========================================================================
+ * ======= INITIATE RECORDS =======
+ *
+ * TAKES CARE OF INITIATION OF ALL RECORDS IN THIS BLOCK.
+ * ========================================================================= */
+void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
+ Uint32 retRef, Uint32 retData)
+{
+ Uint32 i;
+ switch (data) {
+ case 0:
+ jam();
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ cnodeSrState[i] = ZSTART_SR;
+ cnodeExecSrState[i] = ZSTART_SR;
+ }//for
+ for (i = 0; i < 1024; i++) {
+ ctransidHash[i] = RNIL;
+ }//for
+ for (i = 0; i < 4; i++) {
+ cactiveCopy[i] = RNIL;
+ }//for
+ cnoActiveCopy = 0;
+ cCounterAccCommitBlocked = 0;
+ cCounterTupCommitBlocked = 0;
+ caccCommitBlocked = false;
+ ctupCommitBlocked = false;
+ cCommitBlocked = false;
+ ccurrentGcprec = RNIL;
+ caddNodeState = ZFALSE;
+ cstartRecReq = ZFALSE;
+ cnewestGci = (UintR)-1;
+ cnewestCompletedGci = (UintR)-1;
+ crestartOldestGci = 0;
+ crestartNewestGci = 0;
+ cfirstWaitFragSr = RNIL;
+ cfirstCompletedFragSr = RNIL;
+ csrPhaseStarted = ZSR_NO_PHASE_STARTED;
+ csrPhasesCompleted = 0;
+ cmasterDihBlockref = 0;
+ cnoFragmentsExecSr = 0;
+ clcpCompletedState = LCP_IDLE;
+ csrExecUndoLogState = EULS_IDLE;
+ c_lcpId = 0;
+ cnoOfFragsCheckpointed = 0;
+ break;
+ case 1:
+ jam();
+ initialiseAddfragrec(signal);
+ break;
+ case 2:
+ jam();
+ initialiseAttrbuf(signal);
+ break;
+ case 3:
+ jam();
+ initialiseDatabuf(signal);
+ break;
+ case 4:
+ jam();
+ initialiseFragrec(signal);
+ break;
+ case 5:
+ jam();
+ initialiseGcprec(signal);
+ initialiseLcpRec(signal);
+ initialiseLcpLocrec(signal);
+ break;
+ case 6:
+ jam();
+ initialiseLogPage(signal);
+ break;
+ case 7:
+ jam();
+ initialiseLfo(signal);
+ break;
+ case 8:
+ jam();
+ initialiseLogFile(signal);
+ initialiseLogPart(signal);
+ break;
+ case 9:
+ jam();
+ initialisePageRef(signal);
+ break;
+ case 10:
+ jam();
+ initialiseScanrec(signal);
+ break;
+ case 11:
+ jam();
+ initialiseTabrec(signal);
+ break;
+ case 12:
+ jam();
+ initialiseTcNodeFailRec(signal);
+ initialiseTcrec(signal);
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
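+ // Continue with the next initialisation step in a separate signal execution
+ // by sending CONTINUEB to ourselves.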
+ signal->theData[0] = ZINITIALISE_RECORDS;
+ signal->theData[1] = data + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = retRef;
+ signal->theData[4] = retData;
+ sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 5, JBB);
+
+ return;
+}//Dblqh::initialiseRecordsLab()
+
+/* ==========================================================================
+ * ======= INITIATE SCAN RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseScanrec(Signal* signal)
+{
+ ndbrequire(cscanrecFileSize > 1);
+ DLList<ScanRecord> tmp(c_scanRecordPool);
+ while (tmp.seize(scanptr)){
+ //new (scanptr.p) ScanRecord();
+ refresh_watch_dog();
+ scanptr.p->scanType = ScanRecord::ST_IDLE;
+ scanptr.p->scanState = ScanRecord::SCAN_FREE;
+ scanptr.p->scanTcWaiting = ZFALSE;
+ scanptr.p->nextHash = RNIL;
+ scanptr.p->prevHash = RNIL;
+ scanptr.p->scan_acc_index= 0;
+ scanptr.p->scan_acc_attr_recs= 0;
+ }
+ tmp.release();
+}//Dblqh::initialiseScanrec()
+
+/* ==========================================================================
+ * ======= INITIATE TABLE RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTabrec(Signal* signal)
+{
+ if (ctabrecFileSize != 0) {
+ for (tabptr.i = 0; tabptr.i < ctabrecFileSize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tablerec);
+ tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
+ tabptr.p->usageCount = 0;
+ for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
+ tabptr.p->fragid[i] = ZNIL;
+ tabptr.p->fragrec[i] = RNIL;
+ }//for
+ }//for
+ }//if
+}//Dblqh::initialiseTabrec()
+
+/* ==========================================================================
+ * ======= INITIATE TC CONNECTION RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTcrec(Signal* signal)
+{
+ if (ctcConnectrecFileSize != 0) {
+ for (tcConnectptr.i = 0;
+ tcConnectptr.i < ctcConnectrecFileSize;
+ tcConnectptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tcConnectptr, tcConnectionrec);
+ tcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
+ tcConnectptr.p->tcScanRec = RNIL;
+ tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED;
+ tcConnectptr.p->firstAttrinbuf = RNIL;
+ tcConnectptr.p->lastAttrinbuf = RNIL;
+ tcConnectptr.p->firstTupkeybuf = RNIL;
+ tcConnectptr.p->lastTupkeybuf = RNIL;
+ tcConnectptr.p->tcTimer = 0;
+ tcConnectptr.p->nextTcConnectrec = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = ctcConnectrecFileSize - 1;
+ ptrAss(tcConnectptr, tcConnectionrec);
+ tcConnectptr.p->nextTcConnectrec = RNIL;
+ cfirstfreeTcConrec = 0;
+ } else {
+ jam();
+ cfirstfreeTcConrec = RNIL;
+ }//if
+}//Dblqh::initialiseTcrec()
+
+/* ==========================================================================
+ * ======= INITIATE TC NODE FAIL RECORD =======
+ *
+ * ========================================================================= */
+void Dblqh::initialiseTcNodeFailRec(Signal* signal)
+{
+ if (ctcNodeFailrecFileSize != 0) {
+ for (tcNodeFailptr.i = 0;
+ tcNodeFailptr.i < ctcNodeFailrecFileSize;
+ tcNodeFailptr.i++) {
+ ptrAss(tcNodeFailptr, tcNodeFailRecord);
+ tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE;
+ }//for
+ }//if
+}//Dblqh::initialiseTcNodeFailRec()
+
+/* ==========================================================================
+ * ======= INITIATE FRAGMENT RECORD =======
+ *
+ * SUBROUTINE SHORT NAME = IF
+ * ========================================================================= */
+void Dblqh::initFragrec(Signal* signal,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 copyType)
+{
+ new (fragptr.p) Fragrecord();
+ fragptr.p->m_scanNumberMask.set(); // All is free
+ fragptr.p->accBlockref = caccBlockref;
+ fragptr.p->accBlockedList = RNIL;
+ fragptr.p->activeList = RNIL;
+ fragptr.p->firstWaitQueue = RNIL;
+ fragptr.p->lastWaitQueue = RNIL;
+ fragptr.p->fragStatus = Fragrecord::DEFINED;
+ fragptr.p->fragCopy = copyType;
+ fragptr.p->tupBlockref = ctupBlockref;
+ fragptr.p->tuxBlockref = ctuxBlockref;
+ fragptr.p->lcpRef = RNIL;
+ fragptr.p->logFlag = Fragrecord::STATE_TRUE;
+ fragptr.p->lcpFlag = Fragrecord::LCP_STATE_TRUE;
+ for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
+ fragptr.p->lcpId[i] = 0;
+ }//for
+ fragptr.p->maxGciCompletedInLcp = 0;
+ fragptr.p->maxGciInLcp = 0;
+ fragptr.p->copyFragState = ZIDLE;
+ fragptr.p->nextFrag = RNIL;
+ fragptr.p->newestGci = cnewestGci;
+ fragptr.p->nextLcp = 0;
+ fragptr.p->tabRef = tableId;
+ fragptr.p->fragId = fragId;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ fragptr.p->execSrStatus = Fragrecord::IDLE;
+ fragptr.p->execSrNoReplicas = 0;
+ fragptr.p->fragDistributionKey = 0;
+ fragptr.p->activeTcCounter = 0;
+ fragptr.p->tableFragptr = RNIL;
+}//Dblqh::initFragrec()
+
+/* ==========================================================================
+ * ======= INITIATE FRAGMENT RECORD FOR SYSTEM RESTART =======
+ *
+ * SUBROUTINE SHORT NAME = IFS
+ * ========================================================================= */
+void Dblqh::initFragrecSr(Signal* signal)
+{
+ const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0];
+ Uint32 lcpNo = startFragReq->lcpNo;
+ Uint32 noOfLogNodes = startFragReq->noOfLogNodes;
+ ndbrequire(noOfLogNodes <= 4);
+ fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING;
+ fragptr.p->srBlockref = startFragReq->userRef;
+ fragptr.p->srUserptr = startFragReq->userPtr;
+ fragptr.p->srChkpnr = lcpNo;
+ if (lcpNo == (MAX_LCP_STORED - 1)) {
+ jam();
+ fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
+ fragptr.p->nextLcp = 0;
+ } else if (lcpNo < (MAX_LCP_STORED - 1)) {
+ jam();
+ fragptr.p->lcpId[lcpNo] = startFragReq->lcpId;
+ fragptr.p->nextLcp = lcpNo + 1;
+ } else {
+ ndbrequire(lcpNo == ZNIL);
+ jam();
+ fragptr.p->nextLcp = 0;
+ }//if
+ fragptr.p->srNoLognodes = noOfLogNodes;
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+ fragptr.p->srStatus = Fragrecord::SS_IDLE;
+ if (noOfLogNodes > 0) {
+ jam();
+ for (Uint32 i = 0; i < noOfLogNodes; i++) {
+ jam();
+ fragptr.p->srStartGci[i] = startFragReq->startGci[i];
+ fragptr.p->srLastGci[i] = startFragReq->lastGci[i];
+ fragptr.p->srLqhLognode[i] = startFragReq->lqhLogNode[i];
+ }//for
+ fragptr.p->newestGci = startFragReq->lastGci[noOfLogNodes - 1];
+ } else {
+ fragptr.p->newestGci = cnewestGci;
+ }//if
+}//Dblqh::initFragrecSr()
+
+/* ==========================================================================
+ * ======= INITIATE INFORMATION ABOUT GLOBAL CHECKPOINTS =======
+ * IN LOG FILE RECORDS
+ *
+ * INPUT: LOG_FILE_PTR CURRENT LOG FILE
+ * TNO_FD_DESCRIPTORS THE NUMBER OF FILE DESCRIPTORS
+ * TO READ FROM THE LOG PAGE
+ * LOG_PAGE_PTR PAGE ZERO IN LOG FILE
+ * SUBROUTINE SHORT NAME = IGL
+ * ========================================================================= */
+void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors)
+{
+ LogFileRecordPtr iglLogFilePtr;
+ UintR tiglLoop;
+ UintR tiglIndex;
+
+ tiglLoop = 0;
+ iglLogFilePtr.i = logFilePtr.i;
+ iglLogFilePtr.p = logFilePtr.p;
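+ // One file descriptor is handled per log file, stepping backwards via
+ // prevLogFile: for each mbyte in the file, the max completed GCI, max started
+ // GCI and last prepare reference are copied from page zero into the log file
+ // record.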
+IGL_LOOP:
+ for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) {
+ arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] =
+ logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logMaxGciStarted[tiglIndex] =
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ ZNO_MBYTES_IN_FILE) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) +
+ tiglIndex, ZPAGE_SIZE);
+ iglLogFilePtr.p->logLastPrepRef[tiglIndex] =
+ logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
+ (2 * ZNO_MBYTES_IN_FILE)) +
+ (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
+ }//for
+ tiglLoop = tiglLoop + 1;
+ if (tiglLoop < noFdDescriptors) {
+ jam();
+ iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile;
+ ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord);
+ goto IGL_LOOP;
+ }//if
+}//Dblqh::initGciInLogFileRec()
+
+/* ==========================================================================
+ * ======= INITIATE LCP RECORD WHEN USED FOR SYSTEM RESTART =======
+ *
+ * SUBROUTINE SHORT NAME = ILS
+ * ========================================================================= */
+void Dblqh::initLcpSr(Signal* signal,
+ Uint32 lcpNo,
+ Uint32 lcpId,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 fragPtr)
+{
+ lcpPtr.p->lcpQueued = false;
+ lcpPtr.p->currentFragment.fragPtrI = fragPtr;
+ lcpPtr.p->currentFragment.lcpFragOrd.lcpNo = lcpNo;
+ lcpPtr.p->currentFragment.lcpFragOrd.lcpId = lcpId;
+ lcpPtr.p->currentFragment.lcpFragOrd.tableId = tableId;
+ lcpPtr.p->currentFragment.lcpFragOrd.fragmentId = fragId;
+ lcpPtr.p->lcpState = LcpRecord::LCP_SR_WAIT_FRAGID;
+ lcpPtr.p->firstLcpLocAcc = RNIL;
+ lcpPtr.p->firstLcpLocTup = RNIL;
+ lcpPtr.p->lcpAccptr = RNIL;
+}//Dblqh::initLcpSr()
+
+/* ==========================================================================
+ * ======= INITIATE LOG PART =======
+ *
+ * ========================================================================= */
+void Dblqh::initLogpart(Signal* signal)
+{
+ logPartPtr.p->execSrLogPage = RNIL;
+ logPartPtr.p->execSrLogPageIndex = ZNIL;
+ logPartPtr.p->execSrExecuteIndex = 0;
+ logPartPtr.p->noLogFiles = cnoLogFiles;
+ logPartPtr.p->logLap = 0;
+ logPartPtr.p->logTailFileNo = 0;
+ logPartPtr.p->logTailMbyte = 0;
+ logPartPtr.p->lastMbyte = ZNIL;
+ logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE;
+ logPartPtr.p->logExecState = LogPartRecord::LES_IDLE;
+ logPartPtr.p->firstLogTcrec = RNIL;
+ logPartPtr.p->lastLogTcrec = RNIL;
+ logPartPtr.p->firstLogQueue = RNIL;
+ logPartPtr.p->lastLogQueue = RNIL;
+ logPartPtr.p->gcprec = RNIL;
+ logPartPtr.p->firstPageRef = RNIL;
+ logPartPtr.p->lastPageRef = RNIL;
+ logPartPtr.p->headFileNo = ZNIL;
+ logPartPtr.p->headPageNo = ZNIL;
+ logPartPtr.p->headPageIndex = ZNIL;
+}//Dblqh::initLogpart()
+
+/* ==========================================================================
+ * ======= INITIATE LOG POINTERS =======
+ *
+ * ========================================================================= */
+void Dblqh::initLogPointers(Signal* signal)
+{
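+ // The low two bits of the operation's hash value select one of the four log parts.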
+ logPartPtr.i = tcConnectptr.p->hashValue & 3;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+}//Dblqh::initLogPointers()
+
+/* ------------------------------------------------------------------------- */
+/* ------- INIT REQUEST INFO BEFORE EXECUTING A LOG RECORD ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::initReqinfoExecSr(Signal* signal)
+{
+ UintR Treqinfo = 0;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ LqhKeyReq::setKeyLen(Treqinfo, regTcPtr->primKeyLen);
+/* ------------------------------------------------------------------------- */
+/* NUMBER OF BACKUPS AND STANDBYS ARE ZERO AND NEED NOT BE SET. */
+/* REPLICA TYPE IS CLEARED BY SEND_LQHKEYREQ. */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* SET LAST REPLICA NUMBER TO ZERO (BIT 10-11) */
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+/* SET DIRTY FLAG */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setDirtyFlag(Treqinfo, 1);
+/* ------------------------------------------------------------------------- */
+/* SET SIMPLE TRANSACTION */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setSimpleFlag(Treqinfo, 1);
+/* ------------------------------------------------------------------------- */
+/* SET OPERATION TYPE AND LOCK MODE (NEVER READ OPERATION OR SCAN IN LOG) */
+/* ------------------------------------------------------------------------- */
+ LqhKeyReq::setLockType(Treqinfo, regTcPtr->operation);
+ LqhKeyReq::setOperation(Treqinfo, regTcPtr->operation);
+ regTcPtr->reqinfo = Treqinfo;
+/* ------------------------------------------------------------------------ */
+/* NO OF BACKUP IS SET TO ONE AND NUMBER OF STANDBY NODES IS SET TO ZERO. */
+/* THUS THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */
+/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. */
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------- */
+/* SET REPLICA TYPE TO PRIMARY AND NUMBER OF REPLICA TO ONE */
+/* ------------------------------------------------------------------------- */
+ regTcPtr->lastReplicaNo = 0;
+ regTcPtr->apiVersionNo = 0;
+ regTcPtr->nextSeqNoReplica = 0;
+ regTcPtr->opExec = 0;
+ regTcPtr->storedProcId = ZNIL;
+ regTcPtr->readlenAi = 0;
+ regTcPtr->nodeAfterNext[0] = ZNIL;
+ regTcPtr->nodeAfterNext[1] = ZNIL;
+ regTcPtr->dirtyOp = ZFALSE;
+ regTcPtr->tcBlockref = cownref;
+}//Dblqh::initReqinfoExecSr()
+
+/* --------------------------------------------------------------------------
+ * ------- INSERT FRAGMENT -------
+ *
+ * ------------------------------------------------------------------------- */
+bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
+{
+ terrorCode = ZOK;
+ if (cfirstfreeFragrec == RNIL) {
+ jam();
+ terrorCode = ZNO_FREE_FRAGMENTREC;
+ return false;
+ }//if
+ seizeFragmentrec(signal);
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ jam();
+ if (tabptr.p->fragid[i] == ZNIL) {
+ jam();
+ tabptr.p->fragid[i] = fragId;
+ tabptr.p->fragrec[i] = fragptr.i;
+ return true;
+ }//if
+ }//for
+ terrorCode = ZTOO_MANY_FRAGMENTS;
+ return false;
+}//Dblqh::insertFragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- LINK OPERATION INTO WAIT QUEUE ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME: LFQ
+// Input Pointers:
+// tcConnectptr
+// fragptr
+ * ------------------------------------------------------------------------- */
+void Dblqh::linkFragQueue(Signal* signal)
+{
+ TcConnectionrecPtr lfqTcConnectptr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ Fragrecord * const regFragPtr = fragptr.p;
+ Uint32 tcIndex = tcConnectptr.i;
+
+ lfqTcConnectptr.i = regFragPtr->lastWaitQueue;
+ regTcPtr->nextTc = RNIL;
+ regFragPtr->lastWaitQueue = tcIndex;
+ regTcPtr->prevTc = lfqTcConnectptr.i;
+ ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST);
+ regTcPtr->listState = TcConnectionrec::WAIT_QUEUE_LIST;
+ if (lfqTcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(lfqTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lfqTcConnectptr.p->nextTc = tcIndex;
+ } else {
+ regFragPtr->firstWaitQueue = tcIndex;
+ }//if
+ return;
+}//Dblqh::linkFragQueue()
+
+/* -------------------------------------------------------------------------
+ * ------- LINK OPERATION INTO WAITING FOR LOGGING -------
+ *
+ * SUBROUTINE SHORT NAME = LWL
+// Input Pointers:
+// tcConnectptr
+// logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
+{
+ TcConnectionrecPtr lwlTcConnectptr;
+
+/* -------------------------------------------------- */
+/* LINK ACTIVE OPERATION INTO QUEUE WAITING FOR */
+/* ACCESS TO THE LOG PART. */
+/* -------------------------------------------------- */
+ lwlTcConnectptr.i = regLogPartPtr.p->lastLogQueue;
+ if (lwlTcConnectptr.i == RNIL) {
+ jam();
+ regLogPartPtr.p->firstLogQueue = tcConnectptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(lwlTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ lwlTcConnectptr.p->nextTcLogQueue = tcConnectptr.i;
+ }//if
+ regLogPartPtr.p->lastLogQueue = tcConnectptr.i;
+ tcConnectptr.p->nextTcLogQueue = RNIL;
+ if (regLogPartPtr.p->LogLqhKeyReqSent == ZFALSE) {
+ jam();
+ regLogPartPtr.p->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = regLogPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dblqh::linkWaitLog()
+
+/* --------------------------------------------------------------------------
+ * ------- START THE NEXT OPERATION ON THIS LOG PART IF ANY -------
+ * ------- OPERATIONS ARE QUEUED. -------
+ *
+ * SUBROUTINE SHORT NAME = LNS
+// Input Pointers:
+// tcConnectptr
+// logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::logNextStart(Signal* signal)
+{
+ LogPartRecordPtr lnsLogPartPtr;
+ UintR tlnsStillWaiting;
+ LogPartRecord * const regLogPartPtr = logPartPtr.p;
+
+ if ((regLogPartPtr->firstLogQueue == RNIL) &&
+ (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) &&
+ (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE)) {
+// --------------------------------------------------------------------------
+// Optimised route for the common case
+// --------------------------------------------------------------------------
+ regLogPartPtr->logPartState = LogPartRecord::IDLE;
+ return;
+ }//if
+ if (regLogPartPtr->firstLogQueue != RNIL) {
+ jam();
+ if (regLogPartPtr->LogLqhKeyReqSent == ZFALSE) {
+ jam();
+ regLogPartPtr->LogLqhKeyReqSent = ZTRUE;
+ signal->theData[0] = ZLOG_LQHKEYREQ;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ } else {
+ if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) {
+ jam();
+ regLogPartPtr->logPartState = LogPartRecord::IDLE;
+ } else {
+ jam();
+ }//if
+ }//if
+ if (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE) {
+ jam();
+ return;
+ } else {
+ jam();
+/* --------------------------------------------------------------------------
+ * A COMPLETE GCI LOG RECORD IS WAITING TO BE WRITTEN. WE GIVE THIS HIGHEST
+ * PRIORITY AND WRITE IT IMMEDIATELY. AFTER WRITING IT WE CHECK IF ANY MORE
+ * LOG PARTS ARE WAITING. IF NOT WE SEND A SIGNAL THAT INITIALISES THE GCP
+ * RECORD TO WAIT UNTIL ALL COMPLETE GCI LOG RECORDS HAVE REACHED THE DISK.
+ * -------------------------------------------------------------------------- */
+ writeCompletedGciLog(signal);
+ logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE;
+ tlnsStillWaiting = ZFALSE;
+ for (lnsLogPartPtr.i = 0; lnsLogPartPtr.i < 4; lnsLogPartPtr.i++) {
+ jam();
+ ptrAss(lnsLogPartPtr, logPartRecord);
+ if (lnsLogPartPtr.p->waitWriteGciLog == LogPartRecord::WWGL_TRUE) {
+ jam();
+ tlnsStillWaiting = ZTRUE;
+ }//if
+ }//for
+ if (tlnsStillWaiting == ZFALSE) {
+ jam();
+ signal->theData[0] = ZINIT_GCP_REC;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 1, JBB);
+ }//if
+ }//if
+}//Dblqh::logNextStart()
+
+/* --------------------------------------------------------------------------
+ * ------- MOVE PAGES FROM LFO RECORD TO PAGE REFERENCE RECORD -------
+ * WILL ALWAYS MOVE 8 PAGES TO A PAGE REFERENCE RECORD.
+ *
+ * SUBROUTINE SHORT NAME = MPR
+ * ------------------------------------------------------------------------- */
+void Dblqh::moveToPageRef(Signal* signal)
+{
+ LogPageRecordPtr mprLogPagePtr;
+ PageRefRecordPtr mprPageRefPtr;
+ UintR tmprIndex;
+
+/* --------------------------------------------------------------------------
+ * ------- INSERT PAGE REFERENCE RECORD -------
+ *
+ * INPUT: LFO_PTR LOG FILE OPERATION RECORD
+ * LOG_PART_PTR LOG PART RECORD
+ * PAGE_REF_PTR THE PAGE REFERENCE RECORD TO BE INSERTED.
+ * ------------------------------------------------------------------------- */
+ PageRefRecordPtr iprPageRefPtr;
+
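+ // If adding another eight pages would exceed the main memory buffer limit,
+ // recycle the oldest page reference record and its pages instead of growing
+ // the buffer.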
+ if ((logPartPtr.p->mmBufferSize + 8) >= ZMAX_MM_BUFFER_SIZE) {
+ jam();
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ releasePrPages(signal);
+ removePageRef(signal);
+ } else {
+ jam();
+ logPartPtr.p->mmBufferSize = logPartPtr.p->mmBufferSize + 8;
+ }//if
+ seizePageRef(signal);
+ if (logPartPtr.p->firstPageRef == RNIL) {
+ jam();
+ logPartPtr.p->firstPageRef = pageRefPtr.i;
+ } else {
+ jam();
+ iprPageRefPtr.i = logPartPtr.p->lastPageRef;
+ ptrCheckGuard(iprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ iprPageRefPtr.p->prNext = pageRefPtr.i;
+ }//if
+ pageRefPtr.p->prPrev = logPartPtr.p->lastPageRef;
+ logPartPtr.p->lastPageRef = pageRefPtr.i;
+
+ pageRefPtr.p->prFileNo = logFilePtr.p->fileNo;
+ pageRefPtr.p->prPageNo = lfoPtr.p->lfoPageNo;
+ tmprIndex = 0;
+ mprLogPagePtr.i = lfoPtr.p->firstLfoPage;
+MPR_LOOP:
+ arrGuard(tmprIndex, 8);
+ pageRefPtr.p->pageRef[tmprIndex] = mprLogPagePtr.i;
+ tmprIndex = tmprIndex + 1;
+ ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
+ mprLogPagePtr.i = mprLogPagePtr.p->logPageWord[ZNEXT_PAGE];
+ if (mprLogPagePtr.i != RNIL) {
+ jam();
+ goto MPR_LOOP;
+ }//if
+ mprPageRefPtr.i = pageRefPtr.p->prPrev;
+ if (mprPageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(mprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ mprLogPagePtr.i = mprPageRefPtr.p->pageRef[7];
+ ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord);
+ mprLogPagePtr.p->logPageWord[ZNEXT_PAGE] = pageRefPtr.p->pageRef[0];
+ }//if
+}//Dblqh::moveToPageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ THE ATTRINFO FROM THE LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RA */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readAttrinfo(Signal* signal)
+{
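+ // The first five attrinfo words are read into firstAttrinfo (length recorded
+ // in reclenAiLqhkey); the remaining words are read into seized attrinfo
+ // buffers of at most 22 words each.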
+ Uint32 remainingLen = tcConnectptr.p->totSendlenAi;
+ if (remainingLen == 0) {
+ jam();
+ tcConnectptr.p->reclenAiLqhkey = 0;
+ return;
+ }//if
+ Uint32 dataLen = remainingLen;
+ if (remainingLen > 5)
+ dataLen = 5;
+ readLogData(signal, dataLen, &tcConnectptr.p->firstAttrinfo[0]);
+ tcConnectptr.p->reclenAiLqhkey = dataLen;
+ remainingLen -= dataLen;
+ while (remainingLen > 0) {
+ jam();
+ dataLen = remainingLen;
+ if (remainingLen > 22)
+ dataLen = 22;
+ seizeAttrinbuf(signal);
+ readLogData(signal, dataLen, &attrinbufptr.p->attrbuf[0]);
+ attrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = dataLen;
+ remainingLen -= dataLen;
+ }//while
+}//Dblqh::readAttrinfo()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ COMMIT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RCL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord)
+{
+ Uint32 trclPageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
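+ // Fast path: the whole commit log record fits on the current page and can be
+ // read directly; otherwise read it word by word across the page boundary.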
+ if ((trclPageIndex + (ZCOMMIT_LOG_SIZE - 1)) < ZPAGE_SIZE) {
+ jam();
+ tcConnectptr.p->tableref = logPagePtr.p->logPageWord[trclPageIndex + 0];
+ tcConnectptr.p->schemaVersion = logPagePtr.p->logPageWord[trclPageIndex + 1];
+ tcConnectptr.p->fragmentid = logPagePtr.p->logPageWord[trclPageIndex + 2];
+ commitLogRecord->fileNo = logPagePtr.p->logPageWord[trclPageIndex + 3];
+ commitLogRecord->startPageNo = logPagePtr.p->logPageWord[trclPageIndex + 4];
+ commitLogRecord->startPageIndex = logPagePtr.p->logPageWord[trclPageIndex + 5];
+ commitLogRecord->stopPageNo = logPagePtr.p->logPageWord[trclPageIndex + 6];
+ tcConnectptr.p->gci = logPagePtr.p->logPageWord[trclPageIndex + 7];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ (trclPageIndex + ZCOMMIT_LOG_SIZE) - 1;
+ } else {
+ jam();
+ tcConnectptr.p->tableref = readLogword(signal);
+ tcConnectptr.p->schemaVersion = readLogword(signal);
+ tcConnectptr.p->fragmentid = readLogword(signal);
+ commitLogRecord->fileNo = readLogword(signal);
+ commitLogRecord->startPageNo = readLogword(signal);
+ commitLogRecord->startPageIndex = readLogword(signal);
+ commitLogRecord->stopPageNo = readLogword(signal);
+ tcConnectptr.p->gci = readLogword(signal);
+ }//if
+ tcConnectptr.p->transid[0] = logPartPtr.i + 65536;
+ tcConnectptr.p->transid[1] = (DBLQH << 20) + (cownNodeid << 8);
+}//Dblqh::readCommitLog()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ LOG PAGES FROM DISK IN ORDER TO EXECUTE A LOG ------- */
+/* RECORD WHICH WAS NOT FOUND IN MAIN MEMORY. */
+/* */
+/* SUBROUTINE SHORT NAME = REL */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readExecLog(Signal* signal)
+{
+ UintR trelIndex;
+ UintR trelI;
+
+ seizeLfo(signal);
+ initLfo(signal);
+ trelI = logPartPtr.p->execSrStopPageNo - logPartPtr.p->execSrStartPageNo;
+ arrGuard(trelI + 1, 16);
+ lfoPtr.p->logPageArray[trelI + 1] = logPartPtr.p->execSrStartPageNo;
+ for (trelIndex = logPartPtr.p->execSrStopPageNo; (trelIndex >= logPartPtr.p->execSrStartPageNo) &&
+ (UintR)~trelIndex; trelIndex--) {
+ jam();
+ seizeLogpage(signal);
+ arrGuard(trelI, 16);
+ lfoPtr.p->logPageArray[trelI] = logPagePtr.i;
+ trelI--;
+ }//for
+ lfoPtr.p->lfoPageNo = logPartPtr.p->execSrStartPageNo;
+ lfoPtr.p->noPagesRw = (logPartPtr.p->execSrStopPageNo -
+ logPartPtr.p->execSrStartPageNo) + 1;
+ lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES; // edtjamo TR509 //ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = lfoPtr.p->noPagesRw;
+ signal->theData[6] = lfoPtr.p->logPageArray[0];
+ signal->theData[7] = lfoPtr.p->logPageArray[1];
+ signal->theData[8] = lfoPtr.p->logPageArray[2];
+ signal->theData[9] = lfoPtr.p->logPageArray[3];
+ signal->theData[10] = lfoPtr.p->logPageArray[4];
+ signal->theData[11] = lfoPtr.p->logPageArray[5];
+ signal->theData[12] = lfoPtr.p->logPageArray[6];
+ signal->theData[13] = lfoPtr.p->logPageArray[7];
+ signal->theData[14] = lfoPtr.p->logPageArray[8];
+ signal->theData[15] = lfoPtr.p->logPageArray[9];
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA);
+}//Dblqh::readExecLog()
+
+/* ------------------------------------------------------------------------- */
+/* ------- START READING A NEW MBYTE WHEN EXECUTING THE FRAGMENT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RES */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readExecSrNewMbyte(Signal* signal)
+{
+ logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->filePosition = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE;
+ logPartPtr.p->execSrPagesRead = 0;
+ logPartPtr.p->execSrPagesReading = 0;
+ logPartPtr.p->execSrPagesExecuted = 0;
+ readExecSr(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE;
+}//Dblqh::readExecSrNewMbyte()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */
+/* */
+/* SUBROUTINE SHORT NAME = RES */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readExecSr(Signal* signal)
+{
+ UintR tresPageid;
+ UintR tresIndex;
+
+ tresPageid = logFilePtr.p->filePosition;
+ seizeLfo(signal);
+ initLfo(signal);
+ for (tresIndex = 7; (UintR)~tresIndex; tresIndex--) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* GO BACKWARDS SINCE WE INSERT AT THE BEGINNING AND WE WANT THAT FIRST PAGE */
+/* SHALL BE FIRST AND LAST PAGE LAST. */
+/* ------------------------------------------------------------------------- */
+ seizeLogpage(signal);
+ lfoPtr.p->logPageArray[tresIndex] = logPagePtr.i;
+ }//for
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_SR;
+ lfoPtr.p->lfoPageNo = tresPageid;
+ logFilePtr.p->filePosition = logFilePtr.p->filePosition + 8;
+ logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading + 8;
+ lfoPtr.p->noPagesRw = 8;
+ lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0];
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_MEM_PAGES;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 8;
+ signal->theData[6] = lfoPtr.p->logPageArray[0];
+ signal->theData[7] = lfoPtr.p->logPageArray[1];
+ signal->theData[8] = lfoPtr.p->logPageArray[2];
+ signal->theData[9] = lfoPtr.p->logPageArray[3];
+ signal->theData[10] = lfoPtr.p->logPageArray[4];
+ signal->theData[11] = lfoPtr.p->logPageArray[5];
+ signal->theData[12] = lfoPtr.p->logPageArray[6];
+ signal->theData[13] = lfoPtr.p->logPageArray[7];
+ signal->theData[14] = tresPageid;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+}//Dblqh::readExecSr()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ THE PRIMARY KEY FROM THE LOG ---------------- */
+/* */
+/* SUBROUTINE SHORT NAME = RK */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readKey(Signal* signal)
+{
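+ // The first four key words are read into tupkeyData; any remaining words are
+ // read into seized key buffers, four words at a time.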
+ Uint32 remainingLen = tcConnectptr.p->primKeyLen;
+ ndbrequire(remainingLen != 0);
+ Uint32 dataLen = remainingLen;
+ if (remainingLen > 4)
+ dataLen = 4;
+ readLogData(signal, dataLen, &tcConnectptr.p->tupkeyData[0]);
+ remainingLen -= dataLen;
+ while (remainingLen > 0) {
+ jam();
+ seizeTupkeybuf(signal);
+ dataLen = remainingLen;
+ if (dataLen > 4)
+ dataLen = 4;
+ readLogData(signal, dataLen, &databufptr.p->data[0]);
+ remainingLen -= dataLen;
+ }//while
+}//Dblqh::readKey()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ A NUMBER OF WORDS FROM THE LOG INTO A DATA BUFFER ------ */
+/* */
+/* SUBROUTINE SHORT NAME = RLD */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr)
+{
+ ndbrequire(noOfWords < 32);
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if ((logPos + noOfWords) >= ZPAGE_SIZE) {
+ for (Uint32 i = 0; i < noOfWords; i++)
+ dataPtr[i] = readLogwordExec(signal);
+ } else {
+ MEMCOPY_NO_WORDS(dataPtr, &logPagePtr.p->logPageWord[logPos], noOfWords);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + noOfWords;
+ }//if
+}//Dblqh::readLogData()
+
+/* ------------------------------------------------------------------------- */
+/* ------------ READ THE LOG HEADER OF A PREPARE LOG RECORD ---------------- */
+/* */
+/* SUBROUTINE SHORT NAME = RLH */
+/* --------------------------------------------------------------------------*/
+void Dblqh::readLogHeader(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) {
+ jam();
+ tcConnectptr.p->hashValue = logPagePtr.p->logPageWord[logPos + 2];
+ tcConnectptr.p->operation = logPagePtr.p->logPageWord[logPos + 3];
+ tcConnectptr.p->totSendlenAi = logPagePtr.p->logPageWord[logPos + 4];
+ tcConnectptr.p->primKeyLen = logPagePtr.p->logPageWord[logPos + 5];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE;
+ } else {
+ jam();
+ readLogwordExec(signal); /* IGNORE PREPARE LOG RECORD TYPE */
+ readLogwordExec(signal); /* IGNORE LOG RECORD SIZE */
+ tcConnectptr.p->hashValue = readLogwordExec(signal);
+ tcConnectptr.p->operation = readLogwordExec(signal);
+ tcConnectptr.p->totSendlenAi = readLogwordExec(signal);
+ tcConnectptr.p->primKeyLen = readLogwordExec(signal);
+ }//if
+}//Dblqh::readLogHeader()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A WORD FROM THE LOG ------- */
+/* */
+/* OUTPUT: TLOG_WORD */
+/* SUBROUTINE SHORT NAME = RLW */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::readLogword(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ Uint32 logWord = logPagePtr.p->logPageWord[logPos];
+ logPos++;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ if (logPos >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage++;
+ logPartPtr.p->execSrPagesRead--;
+ logPartPtr.p->execSrPagesExecuted++;
+ }//if
+ return logWord;
+}//Dblqh::readLogword()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A WORD FROM THE LOG WHEN EXECUTING A LOG RECORD ------- */
+/* */
+/* OUTPUT: TLOG_WORD */
+/* SUBROUTINE SHORT NAME = RWE */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::readLogwordExec(Signal* signal)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ Uint32 logWord = logPagePtr.p->logPageWord[logPos];
+ logPos++;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos;
+ if (logPos >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ if (logPagePtr.i != RNIL){
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ } else {
+ // Reading word at the last pos in the last page
+ // Don't step forward to next page!
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]++;
+ }
+ }//if
+ return logWord;
+}//Dblqh::readLogwordExec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- READ A SINGLE PAGE FROM THE LOG ------- */
+/* */
+/* INPUT: TRSP_PAGE_NO */
+/* SUBROUTINE SHORT NAME = RSP */
+/* ------------------------------------------------------------------------- */
+void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo)
+{
+ seizeLfo(signal);
+ initLfo(signal);
+ seizeLogpage(signal);
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
+ lfoPtr.p->lfoPageNo = pageNo;
+ lfoPtr.p->noPagesRw = 1;
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1;
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = pageNo;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dblqh::readSinglePage()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM ACC BLOCKED LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = RAC
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseAccList(Signal* signal)
+{
+ TcConnectionrecPtr racTcNextConnectptr;
+ TcConnectionrecPtr racTcPrevConnectptr;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ racTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ racTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ if (tcConnectptr.p->listState != TcConnectionrec::ACC_BLOCK_LIST) {
+ jam();
+ systemError(signal);
+ }//if
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ if (racTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(racTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ racTcNextConnectptr.p->prevTc = racTcPrevConnectptr.i;
+ }//if
+ if (racTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(racTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ racTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+ /* ---------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN THE ACC BLOCKED LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ fragptr.p->accBlockedList = racTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseAccList()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE COPY FRAGMENT FROM ACTIVE COPY LIST -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseActiveCopy(Signal* signal)
+{
+ /* MUST BE 8 BIT */
+ UintR tracFlag;
+ UintR tracIndex;
+
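+ // Find the fragment in the active copy list, then shift the following entries
+ // one slot towards the front and clear the last slot.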
+ tracFlag = ZFALSE;
+ for (tracIndex = 0; tracIndex < 4; tracIndex++) {
+ if (tracFlag == ZFALSE) {
+ jam();
+ if (cactiveCopy[tracIndex] == fragptr.i) {
+ jam();
+ tracFlag = ZTRUE;
+ }//if
+ } else {
+ if (tracIndex < 3) {
+ jam();
+ cactiveCopy[tracIndex - 1] = cactiveCopy[tracIndex];
+ } else {
+ jam();
+ cactiveCopy[3] = RNIL;
+ }//if
+ }//if
+ }//for
+ ndbrequire(tracFlag == ZTRUE);
+ cnoActiveCopy--;
+}//Dblqh::releaseActiveCopy()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME = RAL
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseActiveList(Signal* signal)
+{
+ TcConnectionrecPtr ralTcNextConnectptr;
+ TcConnectionrecPtr ralTcPrevConnectptr;
+ ralTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ ralTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ ndbrequire(tcConnectptr.p->listState == TcConnectionrec::IN_ACTIVE_LIST);
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ if (ralTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcNextConnectptr.p->prevTc = ralTcPrevConnectptr.i;
+ }//if
+ if (ralTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(ralTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ ralTcPrevConnectptr.p->nextTc = tcConnectptr.p->nextTc;
+ } else {
+ jam();
+ /* ----------------------------------------------------------------------
+ * OPERATION RECORD IS FIRST IN ACTIVE LIST
+ * THIS MEANS THAT THERE EXISTS NO PREVIOUS TC THAT NEEDS TO BE UPDATED.
+ * --------------------------------------------------------------------- */
+ fragptr.p->activeList = ralTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseActiveList()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE ADD FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseAddfragrec(Signal* signal)
+{
+ addfragptr.p->addfragStatus = AddFragRecord::FREE;
+ addfragptr.p->nextAddfragrec = cfirstfreeAddfragrec;
+ cfirstfreeAddfragrec = addfragptr.i;
+}//Dblqh::releaseAddfragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseFragrec()
+{
+ fragptr.p->fragStatus = Fragrecord::FREE;
+ fragptr.p->nextFrag = cfirstfreeFragrec;
+ cfirstfreeFragrec = fragptr.i;
+}//Dblqh::releaseFragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE LCP LOCAL RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseLcpLoc(Signal* signal)
+{
+ lcpLocptr.p->lcpLocstate = LcpLocRecord::IDLE;
+ lcpLocptr.p->nextLcpLoc = cfirstfreeLcpLoc;
+ cfirstfreeLcpLoc = lcpLocptr.i;
+}//Dblqh::releaseLcpLoc()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE A PAGE REFERENCE RECORD. -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releasePageRef(Signal* signal)
+{
+ pageRefPtr.p->prNext = cfirstfreePageRef;
+ cfirstfreePageRef = pageRefPtr.i;
+}//Dblqh::releasePageRef()
+
+/* --------------------------------------------------------------------------
+ * --- RELEASE ALL PAGES IN THE MM BUFFER AFTER EXECUTING THE LOG ON IT. ----
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseMmPages(Signal* signal)
+{
+RMP_LOOP:
+ jam();
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ if (pageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ releasePrPages(signal);
+ removePageRef(signal);
+ goto RMP_LOOP;
+ }//if
+}//Dblqh::releaseMmPages()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE A SET OF PAGES AFTER EXECUTING THE LOG ON IT. -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::releasePrPages(Signal* signal)
+{
+ UintR trppIndex;
+
+ for (trppIndex = 0; trppIndex <= 7; trppIndex++) {
+ jam();
+ logPagePtr.i = pageRefPtr.p->pageRef[trppIndex];
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ releaseLogpage(signal);
+ }//for
+}//Dblqh::releasePrPages()
+
+/* --------------------------------------------------------------------------
+ * ------- RELEASE OPERATION FROM WAIT QUEUE LIST ON FRAGMENT -------
+ *
+ * SUBROUTINE SHORT NAME : RWA
+ * ------------------------------------------------------------------------- */
+void Dblqh::releaseWaitQueue(Signal* signal)
+{
+ TcConnectionrecPtr rwaTcNextConnectptr;
+ TcConnectionrecPtr rwaTcPrevConnectptr;
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ rwaTcPrevConnectptr.i = tcConnectptr.p->prevTc;
+ rwaTcNextConnectptr.i = tcConnectptr.p->nextTc;
+ if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) {
+ jam();
+ systemError(signal);
+ }//if
+ tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
+ if (rwaTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rwaTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rwaTcNextConnectptr.p->prevTc = rwaTcPrevConnectptr.i;
+ } else {
+ jam();
+ fragptr.p->lastWaitQueue = rwaTcPrevConnectptr.i;
+ }//if
+ if (rwaTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rwaTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rwaTcPrevConnectptr.p->nextTc = rwaTcNextConnectptr.i;
+ } else {
+ jam();
+ fragptr.p->firstWaitQueue = rwaTcNextConnectptr.i;
+ }//if
+}//Dblqh::releaseWaitQueue()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE OPERATION RECORD FROM THE LOG PART'S LIST OF -------
+ * OPERATIONS NOT YET COMPLETED IN THE LOG.
+ *
+ * SUBROUTINE SHORT NAME = RLO
+ * ------------------------------------------------------------------------- */
+void Dblqh::removeLogTcrec(Signal* signal)
+{
+ TcConnectionrecPtr rloTcNextConnectptr;
+ TcConnectionrecPtr rloTcPrevConnectptr;
+ rloTcPrevConnectptr.i = tcConnectptr.p->prevLogTcrec;
+ rloTcNextConnectptr.i = tcConnectptr.p->nextLogTcrec;
+ if (rloTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
+ } else {
+ jam();
+ logPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
+ }//if
+ if (rloTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
+ } else {
+ jam();
+ logPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
+ }//if
+}//Dblqh::removeLogTcrec()
+
+/* --------------------------------------------------------------------------
+ * ------- REMOVE PAGE REFERENCE RECORD FROM LIST IN THIS LOG PART -------
+ *
+ * SUBROUTINE SHORT NAME = RPR
+ * ------------------------------------------------------------------------- */
+void Dblqh::removePageRef(Signal* signal)
+{
+ PageRefRecordPtr rprPageRefPtr;
+
+ pageRefPtr.i = logPartPtr.p->firstPageRef;
+ if (pageRefPtr.i != RNIL) {
+ jam();
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ if (pageRefPtr.p->prNext == RNIL) {
+ jam();
+ logPartPtr.p->lastPageRef = RNIL;
+ logPartPtr.p->firstPageRef = RNIL;
+ } else {
+ jam();
+ logPartPtr.p->firstPageRef = pageRefPtr.p->prNext;
+ rprPageRefPtr.i = pageRefPtr.p->prNext;
+ ptrCheckGuard(rprPageRefPtr, cpageRefFileSize, pageRefRecord);
+ rprPageRefPtr.p->prPrev = RNIL;
+ }//if
+ releasePageRef(signal);
+ }//if
+}//Dblqh::removePageRef()
+
+/* ------------------------------------------------------------------------- */
+/* ------- RETURN FROM EXECUTION OF LOG ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+Uint32 Dblqh::returnExecLog(Signal* signal)
+{
+ tcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
+ initLogPointers(signal);
+ logPartPtr.p->execSrExecuteIndex++;
+ Uint32 result = checkIfExecLog(signal);
+ if (result == ZOK) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THIS LOG RECORD WILL BE EXECUTED AGAIN TOWARDS ANOTHER NODE. */
+/* ------------------------------------------------------------------------- */
+ logPagePtr.i = logPartPtr.p->execSrLogPage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
+ logPartPtr.p->execSrLogPageIndex;
+ } else {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* NO MORE EXECUTION OF THIS LOG RECORD. */
+/* ------------------------------------------------------------------------- */
+ if (logPartPtr.p->logExecState ==
+ LogPartRecord::LES_EXEC_LOGREC_FROM_FILE) {
+ jam();
+/* ------------------------------------------------------------------------- */
+/* THE LOG RECORD WAS READ FROM DISK. RELEASE ITS PAGES IMMEDIATELY. */
+/* ------------------------------------------------------------------------- */
+ lfoPtr.i = logPartPtr.p->execSrLfoRec;
+ ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord);
+ releaseLfoPages(signal);
+ releaseLfo(signal);
+ logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
+ if (logPartPtr.p->execSrExecLogFile != logPartPtr.p->currentLogfile) {
+ jam();
+ LogFileRecordPtr clfLogFilePtr;
+ clfLogFilePtr.i = logPartPtr.p->execSrExecLogFile;
+ ptrCheckGuard(clfLogFilePtr, clogFileFileSize, logFileRecord);
+ clfLogFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_LOG;
+ closeFile(signal, clfLogFilePtr);
+ result = ZCLOSE_FILE;
+ }//if
+ }//if
+ logPartPtr.p->execSrExecuteIndex = 0;
+ logPartPtr.p->execSrLogPage = RNIL;
+ logPartPtr.p->execSrLogPageIndex = ZNIL;
+ logPagePtr.i = logFilePtr.p->currentLogpage;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->savePageIndex;
+ }//if
+ return result;
+}//Dblqh::returnExecLog()
+
+/* --------------------------------------------------------------------------
+ * ------- SEIZE ADD FRAGMENT RECORD ------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::seizeAddfragrec(Signal* signal)
+{
+ addfragptr.i = cfirstfreeAddfragrec;
+ ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
+ cfirstfreeAddfragrec = addfragptr.p->nextAddfragrec;
+}//Dblqh::seizeAddfragrec()
+
+/* --------------------------------------------------------------------------
+ * ------- SEIZE FRAGMENT RECORD -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::seizeFragmentrec(Signal* signal)
+{
+ fragptr.i = cfirstfreeFragrec;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ cfirstfreeFragrec = fragptr.p->nextFrag;
+ fragptr.p->nextFrag = RNIL;
+}//Dblqh::seizeFragmentrec()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE A PAGE REFERENCE RECORD. ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dblqh::seizePageRef(Signal* signal)
+{
+ pageRefPtr.i = cfirstfreePageRef;
+ ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord);
+ cfirstfreePageRef = pageRefPtr.p->prNext;
+ pageRefPtr.p->prNext = RNIL;
+}//Dblqh::seizePageRef()
+
+/* --------------------------------------------------------------------------
+ * ------- SEND ABORTED -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendAborted(Signal* signal)
+{
+ UintR TlastInd;
+ if (tcConnectptr.p->nextReplica == ZNIL) {
+ TlastInd = ZTRUE;
+ } else {
+ TlastInd = ZFALSE;
+ }//if
+ signal->theData[0] = tcConnectptr.p->tcOprec;
+ signal->theData[1] = tcConnectptr.p->transid[0];
+ signal->theData[2] = tcConnectptr.p->transid[1];
+ signal->theData[3] = cownNodeid;
+ signal->theData[4] = TlastInd;
+ sendSignal(tcConnectptr.p->tcBlockref, GSN_ABORTED, signal, 5, JBB);
+ return;
+}//Dblqh::sendAborted()
+
+/* --------------------------------------------------------------------------
+ * ------- SEND LQH_TRANSCONF -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat)
+{
+ tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec;
+ ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord);
+
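+  // Note (added comment): pack the replica role and the operation flags of
+  // this record into the requestInfo word of the LQH_TRANSCONF signal.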
+ Uint32 reqInfo = 0;
+ LqhTransConf::setReplicaType(reqInfo, tcConnectptr.p->replicaType);
+ LqhTransConf::setReplicaNo(reqInfo, tcConnectptr.p->seqNoReplica);
+ LqhTransConf::setLastReplicaNo(reqInfo, tcConnectptr.p->lastReplicaNo);
+ LqhTransConf::setSimpleFlag(reqInfo, tcConnectptr.p->opSimple);
+ LqhTransConf::setDirtyFlag(reqInfo, tcConnectptr.p->dirtyOp);
+ LqhTransConf::setOperation(reqInfo, tcConnectptr.p->operation);
+
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+ lqhTransConf->tcRef = tcNodeFailptr.p->newTcRef;
+ lqhTransConf->lqhNodeId = cownNodeid;
+ lqhTransConf->operationStatus = stat;
+ lqhTransConf->lqhConnectPtr = tcConnectptr.i;
+ lqhTransConf->transId1 = tcConnectptr.p->transid[0];
+ lqhTransConf->transId2 = tcConnectptr.p->transid[1];
+ lqhTransConf->oldTcOpRec = tcConnectptr.p->tcOprec;
+ lqhTransConf->requestInfo = reqInfo;
+ lqhTransConf->gci = tcConnectptr.p->gci;
+ lqhTransConf->nextNodeId1 = tcConnectptr.p->nextReplica;
+ lqhTransConf->nextNodeId2 = tcConnectptr.p->nodeAfterNext[0];
+ lqhTransConf->nextNodeId3 = tcConnectptr.p->nodeAfterNext[1];
+ lqhTransConf->apiRef = tcConnectptr.p->applRef;
+ lqhTransConf->apiOpRec = tcConnectptr.p->applOprec;
+ lqhTransConf->tableId = tcConnectptr.p->tableref;
+ sendSignal(tcNodeFailptr.p->newTcBlockref, GSN_LQH_TRANSCONF,
+ signal, LqhTransConf::SignalLength, JBB);
+ tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1;
+ signal->theData[0] = ZLQH_TRANS_NEXT;
+ signal->theData[1] = tcNodeFailptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dblqh::sendLqhTransconf()
+
+/* --------------------------------------------------------------------------
+ * ------- START ANOTHER PHASE OF LOG EXECUTION -------
+ * RESET THE VARIABLES NEEDED BY THIS PROCESS AND SEND THE START SIGNAL
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::startExecSr(Signal* signal)
+{
+ cnoFragmentsExecSr = 0;
+ signal->theData[0] = cfirstCompletedFragSr;
+ signal->theData[1] = RNIL;
+ sendSignal(cownref, GSN_START_EXEC_SR, signal, 2, JBB);
+}//Dblqh::startExecSr()
+
+/* ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤
+ * ¤¤¤¤¤¤¤ LOG MODULE ¤¤¤¤¤¤¤
+ * ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ */
+/* --------------------------------------------------------------------------
+ * ------- STEP FORWARD IN FRAGMENT LOG DURING LOG EXECUTION -------
+ *
+ * ------------------------------------------------------------------------- */
+void Dblqh::stepAhead(Signal* signal, Uint32 stepAheadWords)
+{
+ UintR tsaPos;
+
+ tsaPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
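+  // Note (added comment): walk forward stepAheadWords words; whenever the
+  // step crosses a page boundary, advance to the next page in the chain and
+  // count one more page as executed during log execution.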
+ while ((stepAheadWords + tsaPos) >= ZPAGE_SIZE) {
+ jam();
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_SIZE;
+ stepAheadWords = stepAheadWords - (ZPAGE_SIZE - tsaPos);
+ logFilePtr.p->currentLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE];
+ logFilePtr.p->currentFilepage++;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE;
+ logPartPtr.p->execSrPagesRead--;
+ logPartPtr.p->execSrPagesExecuted++;
+ tsaPos = ZPAGE_HEADER_SIZE;
+ }//while
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = stepAheadWords + tsaPos;
+}//Dblqh::stepAhead()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE AN ABORT LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WAL
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeAbortLog(Signal* signal)
+{
+ if ((ZABORT_LOG_SIZE + ZNEXT_LOG_SIZE) >
+ logFilePtr.p->remainingWordsInMbyte) {
+ jam();
+ changeMbyte(signal);
+ }//if
+ logFilePtr.p->remainingWordsInMbyte =
+ logFilePtr.p->remainingWordsInMbyte - ZABORT_LOG_SIZE;
+ writeLogWord(signal, ZABORT_TYPE);
+ writeLogWord(signal, tcConnectptr.p->transid[0]);
+ writeLogWord(signal, tcConnectptr.p->transid[1]);
+}//Dblqh::writeAbortLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A COMMIT LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WCL
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr)
+{
+ LogFileRecordPtr regLogFilePtr;
+ LogPageRecordPtr regLogPagePtr;
+ TcConnectionrec * const regTcPtr = tcConnectptr.p;
+ regLogFilePtr.i = regLogPartPtr.p->currentLogfile;
+ ptrCheckGuard(regLogFilePtr, clogFileFileSize, logFileRecord);
+ regLogPagePtr.i = regLogFilePtr.p->currentLogpage;
+ Uint32 twclTmp = regLogFilePtr.p->remainingWordsInMbyte;
+ ptrCheckGuard(regLogPagePtr, clogPageFileSize, logPageRecord);
+ logPartPtr = regLogPartPtr;
+ logFilePtr = regLogFilePtr;
+ logPagePtr = regLogPagePtr;
+ if ((ZCOMMIT_LOG_SIZE + ZNEXT_LOG_SIZE) > twclTmp) {
+ jam();
+ changeMbyte(signal);
+ twclTmp = logFilePtr.p->remainingWordsInMbyte;
+ }//if
+
+ Uint32 twclLogPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ Uint32 tableId = regTcPtr->tableref;
+ Uint32 schemaVersion = regTcPtr->schemaVersion;
+ Uint32 fragId = regTcPtr->fragmentid;
+ Uint32 fileNo = regTcPtr->logStartFileNo;
+ Uint32 startPageNo = regTcPtr->logStartPageNo;
+ Uint32 pageIndex = regTcPtr->logStartPageIndex;
+ Uint32 stopPageNo = regTcPtr->logStopPageNo;
+ Uint32 gci = regTcPtr->gci;
+ logFilePtr.p->remainingWordsInMbyte = twclTmp - ZCOMMIT_LOG_SIZE;
+
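+  // Note (added comment): if the nine-word commit record would cross the
+  // page boundary it is written word by word via writeLogWord(), which
+  // handles the page change; otherwise it is copied directly into the page.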
+ if ((twclLogPos + ZCOMMIT_LOG_SIZE) >= ZPAGE_SIZE) {
+ writeLogWord(signal, ZCOMMIT_TYPE);
+ writeLogWord(signal, tableId);
+ writeLogWord(signal, schemaVersion);
+ writeLogWord(signal, fragId);
+ writeLogWord(signal, fileNo);
+ writeLogWord(signal, startPageNo);
+ writeLogWord(signal, pageIndex);
+ writeLogWord(signal, stopPageNo);
+ writeLogWord(signal, gci);
+ } else {
+ Uint32* dataPtr = &logPagePtr.p->logPageWord[twclLogPos];
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = twclLogPos + ZCOMMIT_LOG_SIZE;
+ dataPtr[0] = ZCOMMIT_TYPE;
+ dataPtr[1] = tableId;
+ dataPtr[2] = schemaVersion;
+ dataPtr[3] = fragId;
+ dataPtr[4] = fileNo;
+ dataPtr[5] = startPageNo;
+ dataPtr[6] = pageIndex;
+ dataPtr[7] = stopPageNo;
+ dataPtr[8] = gci;
+ }//if
+ TcConnectionrecPtr rloTcNextConnectptr;
+ TcConnectionrecPtr rloTcPrevConnectptr;
+ rloTcPrevConnectptr.i = regTcPtr->prevLogTcrec;
+ rloTcNextConnectptr.i = regTcPtr->nextLogTcrec;
+ if (rloTcNextConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i;
+ } else {
+ regLogPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i;
+ }//if
+ if (rloTcPrevConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+ rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i;
+ } else {
+ regLogPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i;
+ }//if
+}//Dblqh::writeCommitLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A COMPLETED GCI LOG RECORD -------
+ *
+ * SUBROUTINE SHORT NAME: WCG
+ * Input Pointers:
+ *   logFilePtr
+ *   logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeCompletedGciLog(Signal* signal)
+{
+ if ((ZCOMPLETED_GCI_LOG_SIZE + ZNEXT_LOG_SIZE) >
+ logFilePtr.p->remainingWordsInMbyte) {
+ jam();
+ changeMbyte(signal);
+ }//if
+ logFilePtr.p->remainingWordsInMbyte =
+ logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE;
+ writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
+ writeLogWord(signal, cnewestCompletedGci);
+ logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
+}//Dblqh::writeCompletedGciLog()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A DIRTY PAGE DURING LOG EXECUTION -------
+ *
+ * SUBROUTINE SHORT NAME: WD
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeDirty(Signal* signal)
+{
+ logPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY;
+
+ // Calculate checksum for page
+ logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr);
+
+ seizeLfo(signal);
+ initLfo(signal);
+ lfoPtr.p->lfoPageNo = logPartPtr.p->prevFilepage;
+ lfoPtr.p->noPagesRw = 1;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_DIRTY;
+ lfoPtr.p->firstLfoPage = logPagePtr.i;
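+  // Note (added comment): build the FSWRITEREQ for this single page,
+  // written at file page prevFilepage using the ZLIST_OF_PAIRS_SYNCH format.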
+ signal->theData[0] = logFilePtr.p->fileRef;
+ signal->theData[1] = cownref;
+ signal->theData[2] = lfoPtr.i;
+ signal->theData[3] = ZLIST_OF_PAIRS_SYNCH;
+ signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
+ signal->theData[5] = 1;
+ signal->theData[6] = logPagePtr.i;
+ signal->theData[7] = logPartPtr.p->prevFilepage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+}//Dblqh::writeDirty()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A WORD INTO THE LOG, CHECK FOR NEW PAGE -------
+ *
+ * SUBROUTINE SHORT NAME: WLW
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeLogWord(Signal* signal, Uint32 data)
+{
+ Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ ndbrequire(logPos < ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPos] = data;
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + 1;
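+  // Note (added comment): the page became full with this word; flush it and
+  // make a freshly seized and initialised page the current log page.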
+ if ((logPos + 1) == ZPAGE_SIZE) {
+ jam();
+ completedLogPage(signal, ZNORMAL);
+ seizeLogpage(signal);
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ logFilePtr.p->currentFilepage++;
+ }//if
+}//Dblqh::writeLogWord()
+
+/* --------------------------------------------------------------------------
+ * ------- WRITE A NEXT LOG RECORD AND CHANGE TO NEXT MBYTE -------
+ *
+ * SUBROUTINE SHORT NAME: WNL
+ * Input Pointers:
+ *   logFilePtr (redefined)
+ *   logPagePtr (redefined)
+ *   logPartPtr
+ * ------------------------------------------------------------------------- */
+void Dblqh::writeNextLog(Signal* signal)
+{
+ LogFileRecordPtr wnlNextLogFilePtr;
+ UintR twnlNextFileNo;
+ UintR twnlNewMbyte;
+ UintR twnlRemWords;
+ UintR twnlNextMbyte;
+
+/* -------------------------------------------------- */
+/* CALCULATE THE NEW NUMBER OF REMAINING WORDS */
+/* AS (ZPAGE_SIZE - ZPAGE_HEADER_SIZE) WORDS */
+/* PER PAGE TIMES ZPAGES_IN_MBYTE PAGES, THE */
+/* NUMBER OF WORDS PER MBYTE USED FOR LOG INFO. */
+/* -------------------------------------------------- */
+ twnlRemWords = ZPAGE_SIZE - ZPAGE_HEADER_SIZE;
+ twnlRemWords = twnlRemWords * ZPAGES_IN_MBYTE;
+ wnlNextLogFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(wnlNextLogFilePtr, clogFileFileSize, logFileRecord);
+/* -------------------------------------------------- */
+/* WRITE THE NEXT LOG RECORD. */
+/* -------------------------------------------------- */
+ ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
+ ZNEXT_MBYTE_TYPE;
+ if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+/* -------------------------------------------------- */
+/* CALCULATE THE NEW REMAINING WORDS WHEN */
+/* CHANGING LOG FILE IS PERFORMED */
+/* -------------------------------------------------- */
+ twnlRemWords = twnlRemWords - (ZPAGE_SIZE - ZPAGE_HEADER_SIZE);
+/* -------------------------------------------------- */
+/* ENSURE THAT THE LOG PAGES ARE WRITTEN AFTER */
+/* WE HAVE CHANGED MBYTE. */
+/* -------------------------------------------------- */
+/* ENSURE LAST PAGE IN PREVIOUS MBYTE IS */
+/* WRITTEN AND THAT THE STATE OF THE WRITE IS */
+/* PROPERLY SET. */
+/* -------------------------------------------------- */
+/* WE HAVE TO CHANGE LOG FILE */
+/* -------------------------------------------------- */
+ completedLogPage(signal, ZLAST_WRITE_IN_FILE);
+ if (wnlNextLogFilePtr.p->fileNo == 0) {
+ jam();
+/* -------------------------------------------------- */
+/* WE HAVE FINALISED A LOG LAP, START FROM LOG */
+/* FILE 0 AGAIN */
+/* -------------------------------------------------- */
+ logPartPtr.p->logLap++;
+ }//if
+ logPartPtr.p->currentLogfile = wnlNextLogFilePtr.i;
+ logFilePtr.i = wnlNextLogFilePtr.i;
+ logFilePtr.p = wnlNextLogFilePtr.p;
+ twnlNewMbyte = 0;
+ } else {
+ jam();
+/* -------------------------------------------------- */
+/* INCREMENT THE CURRENT MBYTE */
+/* SET PAGE INDEX TO PAGE HEADER SIZE */
+/* -------------------------------------------------- */
+ completedLogPage(signal, ZENFORCE_WRITE);
+ twnlNewMbyte = logFilePtr.p->currentMbyte + 1;
+ }//if
+/* -------------------------------------------------- */
+/* CHANGE TO NEW LOG FILE IF NECESSARY */
+/* UPDATE THE FILE POSITION TO THE NEW MBYTE */
+/* FOUND IN PAGE PART OF TNEXT_LOG_PTR */
+/* ALLOCATE AND INITIATE A NEW PAGE SINCE WE */
+/* HAVE SENT THE PREVIOUS PAGE TO DISK. */
+/* SET THE NEW NUMBER OF REMAINING WORDS IN THE */
+/* NEW MBYTE ALLOCATED. */
+/* -------------------------------------------------- */
+ logFilePtr.p->currentMbyte = twnlNewMbyte;
+ logFilePtr.p->filePosition = twnlNewMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->currentFilepage = twnlNewMbyte * ZPAGES_IN_MBYTE;
+ logFilePtr.p->remainingWordsInMbyte = twnlRemWords;
+ seizeLogpage(signal);
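+  // Note (added comment): for mbyte 0 of log file 0 the page referenced by
+  // logPageZero is reused as the current page, so the page seized above is
+  // released again in that case.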
+ if (logFilePtr.p->currentMbyte == 0) {
+ jam();
+ logFilePtr.p->lastPageWritten = 0;
+ if (logFilePtr.p->fileNo == 0) {
+ jam();
+ releaseLogpage(signal);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ }//if
+ }//if
+ initLogpage(signal);
+ logFilePtr.p->currentLogpage = logPagePtr.i;
+ if (logFilePtr.p->currentMbyte == 0) {
+ jam();
+/* -------------------------------------------------- */
+/* THIS IS A NEW FILE, WRITE THE FILE DESCRIPTOR*/
+/* ALSO OPEN THE NEXT LOG FILE TO ENSURE THAT */
+/* THIS FILE IS OPEN WHEN ITS TURN COMES. */
+/* -------------------------------------------------- */
+ writeFileHeaderOpen(signal, ZNORMAL);
+ openNextLogfile(signal);
+ logFilePtr.p->fileChangeState = LogFileRecord::BOTH_WRITES_ONGOING;
+ }//if
+ if (logFilePtr.p->fileNo == logPartPtr.p->logTailFileNo) {
+ if (logFilePtr.p->currentMbyte == logPartPtr.p->logTailMbyte) {
+ jam();
+/* -------------------------------------------------- */
+/* THE HEAD AND TAIL HAVE MET. THIS SHOULD NEVER */
+/* OCCUR. IT CAN HAPPEN IF THE LOCAL CHECKPOINTS */
+/* TAKE FAR TOO LONG. ONLY VERY SERIOUS TIMING */
+/* PROBLEMS CAN CAUSE THIS SYSTEM CRASH. */
+/* -------------------------------------------------- */
+ systemError(signal);
+ }//if
+ }//if
+ if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ jam();
+ twnlNextMbyte = 0;
+ if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::FILE_CHANGE_PROBLEM;
+ }//if
+ twnlNextFileNo = wnlNextLogFilePtr.p->fileNo;
+ } else {
+ jam();
+ twnlNextMbyte = logFilePtr.p->currentMbyte + 1;
+ twnlNextFileNo = logFilePtr.p->fileNo;
+ }//if
+ if (twnlNextFileNo == logPartPtr.p->logTailFileNo) {
+ if (logPartPtr.p->logTailMbyte == twnlNextMbyte) {
+ jam();
+/* -------------------------------------------------- */
+/* THE NEXT MBYTE WILL BE THE TAIL. WE MUST */
+/* STOP LOGGING NEW OPERATIONS. THIS OPERATION */
+/* IS ALLOWED TO PASS. COMMIT, NEXT, COMPLETED */
+/* GCI, ABORT AND FRAGMENT SPLIT RECORDS ARE */
+/* ALSO ALLOWED. OPERATIONS ARE ALLOWED AGAIN */
+/* WHEN THE TAIL IS MOVED FORWARD AS A RESULT */
+/* OF A START_LCP_ROUND SIGNAL ARRIVING FROM */
+/* DBDIH. */
+ logPartPtr.p->logPartState = LogPartRecord::TAIL_PROBLEM;
+ }//if
+ }//if
+}//Dblqh::writeNextLog()
+
+void
+Dblqh::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+ if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersSize){
+ infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+ }
+ if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersDump){
+ infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+
+ CommitAckMarkerIterator iter;
+ for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
+ m_commitAckMarkerHash.next(iter)){
+ infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
+ " ApiRef: 0x%x apiOprec: 0x%x TcNodeId: %d",
+ iter.curr.i,
+ iter.curr.p->transid1,
+ iter.curr.p->transid2,
+ iter.curr.p->apiRef,
+ iter.curr.p->apiOprec,
+ iter.curr.p->tcNodeId);
+ }
+ }
+
+ // Dump info about number of log pages
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpNoLogPages){
+ infoEvent("LQH: Log pages : %d Free: %d",
+ clogPageFileSize,
+ cnoOfLogPages);
+ }
+
+ // Dump all defined tables that LQH knows about
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpAllDefinedTabs){
+ for(Uint32 i = 0; i<ctabrecFileSize; i++){
+ TablerecPtr tabPtr;
+ tabPtr.i = i;
+ ptrAss(tabPtr, tablerec);
+ if(tabPtr.p->tableStatus != Tablerec::NOT_DEFINED){
+ infoEvent("Table %d Status: %d Usage: %d",
+ i, tabPtr.p->tableStatus, tabPtr.p->usageCount);
+ }
+ }
+ return;
+ }
+
+ // Dump all ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::LqhDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("LQH: Dump all ScanRecords - size: %d",
+ cscanrecFileSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
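+    // Note (added comment): continue with the next record by sending a new
+    // DUMP_STATE_ORD to ourselves, so the full dump is spread over signals.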
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::LqhDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ // Dump all active ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::LqhDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->length() == 1)
+ infoEvent("LQH: Dump active ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ c_scanRecordPool.getPtr(sp);
+ if (sp.p->scanState != ScanRecord::SCAN_FREE){
+ dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::LqhDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->length() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanrecFileSize)
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ c_scanRecordPool.getPtr(sp);
+ infoEvent("Dblqh::ScanRecord[%d]: state=%d, type=%d, "
+ "complStatus=%d, scanNodeId=%d",
+ sp.i,
+ sp.p->scanState,
+ sp.p->scanType,
+ sp.p->scanCompletedStatus,
+ sp.p->scanNodeId);
+ infoEvent(" apiBref=0x%x, scanAccPtr=%d",
+ sp.p->scanApiBlockref,
+ sp.p->scanAccPtr);
+ infoEvent(" copyptr=%d, ailen=%d, complOps=%d, concurrOps=%d",
+ sp.p->copyPtr,
+ sp.p->scanAiLength,
+ sp.p->m_curr_batch_size_rows,
+ sp.p->m_max_batch_size_rows);
+ infoEvent(" errCnt=%d, localFid=%d, schV=%d",
+ sp.p->scanErrorCounter,
+ sp.p->scanLocalFragid,
+ sp.p->scanSchemaVersion);
+ infoEvent(" stpid=%d, flag=%d, lhold=%d, lmode=%d, num=%d",
+ sp.p->scanStoredProcId,
+ sp.p->scanFlag,
+ sp.p->scanLockHold,
+ sp.p->scanLockMode,
+ sp.p->scanNumber);
+ infoEvent(" relCount=%d, TCwait=%d, TCRec=%d, KIflag=%d",
+ sp.p->scanReleaseCounter,
+ sp.p->scanTcWaiting,
+ sp.p->scanTcrec,
+ sp.p->scanKeyinfoFlag);
+ return;
+ }
+ if(dumpState->args[0] == DumpStateOrd::LqhDumpLcpState){
+
+ infoEvent("== LQH LCP STATE ==");
+ infoEvent(" clcpCompletedState=%d, c_lcpId=%d, cnoOfFragsCheckpointed=%d",
+ clcpCompletedState,
+ c_lcpId,
+ cnoOfFragsCheckpointed);
+
+ LcpRecordPtr TlcpPtr;
+ // Print information about the current local checkpoint
+ TlcpPtr.i = 0;
+ ptrAss(TlcpPtr, lcpRecord);
+ infoEvent(" lcpState=%d firstLcpLocTup=%d firstLcpLocAcc=%d",
+ TlcpPtr.p->lcpState,
+ TlcpPtr.p->firstLcpLocTup,
+ TlcpPtr.p->firstLcpLocAcc);
+ infoEvent(" lcpAccptr=%d lastFragmentFlag=%d",
+ TlcpPtr.p->lcpAccptr,
+ TlcpPtr.p->lastFragmentFlag);
+ infoEvent("currentFragment.fragPtrI=%d",
+ TlcpPtr.p->currentFragment.fragPtrI);
+ infoEvent("currentFragment.lcpFragOrd.tableId=%d",
+ TlcpPtr.p->currentFragment.lcpFragOrd.tableId);
+ infoEvent(" lcpQueued=%d reportEmpty=%d",
+ TlcpPtr.p->lcpQueued,
+ TlcpPtr.p->reportEmpty);
+ char buf[8*_NDB_NODE_BITMASK_SIZE+1];
+ infoEvent(" m_EMPTY_LCP_REQ=%d",
+ TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf));
+
+ return;
+ }
+
+
+
+}//Dblqh::execDUMP_STATE_ORD()
+
+void Dblqh::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+
+ switch (var) {
+
+ case NoOfConcurrentCheckpointsAfterRestart:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfConcurrentCheckpointsDuringRestart:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}//execSET_VAR_REQ()
+
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ---------------------- TRIGGER HANDLING ------------------------ */
+/* ---------------------------------------------------------------- */
+/* */
+/* All trigger signals from TRIX are forwarded to TUP */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+// Trigger signals
+void
+Dblqh::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference tupref = calcTupBlockRef(myNodeId);
+
+ sendSignal(tupref, GSN_CREATE_TRIG_REQ, signal, CreateTrigReq::SignalLength, JBB);
+}
+
+void
+Dblqh::execCREATE_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_CREATE_TRIG_CONF, signal, CreateTrigConf::SignalLength, JBB);
+}
+
+void
+Dblqh::execCREATE_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_CREATE_TRIG_REF, signal, CreateTrigRef::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference tupref = calcTupBlockRef(myNodeId);
+
+ sendSignal(tupref, GSN_DROP_TRIG_REQ, signal, DropTrigReq::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_CONF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_DROP_TRIG_CONF, signal, DropTrigConf::SignalLength, JBB);
+}
+
+void
+Dblqh::execDROP_TRIG_REF(Signal* signal)
+{
+ jamEntry();
+ NodeId myNodeId = getOwnNodeId();
+ BlockReference dictref = calcDictBlockRef(myNodeId);
+
+ sendSignal(dictref, GSN_DROP_TRIG_REF, signal, DropTrigRef::SignalLength, JBB);
+}
+
+Uint32 Dblqh::calcPageCheckSum(LogPageRecordPtr logP){
+ Uint32 checkSum = 37;
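+  // Note (added comment): the checksum is an XOR over all page words after
+  // the checksum slot, but it is only computed in VM_TRACE builds; otherwise
+  // the constant seed value 37 is returned.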
+#ifdef VM_TRACE
+ for (Uint32 i = (ZPOS_CHECKSUM+1); i<ZPAGE_SIZE; i++)
+ checkSum = logP.p->logPageWord[i] ^ checkSum;
+#endif
+ return checkSum;
+ }
+
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
new file mode 100644
index 00000000000..854860b269c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
@@ -0,0 +1,25 @@
+#SUBDIRS = redoLogReader
+
+noinst_LIBRARIES = libdblqh.a
+
+libdblqh_a_SOURCES = DblqhInit.cpp DblqhMain.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdblqh.dsp
+
+libdblqh.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdblqh_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile
new file mode 100644
index 00000000000..a89b648de77
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/Makefile
@@ -0,0 +1,9 @@
+include .defs.mk
+
+BIN_TARGET := redoLogFileReader
+
+SOURCES := records.cpp redoLogFileReader.cpp
+
+TYPE := util
+
+include $(NDB_TOP)/Epilogue.mk
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
new file mode 100644
index 00000000000..092b7840c20
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
@@ -0,0 +1,312 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "records.hpp"
+
+void printOut(const char *string, Uint32 value) {
+ ndbout_c("%-30s%-12u%-12x", string, value, value);
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool AbortTransactionRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 AbortTransactionRecord::getLogRecordSize() {
+ return ABORTTRANSACTIONRECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const AbortTransactionRecord& atr) {
+ no << "----------ABORT TRANSACTION RECORD-------------" << endl << endl;
+ printOut("Record type:", atr.m_recordType);
+ printOut("TransactionId1:", atr.m_transactionId1);
+ printOut("TransactionId2:", atr.m_transactionId2);
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool NextMbyteRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 NextMbyteRecord::getLogRecordSize() {
+ return NEXTMBYTERECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const NextMbyteRecord& nmr) {
+ no << "----------NEXT MBYTE RECORD--------------------" << endl << endl;
+ printOut("Record type:", nmr.m_recordType);
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool CommitTransactionRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 CommitTransactionRecord::getLogRecordSize() {
+ return COMMITTRANSACTIONRECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const CommitTransactionRecord& ctr) {
+ no << "----------COMMIT TRANSACTION RECORD------------" << endl << endl;
+ printOut("Record type:", ctr.m_recordType);
+ printOut("TableId", ctr.m_tableId);
+ printOut("FfragmentId", ctr.m_fragmentId);
+ printOut("File no. of Prep. Op.", ctr.m_fileNumberOfPrepareOperation);
+ printOut("Start page no. of Prep. Op.", ctr.m_startPageNumberOfPrepareOperation);
+ printOut("Start page index of Prep. Op.", ctr.m_startPageIndexOfPrepareOperation);
+ printOut("Stop page no. of Prep. Op.", ctr.m_stopPageNumberOfPrepareOperation);
+ printOut("GlobalCheckpoint", ctr.m_globalCheckpoint);
+
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool InvalidCommitTransactionRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 InvalidCommitTransactionRecord::getLogRecordSize() {
+ return COMMITTRANSACTIONRECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const InvalidCommitTransactionRecord& ictr) {
+ no << "------INVALID COMMIT TRANSACTION RECORD--------" << endl << endl;
+ printOut("Record type:", ictr.m_recordType);
+ printOut("TableId", ictr.m_tableId);
+ printOut("FfragmentId", ictr.m_fragmentId);
+ printOut("File no. of Prep. Op.", ictr.m_fileNumberOfPrepareOperation);
+ printOut("Start page no. of Prep. Op.", ictr.m_startPageNumberOfPrepareOperation);
+ printOut("Start page index of Prep. Op.", ictr.m_startPageIndexOfPrepareOperation);
+ printOut("Stop page no. of Prep. Op.", ictr.m_stopPageNumberOfPrepareOperation);
+ printOut("GlobalCheckpoint", ictr.m_globalCheckpoint);
+
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool PrepareOperationRecord::check() {
+ // Not fully implemented.
+ if (m_operationType == 3 && m_attributeLength != 0)
+ return false;
+
+ if (m_logRecordSize != (m_attributeLength + m_keyLength + 7))
+ return false;
+
+ return true;
+}
+
+Uint32 PrepareOperationRecord::getLogRecordSize() {
+ return m_logRecordSize;
+}
+
+NdbOut& operator<<(NdbOut& no, const PrepareOperationRecord& por) {
+ no << "-----------PREPARE OPERATION RECORD------------" << endl << endl;
+ printOut("Record type:", por.m_recordType);
+ printOut("logRecordSize:", por.m_logRecordSize);
+ printOut("hashValue:", por.m_hashValue);
+ printOut("schemaVersion:", por.m_schemaVersion);
+ switch (por.m_operationType) {
+ case 0:
+ ndbout_c("%-30s%-12u%-6s", "operationType:",
+ por.m_operationType, "read");
+ break;
+ case 1:
+ ndbout_c("%-30s%-12u%-6s", "operationType:",
+ por.m_operationType, "update");
+ break;
+ case 2:
+ ndbout_c("%-30s%-12u%-6s", "operationType:",
+ por.m_operationType, "insert");
+ break;
+ case 3:
+ ndbout_c("%-30s%-12u%-6s", "operationType:",
+ por.m_operationType, "delete");
+ break;
+ default:
+ printOut("operationType:", por.m_operationType);
+ }
+ printOut("attributeLength:", por.m_attributeLength);
+ printOut("keyLength:", por.m_keyLength);
+
+#if 1
+ // Print keydata
+ Uint32* p = (Uint32*)&por.m_keyInfo;
+ for(Uint32 i=0; i < por.m_keyLength; i++){
+ printOut("keydata:", *p);
+ p++;
+ }
+
+ // Print attrdata
+ for(Uint32 i=0; i < por.m_attributeLength; i++){
+ printOut("attrdata:", *p);
+ p++;
+ }
+#endif
+
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool CompletedGCIRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 CompletedGCIRecord::getLogRecordSize() {
+ return COMPLETEDGCIRECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const CompletedGCIRecord& cGCIr) {
+ no << "-----------COMPLETED GCI RECORD----------------" << endl << endl;
+ printOut("Record type:", cGCIr.m_recordType);
+ printOut("Completed GCI:", cGCIr.m_theCompletedGCI);
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+bool NextLogRecord::check() {
+ // Not implemented yet.
+ return true;
+}
+
+Uint32 NextLogRecord::getLogRecordSize(Uint32 pageIndex) {
+ return PAGESIZE - pageIndex;
+}
+
+NdbOut& operator<<(NdbOut& no, const NextLogRecord& nl) {
+ no << "-----------NEXT LOG RECORD --------------------" << endl << endl;
+ printOut("Record type:", nl.m_recordType);
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+Uint32 PageHeader::getLogRecordSize() {
+ return PAGEHEADERSIZE;
+}
+
+bool PageHeader::check() {
+ // Not implemented yet.
+ return true;
+}
+
+NdbOut& operator<<(NdbOut& no, const PageHeader& ph) {
+ no << "------------PAGE HEADER------------------------" << endl << endl;
+ ndbout_c("%-30s%-12s%-12s\n", "", "Decimal", "Hex");
+ printOut("Checksum:", ph.m_checksum);
+ printOut("Laps since initial start:", ph.m_lap);
+ printOut("Max gci completed:", ph.m_max_gci_completed);
+ printOut("Max gci started:", ph.m_max_gci_started);
+ printOut("Ptr to next page:", ph.m_next_page);
+ printOut("Ptr to previous page:", ph.m_previous_page);
+ printOut("Ndb version:", ph.m_ndb_version);
+ printOut("Number of log files:", ph.m_number_of_logfiles);
+ printOut("Current page index:", ph.m_current_page_index);
+ printOut("Oldest prepare op. file No.:", ph.m_old_prepare_file_number);
+ printOut("Oldest prepare op. page ref.:", ph.m_old_prepare_page_reference);
+ printOut("Dirty flag:", ph.m_dirty_flag);
+ no << endl;
+ return no;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+Uint32 FileDescriptor::getLogRecordSize() {
+ return FILEDESCRIPTORHEADERSIZE
+ + m_fdHeader.m_noOfDescriptors * FILEDESCRIPTORRECORDSIZE;
+}
+
+NdbOut& operator<<(NdbOut& no, const FileDescriptor& fd) {
+ no << "-------FILE DESCRIPTOR HEADER------------------" << endl << endl;
+ printOut("Record type:", fd.m_fdHeader.m_recordType);
+ printOut("Number of file descriptors:", fd.m_fdHeader.m_noOfDescriptors);
+ printOut("File number:", fd.m_fdHeader.m_fileNo);
+ ndbout << endl;
+ for(Uint32 i = 0; i < fd.m_fdHeader.m_noOfDescriptors; i++) {
+ fd.printARecord(i);
+ }
+ return no;
+}
+
+void FileDescriptor::printARecord( Uint32 recordIndex ) const {
+ ndbout << "------------------FILE DESCRIPTOR " << recordIndex
+ <<" ---------------------" << endl << endl;
+ ndbout_c("%-30s%-12s%-12s\n", "", "Decimal", "Hex");
+
+ for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) {
+ ndbout_c("%s%2d%s%-12u%-12x", "Max GCI completed, mbyte ", i, ": ",
+ m_fdRecord[recordIndex].m_maxGciCompleted[i-1],
+ m_fdRecord[recordIndex].m_maxGciCompleted[i-1]);
+ }
+ for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) {
+ ndbout_c("%s%2d%s%-12u%-12x", "Max GCI started, mbyte ", i, ": ",
+ m_fdRecord[recordIndex].m_maxGciStarted[i-1],
+ m_fdRecord[recordIndex].m_maxGciStarted[i-1]);
+ }
+ for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) {
+ ndbout_c("%s%2d%s%-12u%-12x", "Last prepared ref, mbyte ", i, ": ",
+ m_fdRecord[recordIndex].m_lastPreparedReference[i-1],
+ m_fdRecord[recordIndex].m_lastPreparedReference[i-1]);
+ }
+ ndbout << endl;
+}
+
+bool FileDescriptor::check() {
+ // Not implemented yet.
+ return true;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
new file mode 100644
index 00000000000..e73986e4d73
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
@@ -0,0 +1,235 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <NdbMain.h>
+#include <NdbOut.hpp>
+#include <ndb_types.h>
+
+#define ZNEW_PREP_OP_TYPE 0
+#define ZPREP_OP_TYPE 1
+#define ZCOMMIT_TYPE 2
+#define ZABORT_TYPE 3
+#define ZFD_TYPE 4
+#define ZFRAG_SPLIT_TYPE 5
+#define ZNEXT_LOG_RECORD_TYPE 6
+#define ZNEXT_MBYTE_TYPE 7
+#define ZCOMPLETED_GCI_TYPE 8
+#define ZINVALID_COMMIT_TYPE 9
+
+#define MAX_FILE_DESCRIPTORS 40
+#define NO_MBYTE_IN_FILE 16
+
+#define PAGESIZE 8192
+#define NO_PAGES_IN_MBYTE 32
+
+#define COMMITTRANSACTIONRECORDSIZE 8
+#define COMPLETEDGCIRECORDSIZE 2
+#define PAGEHEADERSIZE 32
+#define FILEDESCRIPTORHEADERSIZE 3
+#define FILEDESCRIPTORRECORDSIZE 48
+#define NEXTMBYTERECORDSIZE 1
+#define ABORTTRANSACTIONRECORDSIZE 3
+
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class AbortTransactionRecord {
+ friend NdbOut& operator<<(NdbOut&, const AbortTransactionRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_recordType;
+ Uint32 m_transactionId1;
+ Uint32 m_transactionId2;
+};
+
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class NextMbyteRecord {
+ friend NdbOut& operator<<(NdbOut&, const NextMbyteRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_recordType;
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+
+class PrepareOperationRecord {
+ friend NdbOut& operator<<(NdbOut&, const PrepareOperationRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+
+protected:
+ Uint32 m_recordType;
+ Uint32 m_logRecordSize;
+ Uint32 m_hashValue;
+ Uint32 m_schemaVersion;
+ Uint32 m_operationType; // 0 READ, 1 UPDATE, 2 INSERT, 3 DELETE
+ Uint32 m_attributeLength;
+ Uint32 m_keyLength;
+ Uint32 *m_keyInfo; // In this order
+ Uint32 *m_attrInfo;// In this order
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class CompletedGCIRecord {
+ friend NdbOut& operator<<(NdbOut&, const CompletedGCIRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_recordType;
+ Uint32 m_theCompletedGCI;
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class NextLogRecord {
+ friend NdbOut& operator<<(NdbOut&, const NextLogRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize(Uint32);
+protected:
+ Uint32 m_recordType;
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class PageHeader {
+ friend NdbOut& operator<<(NdbOut&, const PageHeader&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_checksum;
+ Uint32 m_lap;
+ Uint32 m_max_gci_completed;
+ Uint32 m_max_gci_started;
+ Uint32 m_next_page;
+ Uint32 m_previous_page;
+ Uint32 m_ndb_version;
+ Uint32 m_number_of_logfiles;
+ Uint32 m_current_page_index;
+ Uint32 m_old_prepare_file_number;
+ Uint32 m_old_prepare_page_reference;
+ Uint32 m_dirty_flag;
+};
+
+//----------------------------------------------------------------
+// File descriptor.
+//----------------------------------------------------------------
+
+class FileDescriptorHeader {
+public:
+ Uint32 m_recordType;
+ Uint32 m_noOfDescriptors;
+ Uint32 m_fileNo;
+};
+
+class FileDescriptorRecord {
+public:
+ Uint32 m_maxGciCompleted[16];
+ Uint32 m_maxGciStarted[16];
+ Uint32 m_lastPreparedReference[16];
+};
+
+class FileDescriptor {
+ friend NdbOut& operator<<(NdbOut&, const FileDescriptor&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ void printARecord( Uint32 ) const;
+ FileDescriptorHeader m_fdHeader;
+ FileDescriptorRecord m_fdRecord[1];
+};
+
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class CommitTransactionRecord {
+ friend NdbOut& operator<<(NdbOut&, const CommitTransactionRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_recordType;
+ Uint32 m_tableId;
+ Uint32 m_fragmentId;
+ Uint32 m_fileNumberOfPrepareOperation;
+ Uint32 m_startPageNumberOfPrepareOperation;
+ Uint32 m_startPageIndexOfPrepareOperation;
+ Uint32 m_stopPageNumberOfPrepareOperation;
+ Uint32 m_globalCheckpoint;
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+class InvalidCommitTransactionRecord {
+ friend NdbOut& operator<<(NdbOut&, const InvalidCommitTransactionRecord&);
+public:
+ bool check();
+ Uint32 getLogRecordSize();
+protected:
+ Uint32 m_recordType;
+ Uint32 m_tableId;
+ Uint32 m_fragmentId;
+ Uint32 m_fileNumberOfPrepareOperation;
+ Uint32 m_startPageNumberOfPrepareOperation;
+ Uint32 m_startPageIndexOfPrepareOperation;
+ Uint32 m_stopPageNumberOfPrepareOperation;
+ Uint32 m_globalCheckpoint;
+};
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+struct NextLogRec {
+
+};
+
+struct NewPrepareOperation {
+
+};
+
+struct FragmentSplit {
+
+};
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
new file mode 100644
index 00000000000..540df7b507e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+//----------------------------------------------------------------
+// REDOLOGFILEREADER
+// Reads a redo log file and checks it for errors and/or prints
+// the file in a human readable format.
+//
+// Usage: redoLogFileReader <file> [-noprint] [-nocheck]
+// [-mbyte <0-15>] [-mbyteHeaders] [-pageHeaders]
+//
+//----------------------------------------------------------------
+
+
+#include <ndb_global.h>
+
+#include "records.hpp"
+
+#define RETURN_ERROR 1
+#define RETURN_OK 0
+
+#define FROM_BEGINNING 0
+
+void usage(const char * prg);
+Uint32 readRecordOverPageBoundary (Uint32 *, Uint32 , Uint32 , Uint32);
+Uint32 readFromFile(FILE * f, Uint32 *toPtr, Uint32 sizeInWords);
+void readArguments(int argc, const char** argv);
+void doExit();
+
+FILE * f;
+char fileName[256];
+bool thePrintFlag = true;
+bool theCheckFlag = true;
+bool onlyPageHeaders = false;
+bool onlyMbyteHeaders = false;
+bool onlyFileDesc = false;
+bool firstLap = true;
+Uint32 startAtMbyte = 0;
+Uint32 startAtPage = 0;
+Uint32 startAtPageIndex = 0;
+Uint32 *redoLogPage;
+
+NDB_COMMAND(redoLogFileReader, "redoLogFileReader", "redoLogFileReader", "Read a redo log file", 16384) {
+ Uint32 pageIndex = 0;
+ Uint32 oldPageIndex = 0;
+ Uint32 recordType = 1234567890;
+
+ PageHeader *thePageHeader;
+ CompletedGCIRecord *cGCIrecord;
+ PrepareOperationRecord *poRecord;
+ NextLogRecord *nlRecord;
+ FileDescriptor *fdRecord;
+ CommitTransactionRecord *ctRecord;
+ InvalidCommitTransactionRecord *ictRecord;
+ NextMbyteRecord *nmRecord;
+ AbortTransactionRecord *atRecord;
+
+ readArguments(argc, argv);
+
+ f = fopen(fileName, "rb");
+ if(!f){
+ perror("Error: open file");
+ exit(RETURN_ERROR);
+ }
+
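+  // Note (added comment): seek straight to the requested mbyte; each mbyte
+  // of the redo log holds NO_PAGES_IN_MBYTE pages of PAGESIZE 32-bit words.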
+ Uint32 tmpFileOffset = startAtMbyte * PAGESIZE * NO_PAGES_IN_MBYTE * sizeof(Uint32);
+ if (fseek(f, tmpFileOffset, FROM_BEGINNING)) {
+ perror("Error: Move in file");
+ exit(RETURN_ERROR);
+ }
+
+ redoLogPage = new Uint32[PAGESIZE*NO_PAGES_IN_MBYTE];
+
+ // Loop for every mbyte.
+ for (Uint32 j = startAtMbyte; j < NO_MBYTE_IN_FILE; j++) {
+ readFromFile(f, redoLogPage, PAGESIZE*NO_PAGES_IN_MBYTE);
+
+ if (firstLap) {
+ pageIndex = startAtPageIndex;
+ firstLap = false;
+ } else
+ pageIndex = 0;
+
+ // Loop for every page.
+ for (int i = startAtPage; i < NO_PAGES_IN_MBYTE; i++) {
+
+ if (pageIndex == 0) {
+ thePageHeader = (PageHeader *) &redoLogPage[i*PAGESIZE];
+ // Print out mbyte number, page number and page index.
+ ndbout << j << ":" << i << ":" << pageIndex << endl
+ << " " << j*32 + i << ":" << pageIndex << " ";
+ if (thePrintFlag) ndbout << (*thePageHeader);
+ if (theCheckFlag) {
+ if(!thePageHeader->check()) {
+ doExit();
+ }
+
+ Uint32 checkSum = 37;
+ for (int ps = 1; ps < PAGESIZE; ps++)
+ checkSum = redoLogPage[i*PAGESIZE+ps] ^ checkSum;
+
+ if (checkSum != redoLogPage[i*PAGESIZE]){
+ ndbout << "WRONG CHECKSUM: checksum = " << redoLogPage[i*PAGESIZE]
+ << " expected = " << checkSum << endl;
+ doExit();
+ }
+ else
+ ndbout << "expected checksum: " << checkSum << endl;
+
+ }
+ pageIndex += thePageHeader->getLogRecordSize();
+ }
+
+ if (onlyMbyteHeaders) {
+ // Show only the first page header in every mbyte of the file.
+ break;
+ }
+
+ if (onlyPageHeaders) {
+ // Show only page headers. Continue with the next page in this for loop.
+ pageIndex = 0;
+ continue;
+ }
+
+ do {
+ // Print out mbyte number, page number and page index.
+ ndbout << j << ":" << i << ":" << pageIndex << endl
+ << " " << j*32 + i << ":" << pageIndex << " ";
+ recordType = redoLogPage[i*PAGESIZE + pageIndex];
+ switch(recordType) {
+ case ZFD_TYPE:
+ fdRecord = (FileDescriptor *) &redoLogPage[i*PAGESIZE + pageIndex];
+ if (thePrintFlag) ndbout << (*fdRecord);
+ if (theCheckFlag) {
+ if(!fdRecord->check()) {
+ doExit();
+ }
+ }
+ if (onlyFileDesc) {
+ delete [] redoLogPage;
+ exit(RETURN_OK);
+ }
+ pageIndex += fdRecord->getLogRecordSize();
+ break;
+
+ case ZNEXT_LOG_RECORD_TYPE:
+ nlRecord = (NextLogRecord *) (&redoLogPage[i*PAGESIZE] + pageIndex);
+ pageIndex += nlRecord->getLogRecordSize(pageIndex);
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*nlRecord);
+ if (theCheckFlag) {
+ if(!nlRecord->check()) {
+ doExit();
+ }
+ }
+ }
+ break;
+
+ case ZCOMPLETED_GCI_TYPE:
+ cGCIrecord = (CompletedGCIRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ pageIndex += cGCIrecord->getLogRecordSize();
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*cGCIrecord);
+ if (theCheckFlag) {
+ if(!cGCIrecord->check()) {
+ doExit();
+ }
+ }
+ }
+ break;
+
+ case ZPREP_OP_TYPE:
+ poRecord = (PrepareOperationRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ pageIndex += poRecord->getLogRecordSize();
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*poRecord);
+ if (theCheckFlag) {
+ if(!poRecord->check()) {
+ doExit();
+ }
+ }
+ }
+ else {
+ oldPageIndex = pageIndex - poRecord->getLogRecordSize();
+ }
+ break;
+
+ case ZCOMMIT_TYPE:
+ ctRecord = (CommitTransactionRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ pageIndex += ctRecord->getLogRecordSize();
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*ctRecord);
+ if (theCheckFlag) {
+ if(!ctRecord->check()) {
+ doExit();
+ }
+ }
+ }
+ else {
+ oldPageIndex = pageIndex - ctRecord->getLogRecordSize();
+ }
+ break;
+
+ case ZINVALID_COMMIT_TYPE:
+ ictRecord = (InvalidCommitTransactionRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ pageIndex += ictRecord->getLogRecordSize();
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*ictRecord);
+ if (theCheckFlag) {
+ if(!ictRecord->check()) {
+ doExit();
+ }
+ }
+ }
+ else {
+ oldPageIndex = pageIndex - ictRecord->getLogRecordSize();
+ }
+ break;
+
+ case ZNEXT_MBYTE_TYPE:
+ nmRecord = (NextMbyteRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ if (thePrintFlag) ndbout << (*nmRecord);
+ i = NO_PAGES_IN_MBYTE;
+ break;
+
+ case ZABORT_TYPE:
+ atRecord = (AbortTransactionRecord *) &redoLogPage[i*PAGESIZE + pageIndex];
+ pageIndex += atRecord->getLogRecordSize();
+ if (pageIndex <= PAGESIZE) {
+ if (thePrintFlag) ndbout << (*atRecord);
+ if (theCheckFlag) {
+ if(!atRecord->check()) {
+ doExit();
+ }
+ }
+ }
+ break;
+
+ case ZNEW_PREP_OP_TYPE:
+ case ZFRAG_SPLIT_TYPE:
+ ndbout << endl << "Record type = " << recordType << " not implemented." << endl;
+ doExit();
+
+ default:
+ ndbout << " ------ERROR: UNKNOWN RECORD TYPE------" << endl;
+
+ // Print out remaining data in this page
+ for (int j = pageIndex; j < PAGESIZE; j++){
+ Uint32 unknown = redoLogPage[i*PAGESIZE + j];
+
+ ndbout_c("%-30d%-12u%-12x", j, unknown, unknown);
+ }
+
+ doExit();
+ }
+ } while(pageIndex < PAGESIZE && i < NO_PAGES_IN_MBYTE);
+
+ if (pageIndex > PAGESIZE) {
+ // The last record overlapped page boundary. Must redo that record.
+ pageIndex = readRecordOverPageBoundary(&redoLogPage[i*PAGESIZE],
+ pageIndex, oldPageIndex, recordType);
+ } else {
+ pageIndex = 0;
+ }
+ ndbout << endl;
+ }//for
+ ndbout << endl;
+ if (startAtMbyte != 0) {
+ break;
+ }
+ }//for
+ fclose(f);
+ delete [] redoLogPage;
+ exit(RETURN_OK);
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+Uint32 readFromFile(FILE * f, Uint32 *toPtr, Uint32 sizeInWords) {
+ Uint32 noOfReadWords;
+ if ( !(noOfReadWords = fread(toPtr, sizeof(Uint32), sizeInWords, f)) ) {
+ ndbout << "Error reading file" << endl;
+ doExit();
+ }
+
+ return noOfReadWords;
+}
+
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
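+// Handles a log record that straddles a page boundary: the tail of the
+// previous page (from oldPageIndex onwards) and the body of the following
+// page (minus its page header) are copied into a contiguous buffer so the
+// record can be checked and printed as a whole. The returned value is the
+// page index just past the record on the new page.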
+Uint32 readRecordOverPageBoundary(Uint32 *pagePtr, Uint32 pageIndex, Uint32 oldPageIndex, Uint32 recordType) {
+ Uint32 pageHeader[PAGEHEADERSIZE];
+ Uint32 tmpPages[PAGESIZE*10];
+ PageHeader *thePageHeader;
+ Uint32 recordSize = 0;
+
+ PrepareOperationRecord *poRecord;
+ CommitTransactionRecord *ctRecord;
+ InvalidCommitTransactionRecord *ictRecord;
+
+ memcpy(pageHeader, pagePtr + PAGESIZE, PAGEHEADERSIZE*sizeof(Uint32));
+ memcpy(tmpPages, pagePtr + oldPageIndex, (PAGESIZE - oldPageIndex)*sizeof(Uint32));
+ memcpy(tmpPages + PAGESIZE - oldPageIndex ,
+ (pagePtr + PAGESIZE + PAGEHEADERSIZE),
+ (PAGESIZE - PAGEHEADERSIZE)*sizeof(Uint32));
+
+ switch(recordType) {
+ case ZPREP_OP_TYPE:
+ poRecord = (PrepareOperationRecord *) tmpPages;
+ recordSize = poRecord->getLogRecordSize();
+ if (recordSize < (PAGESIZE - PAGEHEADERSIZE)) {
+ if (theCheckFlag) {
+ if(!poRecord->check()) {
+ doExit();
+ }
+ }
+ if (thePrintFlag) ndbout << (*poRecord);
+ } else {
+ ndbout << "Error: Record greater than a Page" << endl;
+ }
+ break;
+
+ case ZCOMMIT_TYPE:
+ ctRecord = (CommitTransactionRecord *) tmpPages;
+ recordSize = ctRecord->getLogRecordSize();
+ if (recordSize < (PAGESIZE - PAGEHEADERSIZE)) {
+ if (theCheckFlag) {
+ if(!ctRecord->check()) {
+ doExit();
+ }
+ }
+ if (thePrintFlag) ndbout << (*ctRecord);
+ } else {
+ ndbout << endl << "Error: Record greater than a Page" << endl;
+ }
+ break;
+
+ case ZINVALID_COMMIT_TYPE:
+ ictRecord = (InvalidCommitTransactionRecord *) tmpPages;
+ recordSize = ictRecord->getLogRecordSize();
+ if (recordSize < (PAGESIZE - PAGEHEADERSIZE)) {
+ if (theCheckFlag) {
+ if(!ictRecord->check()) {
+ doExit();
+ }
+ }
+ if (thePrintFlag) ndbout << (*ictRecord);
+ } else {
+ ndbout << endl << "Error: Record greater than a Page" << endl;
+ }
+ break;
+
+ case ZNEW_PREP_OP_TYPE:
+ case ZABORT_TYPE:
+ case ZFRAG_SPLIT_TYPE:
+ case ZNEXT_MBYTE_TYPE:
+ ndbout << endl << "Record type = " << recordType << " not implemented." << endl;
+ return 0;
+
+ default:
+ ndbout << endl << "Error: Unknown record type. Record type = " << recordType << endl;
+ return 0;
+ }
+
+ thePageHeader = (PageHeader *) (pagePtr + PAGESIZE);
+ if (thePrintFlag) ndbout << (*thePageHeader);
+
+ return PAGEHEADERSIZE - PAGESIZE + oldPageIndex + recordSize;
+}
+
+//----------------------------------------------------------------
+//
+//----------------------------------------------------------------
+
+
+void usage(const char * prg){
+ ndbout << endl << "Usage: " << endl << prg
+ << " <Binary log file> [-noprint] [-nocheck] [-mbyte <0-15>] "
+ << "[-mbyteheaders] [-pageheaders] [-filedescriptors] [-page <0-31>] "
+ << "[-pageindex <12-8191>]"
+ << endl << endl;
+
+}
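+// Illustrative command line accepted by readArguments() below; the binary
+// name and log file name are only examples, the options are documented by
+// usage() above:
+//   redoLogReader S0.FragLog -noprint -mbyte 3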
+void readArguments(int argc, const char** argv)
+{
+ if(argc < 2 || argc > 9){
+ usage(argv[0]);
+ doExit();
+ }
+
+ strcpy(fileName, argv[1]);
+ argc--;
+
+ int i = 2;
+ while (argc > 1)
+ {
+ if (strcmp(argv[i], "-noprint") == 0) {
+ thePrintFlag = false;
+ } else if (strcmp(argv[i], "-nocheck") == 0) {
+ theCheckFlag = false;
+ } else if (strcmp(argv[i], "-mbyteheaders") == 0) {
+ onlyMbyteHeaders = true;
+ } else if (strcmp(argv[i], "-pageheaders") == 0) {
+ onlyPageHeaders = true;
+ } else if (strcmp(argv[i], "-filedescriptors") == 0) {
+ onlyFileDesc = true;
+ } else if (strcmp(argv[i], "-mbyte") == 0) {
+ startAtMbyte = atoi(argv[i+1]);
+ if (startAtMbyte > 15) {
+ usage(argv[0]);
+ doExit();
+ }
+ argc--;
+ i++;
+ } else if (strcmp(argv[i], "-page") == 0) {
+ startAtPage = atoi(argv[i+1]);
+ if (startAtPage > 31) {
+ usage(argv[0]);
+ doExit();
+ }
+ argc--;
+ i++;
+ } else if (strcmp(argv[i], "-pageindex") == 0) {
+ startAtPageIndex = atoi(argv[i+1]);
+ if (startAtPageIndex > 8191 || startAtPageIndex < 12) {
+ usage(argv[0]);
+ doExit();
+ }
+ argc--;
+ i++;
+ } else {
+ usage(argv[0]);
+ doExit();
+ }
+ argc--;
+ i++;
+ }
+
+}
+
+void doExit() {
+ ndbout << "Error in redoLogReader(). Exiting!" << endl;
+ fclose(f);
+ delete [] redoLogPage;
+ exit(RETURN_ERROR);
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
new file mode 100644
index 00000000000..2baa4400409
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -0,0 +1,1974 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBTC_H
+#define DBTC_H
+
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <DLHashTable.hpp>
+#include <SLList.hpp>
+#include <DLList.hpp>
+#include <DLFifoList.hpp>
+#include <DataBuffer.hpp>
+#include <Bitmask.hpp>
+#include <AttributeList.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/LqhTransConf.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/TcIndx.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/EventReport.hpp>
+#include <trigger_definitions.h>
+#include <SignalCounter.hpp>
+
+#ifdef DBTC_C
+/*
+ * 2.2 LOCAL SYMBOLS
+ * -----------------
+ */
+#define Z8NIL 255
+#define ZAPI_CONNECT_FILESIZE 20
+#define ZATTRBUF_FILESIZE 4000
+#define ZCLOSED 2
+#define ZCOMMITING 0 /* VALUE FOR TRANSTATUS */
+#define ZCOMMIT_SETUP 2
+#define ZCONTINUE_ABORT_080 4
+#define ZDATABUF_FILESIZE 4000
+#define ZGCP_FILESIZE 10
+#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZINBUF_NEXT 27 /* POSITION OF 'NEXT'-VARIABLE. */
+#define ZINBUF_PREV 26 /* POSITION OF 'PREVIOUS'-VARIABLE. */
+#define ZINTSPH1 1
+#define ZINTSPH2 2
+#define ZINTSPH3 3
+#define ZINTSPH6 6
+#define ZLASTPHASE 255
+#define ZMAX_DATA_IN_LQHKEYREQ 12
+#define ZNODEBUF_FILESIZE 2000
+#define ZNR_OF_SEIZE 10
+#define ZSCANREC_FILE_SIZE 100
+#define ZSCAN_FRAGREC_FILE_SIZE 400
+#define ZSCAN_OPREC_FILE_SIZE 400
+#define ZSEND_ATTRINFO 0
+#define ZSPH1 1
+#define ZTABREC_FILESIZE 16
+#define ZTAKE_OVER_ACTIVE 1
+#define ZTAKE_OVER_IDLE 0
+#define ZTC_CONNECT_FILESIZE 200
+#define ZTCOPCONF_SIZE 6
+
+// ----------------------------------------
+// Error Codes for Scan
+// ----------------------------------------
+#define ZNO_CONCURRENCY_ERROR 242
+#define ZTOO_HIGH_CONCURRENCY_ERROR 244
+#define ZNO_SCANREC_ERROR 245
+#define ZNO_FRAGMENT_ERROR 246
+#define ZSCAN_AI_LEN_ERROR 269
+#define ZSCAN_LQH_ERROR 270
+#define ZSCAN_FRAG_LQH_ERROR 274
+
+#define ZSCANTIME_OUT_ERROR 296
+#define ZSCANTIME_OUT_ERROR2 297
+
+// ----------------------------------------
+// Error Codes for transactions
+// ----------------------------------------
+#define ZSTATE_ERROR 202
+#define ZLENGTH_ERROR 207 // Also Scan
+#define ZERO_KEYLEN_ERROR 208
+#define ZSIGNAL_ERROR 209
+#define ZGET_ATTRBUF_ERROR 217 // Also Scan
+#define ZGET_DATAREC_ERROR 218
+#define ZMORE_AI_IN_TCKEYREQ_ERROR 220
+#define ZCOMMITINPROGRESS 230
+#define ZROLLBACKNOTALLOWED 232
+#define ZNO_FREE_TC_CONNECTION 233 // Also Scan
+#define ZABORTINPROGRESS 237
+#define ZPREPAREINPROGRESS 238
+#define ZWRONG_SCHEMA_VERSION_ERROR 241 // Also Scan
+#define ZSCAN_NODE_ERROR 250
+#define ZTRANS_STATUS_ERROR 253
+#define ZTIME_OUT_ERROR 266
+#define ZSIMPLE_READ_WITHOUT_AI 271
+#define ZNO_AI_WITH_UPDATE 272
+#define ZSEIZE_API_COPY_ERROR 275
+#define ZSCANINPROGRESS 276
+#define ZABORT_ERROR 277
+#define ZCOMMIT_TYPE_ERROR 278
+
+#define ZNO_FREE_TC_MARKER 279
+#define ZNODE_SHUTDOWN_IN_PROGRESS 280
+#define ZCLUSTER_SHUTDOWN_IN_PROGRESS 281
+#define ZWRONG_STATE 282
+#define ZCLUSTER_IN_SINGLEUSER_MODE 299
+
+#define ZDROP_TABLE_IN_PROGRESS 283
+#define ZNO_SUCH_TABLE 284
+#define ZUNKNOWN_TABLE_ERROR 285
+#define ZNODEFAIL_BEFORE_COMMIT 286
+#define ZINDEX_CORRUPT_ERROR 287
+
+// ----------------------------------------
+// Seize error
+// ----------------------------------------
+#define ZNO_FREE_API_CONNECTION 219
+#define ZSYSTEM_NOT_STARTED_ERROR 203
+
+// ----------------------------------------
+// Release errors
+// ----------------------------------------
+#define ZINVALID_CONNECTION 229
+
+
+#define ZNOT_FOUND 626
+#define ZALREADYEXIST 630
+#define ZINCONSISTENTHASHINDEX 892
+#define ZNOTUNIQUE 893
+#endif
+
+class Dbtc: public SimulatedBlock {
+public:
+ enum ConnectionState {
+ CS_CONNECTED = 0,
+ CS_DISCONNECTED = 1,
+ CS_STARTED = 2,
+ CS_RECEIVING = 3,
+ CS_PREPARED = 4,
+ CS_START_PREPARING = 5,
+ CS_REC_PREPARING = 6,
+ CS_RESTART = 7,
+ CS_ABORTING = 8,
+ CS_COMPLETING = 9,
+ CS_COMPLETE_SENT = 10,
+ CS_PREPARE_TO_COMMIT = 11,
+ CS_COMMIT_SENT = 12,
+ CS_START_COMMITTING = 13,
+ CS_COMMITTING = 14,
+ CS_REC_COMMITTING = 15,
+ CS_WAIT_ABORT_CONF = 16,
+ CS_WAIT_COMPLETE_CONF = 17,
+ CS_WAIT_COMMIT_CONF = 18,
+ CS_FAIL_ABORTING = 19,
+ CS_FAIL_ABORTED = 20,
+ CS_FAIL_PREPARED = 21,
+ CS_FAIL_COMMITTING = 22,
+ CS_FAIL_COMMITTED = 23,
+ CS_FAIL_COMPLETED = 24,
+ CS_START_SCAN = 25
+ };
+
+ enum OperationState {
+ OS_CONNECTING_DICT = 0,
+ OS_CONNECTED = 1,
+ OS_OPERATING = 2,
+ OS_PREPARED = 3,
+ OS_COMMITTING = 4,
+ OS_COMMITTED = 5,
+ OS_COMPLETING = 6,
+ OS_COMPLETED = 7,
+ OS_RESTART = 8,
+ OS_ABORTING = 9,
+ OS_ABORT_SENT = 10,
+ OS_TAKE_OVER = 11,
+ OS_WAIT_DIH = 12,
+ OS_WAIT_KEYINFO = 13,
+ OS_WAIT_ATTR = 14,
+ OS_WAIT_COMMIT_CONF = 15,
+ OS_WAIT_ABORT_CONF = 16,
+ OS_WAIT_COMPLETE_CONF = 17,
+ OS_WAIT_SCAN = 18
+ };
+
+ enum AbortState {
+ AS_IDLE = 0,
+ AS_ACTIVE = 1
+ };
+
+ enum HostState {
+ HS_ALIVE = 0,
+ HS_DEAD = 1
+ };
+
+ enum LqhTransState {
+ LTS_IDLE = 0,
+ LTS_ACTIVE = 1
+ };
+
+ enum TakeOverState {
+ TOS_NOT_DEFINED = 0,
+ TOS_IDLE = 1,
+ TOS_ACTIVE = 2,
+ TOS_COMPLETED = 3,
+ TOS_NODE_FAILED = 4
+ };
+
+ enum FailState {
+ FS_IDLE = 0,
+ FS_LISTENING = 1,
+ FS_COMPLETING = 2
+ };
+
+ enum SystemStartState {
+ SSS_TRUE = 0,
+ SSS_FALSE = 1
+ };
+
+ enum TimeOutCheckState {
+ TOCS_TRUE = 0,
+ TOCS_FALSE = 1
+ };
+
+ enum ReturnSignal {
+ RS_NO_RETURN = 0,
+ RS_TCKEYCONF = 1,
+ RS_TC_COMMITCONF = 3,
+ RS_TCROLLBACKCONF = 4,
+ RS_TCROLLBACKREP = 5
+ };
+
+ enum IndexOperationState {
+ IOS_NOOP = 0,
+ IOS_INDEX_ACCESS = 1,
+ IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF = 2,
+ IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI = 3,
+ IOS_INDEX_OPERATION = 4
+ };
+
+ enum IndexState {
+ IS_BUILDING = 0, // build in progress, start state at create
+ IS_ONLINE = 1 // ready to use
+ };
+
+
+ /**--------------------------------------------------------------------------
+ * LOCAL SYMBOLS PER 'SYMBOL-VALUED' VARIABLE
+ *
+ *
+ * NSYMB ZAPI_CONNECT_FILESIZE = 20
+ * NSYMB ZTC_CONNECT_FILESIZE = 200
+ * NSYMB ZHOST_FILESIZE = 16
+ * NSYMB ZDATABUF_FILESIZE = 4000
+ * NSYMB ZATTRBUF_FILESIZE = 4000
+ * NSYMB ZGCP_FILESIZE = 10
+ *
+ *
+ * ABORTED CODES
+ * TPHASE NSYMB ZSPH1 = 1
+ * NSYMB ZLASTPHASE = 255
+ *
+ *
+ * LQH_TRANS
+ * NSYMB ZTRANS_ABORTED = 1
+ * NSYMB ZTRANS_PREPARED = 2
+ * NSYMB ZTRANS_COMMITTED = 3
+ * NSYMB ZCOMPLETED_LQH_TRANS = 4
+ * NSYMB ZTRANS_COMPLETED = 5
+ *
+ *
+ * TAKE OVER
+ * NSYMB ZTAKE_OVER_IDLE = 0
+ * NSYMB ZTAKE_OVER_ACTIVE = 1
+ *
+ * ATTRBUF (ATTRBUF_RECORD)
+ * NSYMB ZINBUF_DATA_LEN = 24
+ * NSYMB ZINBUF_NEXTFREE = 25 (NOT USED )
+ * NSYMB ZINBUF_PREV = 26
+ * NSYMB ZINBUF_NEXT = 27
+ -------------------------------------------------------------------------*/
+ /*
+ 2.3 RECORDS AND FILESIZES
+ -------------------------
+ */
+ /* **************************************************************** */
+ /* ---------------------------------------------------------------- */
+ /* ------------------- TRIGGER AND INDEX DATA --------------------- */
+ /* ---------------------------------------------------------------- */
+ /* **************************************************************** */
+ /* ********* DEFINED TRIGGER DATA ********* */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* TRIGGERS FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
+ /* WHEN THE TRIGGER IS DEACTIVATED. */
+ /* **************************************** */
+ struct TcDefinedTriggerData {
+ /**
+ * Trigger id, used to identify the trigger
+ */
+ UintR triggerId;
+
+ /**
+ * Trigger type, defines what the trigger is used for
+ */
+ TriggerType::Value triggerType;
+
+ /**
+ * Trigger event, defines which operation event activates the trigger
+ */
+ TriggerEvent::Value triggerEvent;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored
+ * Can be seen as a compact representation of SQL column name list
+ */
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+
+ /**
+ * Index id, only used by secondary_index triggers. This is the same as
+ * the index table id in DICT.
+ **/
+ Uint32 indexId;
+
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+
+ inline void print(NdbOut & s) const {
+ s << "[DefinedTriggerData = " << triggerId << "]";
+ }
+ };
+ typedef Ptr<TcDefinedTriggerData> DefinedTriggerPtr;
+
+ /**
+ * Pool of trigger data record
+ */
+ ArrayPool<TcDefinedTriggerData> c_theDefinedTriggerPool;
+
+ /**
+ * The list of active triggers
+ */
+ DLList<TcDefinedTriggerData> c_theDefinedTriggers;
+
+ typedef DataBuffer<11> AttributeBuffer;
+
+ AttributeBuffer::DataBufferPool c_theAttributeBufferPool;
+
+ UintR c_transactionBufferSpace;
+
+
+ /* ********** FIRED TRIGGER DATA ********** */
+ /* THIS RECORD FORMS LISTS OF FIRED */
+ /* TRIGGERS FOR A TRANSACTION. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
+ /* WHEN THE TRIGGER IS DEACTIVATED. */
+ /* **************************************** */
+ struct TcFiredTriggerData {
+ TcFiredTriggerData() {}
+
+ /**
+ * Trigger id, used to identify the trigger
+ **/
+ Uint32 triggerId;
+
+ /**
+ * The operation that fired the trigger
+ */
+ Uint32 fireingOperation;
+
+ /**
+ * Used for scrapping in case of node failure
+ */
+ Uint32 nodeId;
+
+ /**
+ * Trigger attribute info, primary key value(s)
+ */
+ AttributeBuffer::Head keyValues;
+
+ /**
+ * Trigger attribute info, attribute value(s) before operation
+ */
+ AttributeBuffer::Head beforeValues;
+
+ /**
+ * Trigger attribute info, attribute value(s) after operation
+ */
+ AttributeBuffer::Head afterValues;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ Uint32 nextHash;
+ };
+
+ /**
+ * Prev pointer (used in list)
+ */
+ union {
+ Uint32 prevList;
+ Uint32 prevHash;
+ };
+
+ inline void print(NdbOut & s) const {
+ s << "[FiredTriggerData = " << triggerId << "]";
+ }
+
+ inline Uint32 hashValue() const {
+ return fireingOperation ^ nodeId;
+ }
+
+ inline bool equal(const TcFiredTriggerData & rec) const {
+ return fireingOperation == rec.fireingOperation && nodeId == rec.nodeId;
+ }
+ };
+ typedef Ptr<TcFiredTriggerData> FiredTriggerPtr;
+
+ /**
+ * Pool of trigger data record
+ */
+ ArrayPool<TcFiredTriggerData> c_theFiredTriggerPool;
+ DLHashTable<TcFiredTriggerData> c_firedTriggerHash;
+ AttributeBuffer::DataBufferPool c_theTriggerAttrInfoPool;
+
+ Uint32 c_maxNumberOfDefinedTriggers;
+ Uint32 c_maxNumberOfFiredTriggers;
+
+ struct AttrInfoRecord {
+ /**
+ * Pre-allocated AttrInfo signal
+ */
+ AttrInfo attrInfo;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+
+ /* ************* INDEX DATA *************** */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* INDEX FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A INDEX */
+ /* POOL WHERE AN INDEX RECORD IS SEIZED */
+ /* WHEN AN INDEX IS CREATED AND RELEASED */
+ /* WHEN THE INDEX IS DROPPED. */
+ /* **************************************** */
+ struct TcIndexData {
+ /**
+ * IndexState
+ */
+ IndexState indexState;
+
+ /**
+ * Index id, same as index table id in DICT
+ */
+ Uint32 indexId;
+
+ /**
+ * Index attribute list. Only the length is used in v21x.
+ */
+ AttributeList attributeList;
+
+ /**
+ * Primary table id, the primary table to be indexed
+ */
+ Uint32 primaryTableId;
+
+ /**
+ * Primary key position in secondary table
+ */
+ Uint32 primaryKeyPos;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+ typedef Ptr<TcIndexData> TcIndexDataPtr;
+
+ /**
+ * Pool of index data record
+ */
+ ArrayPool<TcIndexData> c_theIndexPool;
+
+ /**
+ * The list of defined indexes
+ */
+ ArrayList<TcIndexData> c_theIndexes;
+ UintR c_maxNumberOfIndexes;
+
+ struct TcIndexOperation {
+ TcIndexOperation(AttributeBuffer::DataBufferPool & abp) :
+ indexOpState(IOS_NOOP),
+ expectedKeyInfo(0),
+ keyInfo(abp),
+ expectedAttrInfo(0),
+ attrInfo(abp),
+ expectedTransIdAI(0),
+ transIdAI(abp),
+ indexReadTcConnect(RNIL)
+ {}
+
+ ~TcIndexOperation()
+ {
+ }
+
+ // Index data
+ Uint32 indexOpId;
+ IndexOperationState indexOpState; // Used to mark on-going TcKeyReq
+ Uint32 expectedKeyInfo;
+ AttributeBuffer keyInfo; // For accumulating IndxKeyInfo
+ Uint32 expectedAttrInfo;
+ AttributeBuffer attrInfo; // For accumulating IndxAttrInfo
+ Uint32 expectedTransIdAI;
+ AttributeBuffer transIdAI; // For accumulating TransId_AI
+
+ TcKeyReq tcIndxReq;
+ UintR connectionIndex;
+ UintR indexReadTcConnect; //
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+ typedef Ptr<TcIndexOperation> TcIndexOperationPtr;
+
+ /**
+ * Pool of index data record
+ */
+ ArrayPool<TcIndexOperation> c_theIndexOperationPool;
+
+ /**
+ * The list of index operations
+ */
+ ArrayList<TcIndexOperation> c_theIndexOperations;
+
+ UintR c_maxNumberOfIndexOperations;
+
+ struct TcSeizedIndexOperation {
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+ };
+
+ /**
+ * Pool of seized index operations
+ */
+ ArrayPool<TcSeizedIndexOperation> c_theSeizedIndexOperationPool;
+
+ typedef Ptr<TcSeizedIndexOperation> TcSeizedIndexOperationPtr;
+
+ /************************** API CONNECT RECORD ***********************
+ * The API connect record contains the connection record to which the
+ * application connects.
+ *
+ * The application can send one operation at a time. It can send a
+ * new operation immediately after sending the previous operation.
+ * Thereby several operations can be active in one transaction within TC.
+ * This is achieved by using the API connect record.
+ * Each active operation is handled by the TC connect record.
+ * As soon as the TC connect record has sent the
+ * request to the LQH it is ready to receive new operations.
+ * The LQH connect record takes care of waiting for an operation to
+ * complete.
+ * When an operation has completed on the LQH connect record,
+ * a new operation can be started on this LQH connect record.
+ *******************************************************************
+ *
+ * API CONNECT RECORD ALIGNED TO BE 256 BYTES
+ ********************************************************************/
+
+ /*******************************************************************>*/
+ // We break out the API timer so that it is optimised for scanning
+ // rather than for fast access.
+ /*******************************************************************>*/
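+ // The 'line' argument is presumably the caller's source line (e.g. __LINE__),
+ // kept in c_apiConTimer_line so a stuck timer can be traced back to the
+ // call site that last set it.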
+ inline void setApiConTimer(Uint32 apiConPtrI, Uint32 value, Uint32 line){
+ c_apiConTimer[apiConPtrI] = value;
+ c_apiConTimer_line[apiConPtrI] = line;
+ }
+
+ inline Uint32 getApiConTimer(Uint32 apiConPtrI) const {
+ return c_apiConTimer[apiConPtrI];
+ }
+ UintR* c_apiConTimer;
+ UintR* c_apiConTimer_line;
+
+ struct ApiConnectRecord {
+ ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
+ ArrayPool<TcSeizedIndexOperation> & seizedIndexOpPool):
+ theFiredTriggers(firedTriggerPool),
+ isIndexOp(false),
+ theSeizedIndexOperations(seizedIndexOpPool)
+ {}
+
+ //---------------------------------------------------
+ // First 16 byte cache line. Hot variables.
+ //---------------------------------------------------
+ ConnectionState apiConnectstate;
+ UintR transid[2];
+ UintR firstTcConnect;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Hot variables.
+ //---------------------------------------------------
+ UintR lqhkeyconfrec;
+ UintR cachePtr;
+ UintR currSavePointId;
+ UintR counter;
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. First and second cache
+ // line plus this will be enough for copy API records.
+ // Variables used in late phases.
+ //---------------------------------------------------
+ UintR nextGcpConnect;
+ UintR prevGcpConnect;
+ UintR gcpPointer;
+ UintR ndbapiConnect;
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. Only used in late phases.
+ // Plus 4 bytes of error handling.
+ //---------------------------------------------------
+ UintR nextApiConnect;
+ BlockReference ndbapiBlockref;
+ UintR apiCopyRecord;
+ UintR globalcheckpointid;
+
+ //---------------------------------------------------
+ // Second 64 byte cache line starts. First 16 byte
+ // cache line in this one. Variables primarily used
+ // in early phase.
+ //---------------------------------------------------
+ UintR lastTcConnect;
+ UintR lqhkeyreqrec;
+ AbortState abortState;
+ Uint32 buddyPtr;
+ Uint8 m_exec_flag;
+ Uint8 unused2;
+ Uint8 takeOverRec;
+ Uint8 currentReplicaNo;
+
+ //---------------------------------------------------
+ // Error Handling variables. If cache line 32 bytes
+ // ensures that cache line is still only read in
+ // early phases.
+ //---------------------------------------------------
+ union {
+ UintR apiScanRec;
+ UintR commitAckMarker;
+ };
+ UintR currentTcConnect;
+ BlockReference tcBlockref;
+ Uint16 returncode;
+ Uint16 takeOverInd;
+
+ //---------------------------------------------------
+ // Second 64 byte cache line. Third 16 byte cache line
+ // in this one. Variables primarily used in early phase
+ // and checked in late phase.
+ // Fourth cache line is the tcSendArray that is used
+ // when two and three operations are responded to in
+ // parallel. The first two entries in tcSendArray is
+ // part of the third cache line.
+ //---------------------------------------------------
+ //---------------------------------------------------
+ // timeOutCounter is used waiting for ABORTCONF, COMMITCONF
+ // and COMPLETECONF
+ //---------------------------------------------------
+ UintR failureNr;
+ Uint8 tckeyrec; // Changed from R
+ Uint8 tcindxrec;
+ Uint8 apiFailState; // Changed from R
+ ReturnSignal returnsignal;
+ Uint8 timeOutCounter;
+
+ UintR tcSendArray[6];
+
+ // Trigger data
+
+ /**
+ * The list of fired triggers
+ */
+ DLFifoList<TcFiredTriggerData> theFiredTriggers;
+
+ bool triggerPending; // Used to mark waiting for a CONTINUEB
+
+ // Index data
+
+ bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
+ bool indexOpReturn;
+ UintR noIndexOp; // Number of outstanding index ops
+
+ // Index op return context
+ UintR indexOp;
+ UintR clientData;
+ UintR attrInfoLen;
+
+ UintR accumulatingIndexOp;
+ UintR executingIndexOp;
+ UintR tcIndxSendArray[6];
+ ArrayList<TcSeizedIndexOperation> theSeizedIndexOperations;
+ };
+
+ typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
+
+
+ /************************** TC CONNECT RECORD ************************/
+ /* *******************************************************************/
+ /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
+ /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
+ /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
+ /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
+ /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
+ /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
+ /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
+ /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
+ /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
+ /* RECORDS. IT ALSO CONTAINS A LIST OF ALL OPERATIONS THAT ARE */
+ /* EXECUTED WITH THE TC CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
+ /*******************************************************************>*/
+ struct TcConnectRecord {
+ //---------------------------------------------------
+ // First 16 byte cache line. Those variables are only
+ // used in error cases.
+ //---------------------------------------------------
+ UintR tcOprec; /* TC OPREC of operation being taken over */
+ Uint16 failData[4]; /* Failed nodes when taking over an operation */
+ UintR nextTcFailHash;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Those variables are used
+ // from LQHKEYCONF to sending COMMIT and COMPLETED.
+ //---------------------------------------------------
+ UintR lastLqhCon; /* Connect record in last replicas Lqh record */
+ Uint16 lastLqhNodeId; /* Node id of last replicas Lqh */
+ Uint16 m_execAbortOption;/* TcKeyReq::ExecuteAbortOption */
+ UintR commitAckMarker; /* CommitMarker I value */
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. The hottest variables.
+ //---------------------------------------------------
+ OperationState tcConnectstate; /* THE STATE OF THE CONNECT*/
+ UintR apiConnect; /* POINTER TO API CONNECT RECORD */
+ UintR nextTcConnect; /* NEXT TC RECORD*/
+ Uint8 dirtyOp;
+ Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */
+ Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */
+ Uint8 operation; /* OPERATION TYPE */
+ /* 0 = READ REQUEST */
+ /* 1 = UPDATE REQUEST */
+ /* 2 = INSERT REQUEST */
+ /* 3 = DELETE REQUEST */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. The mildly hot variables.
+ // tcNodedata expands 4 Bytes into the next cache line
+ // with indexes almost never used.
+ //---------------------------------------------------
+ UintR clientData; /* SENDERS OPERATION POINTER */
+ UintR dihConnectptr; /* CONNECTION TO DIH BLOCK ON THIS NODE */
+ UintR prevTcConnect; /* DOUBLY LINKED LIST OF TC CONNECT RECORDS*/
+ UintR savePointId;
+
+ Uint16 tcNodedata[4];
+
+ // Trigger data
+ FiredTriggerPtr accumulatingTriggerData;
+ UintR noFiredTriggers;
+ UintR noReceivedTriggers;
+ UintR triggerExecutionCount;
+ UintR triggeringOperation;
+ UintR savedState[LqhKeyConf::SignalLength];
+
+ // Index data
+ bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
+ UintR indexOp;
+ UintR currentIndexId;
+ UintR attrInfoLen;
+ };
+
+ friend struct TcConnectRecord;
+
+ typedef Ptr<TcConnectRecord> TcConnectRecordPtr;
+
+ // ********************** CACHE RECORD **************************************
+ //---------------------------------------------------------------------------
+ // This record is used between reception of TCKEYREQ and sending of LQHKEYREQ
+ // It is separated so as to improve the cache hit rate and also to minimise
+ // the necessary memory storage in NDB Cluster.
+ //---------------------------------------------------------------------------
+
+ struct CacheRecord {
+ //---------------------------------------------------
+ // First 16 byte cache line. Variables used by
+ // ATTRINFO processing.
+ //---------------------------------------------------
+ UintR firstAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
+ UintR lastAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */
+ UintR currReclenAi;
+ Uint16 attrlength; /* ATTRIBUTE INFORMATION LENGTH */
+ Uint16 save1;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR attrinfo15[4];
+
+ //---------------------------------------------------
+ // Third 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR attrinfo0;
+ UintR schemaVersion;/* SCHEMA VERSION USED IN TRANSACTION */
+ UintR tableref; /* POINTER TO THE TABLE IN WHICH THE FRAGMENT EXISTS*/
+ Uint16 apiVersionNo;
+ Uint16 keylen; /* KEY LENGTH SENT BY REQUEST SIGNAL */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line. Variables initiated by
+ // TCKEYREQ and used in LQHKEYREQ.
+ //---------------------------------------------------
+ UintR keydata[4]; /* RECEIVES FIRST 16 BYTES OF TUPLE KEY */
+
+ //---------------------------------------------------
+ // First 16 byte cache line in second 64 byte cache
+ // line. Diverse use.
+ //---------------------------------------------------
+ UintR fragmentid; /* THE COMPUTED FRAGMENT ID */
+ UintR hashValue; /* THE HASH VALUE USED TO LOCATE FRAGMENT */
+
+ Uint8 distributionKeyIndicator;
+ Uint8 m_special_hash; // collation or distribution key
+ Uint8 unused2;
+ Uint8 lenAiInTckeyreq; /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */
+
+ Uint8 fragmentDistributionKey; /* DIH generation no */
+
+ /**
+ * EXECUTION MODE OF OPERATION
+ * 0 = NORMAL EXECUTION, 1 = INTERPRETED EXECUTION
+ */
+ Uint8 opExec;
+
+ /**
+ * LOCK TYPE OF OPERATION IF READ OPERATION
+ * 0 = READ LOCK, 1 = WRITE LOCK
+ */
+ Uint8 opLock;
+
+ /**
+ * IS THE OPERATION A SIMPLE TRANSACTION
+ * 0 = NO, 1 = YES
+ */
+ Uint8 opSimple;
+
+ //---------------------------------------------------
+ // Second 16 byte cache line in second 64 byte cache
+ // line. Diverse use.
+ //---------------------------------------------------
+ UintR distributionKey;
+ UintR nextCacheRec;
+ UintR unused3;
+ Uint32 scanInfo;
+
+ //---------------------------------------------------
+ // Third 16 byte cache line in second 64
+ // byte cache line. Diverse use.
+ //---------------------------------------------------
+ Uint32 unused4;
+ Uint32 scanTakeOverInd;
+ UintR firstKeybuf; /* POINTER TO THE LINKED LIST OF KEY BUFFERS */
+ UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line in second 64
+ // byte cache line. Not used currently.
+ //---------------------------------------------------
+ UintR packedCacheVar[4];
+ };
+
+ typedef Ptr<CacheRecord> CacheRecordPtr;
+
+ /* ************************ HOST RECORD ********************************** */
+ /********************************************************/
+ /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
+ /* SYSTEM */
+ /********************************************************/
+ /* THIS RECORD IS ALIGNED TO BE 128 BYTES. */
+ /********************************************************/
+ struct HostRecord {
+ HostState hostStatus;
+ LqhTransState lqhTransStatus;
+ TakeOverState takeOverStatus;
+ bool inPackedList;
+ UintR noOfPackedWordsLqh;
+ UintR packedWordsLqh[26];
+ UintR noOfWordsTCKEYCONF;
+ UintR packedWordsTCKEYCONF[30];
+ UintR noOfWordsTCINDXCONF;
+ UintR packedWordsTCINDXCONF[30];
+ BlockReference hostLqhBlockRef;
+ }; /* p2c: size = 128 bytes */
+
+ typedef Ptr<HostRecord> HostRecordPtr;
+
+ /* *********** TABLE RECORD ********************************************* */
+
+ /********************************************************/
+ /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
+ /* ALL TABLES IN THE SYSTEM. */
+ /********************************************************/
+ struct TableRecord {
+ Uint32 currentSchemaVersion;
+ Uint8 enabled;
+ Uint8 dropping;
+ Uint8 tableType;
+ Uint8 storedTable;
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ Uint8 noOfDistrKeys;
+
+ struct KeyAttr {
+ Uint32 attributeDescriptor;
+ CHARSET_INFO* charsetInfo;
+ } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
+
+ bool checkTable(Uint32 schemaVersion) const {
+ return enabled && !dropping && (schemaVersion == currentSchemaVersion);
+ }
+
+ Uint32 getErrorCode(Uint32 schemaVersion) const;
+
+ struct DropTable {
+ Uint32 senderRef;
+ Uint32 senderData;
+ SignalCounter waitDropTabCount;
+ } dropTable;
+ };
+ typedef Ptr<TableRecord> TableRecordPtr;
+
+ /**
+ * There are at most 16 ScanFragRecs for
+ * each scan started in TC. Each ScanFragRec is used by
+ * a scan fragment "process" that scans one fragment at a time.
+ * It will receive at most 16 tuples in each request.
+ */
+ struct ScanFragRec {
+ ScanFragRec(){
+ stopFragTimer();
+ lqhBlockref = 0;
+ scanFragState = IDLE;
+ scanRec = RNIL;
+ }
+ /**
+ * ScanFragState
+ * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new
+ * fragment scan
+ * LQH_ACTIVE : The scan process has sent a command to LQH and is
+ * waiting for the response
+ * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is
+ * waiting for the response
+ * DELIVERED : The result has been delivered; this scan frag process
+ * is waiting for a SCAN_NEXTREQ to tell us to continue scanning
+ * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan
+ * soon
+ * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery
+ * to API
+ * COMPLETED : The fragment scan process has completed and finally
+ * sent a SCAN_PROCCONF
+ */
+ enum ScanFragState {
+ IDLE = 0,
+ WAIT_GET_PRIMCONF = 1,
+ LQH_ACTIVE = 2,
+ DELIVERED = 4,
+ QUEUED_FOR_DELIVERY = 6,
+ COMPLETED = 7
+ };
+ // Timer for checking timeout of this fragment scan
+ Uint32 scanFragTimer;
+
+ // Id of the current scanned fragment
+ Uint32 scanFragId;
+
+ // Blockreference of LQH
+ BlockReference lqhBlockref;
+
+ // getNodeInfo.m_connectCount, set at seize and used so that
+ // I don't accidentally kill a starting node
+ Uint32 m_connectCount;
+
+ // State of this fragment scan
+ ScanFragState scanFragState;
+
+ // Id of the ScanRecord this fragment scan belongs to
+ Uint32 scanRec;
+
+ // The value of fragmentCompleted in the last received SCAN_FRAGCONF
+ Uint8 m_scan_frag_conf_status;
+
+ inline void startFragTimer(Uint32 timeVal){
+ scanFragTimer = timeVal;
+ }
+ inline void stopFragTimer(void){
+ scanFragTimer = 0;
+ }
+
+ Uint32 m_ops;
+ Uint32 m_chksum;
+ Uint32 m_apiPtr;
+ Uint32 m_totalLen;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<ScanFragRec> ScanFragRecPtr;
+ typedef LocalDLList<ScanFragRec> ScanFragList;
+
+ /**
+ * Each scan allocates one ScanRecord to store information
+ * about the current scan
+ *
+ */
+ struct ScanRecord {
+ ScanRecord() {}
+ /** NOTE! This is the old comment for ScanState. - MASV
+ * STATE TRANSITIONS OF SCAN_STATE. SCAN_STATE IS THE STATE
+ * VARIABLE OF THE RECEIVE AND DELIVERY PROCESS.
+ * THE PROCESS HAS THREE STEPS IT GOES THROUGH.
+ * 1) THE INITIAL STATES WHEN RECEIVING DATA FOR THE SCAN.
+ * - WAIT_SCAN_TAB_INFO
+ * - WAIT_AI
+ * - WAIT_FRAGMENT_COUNT
+ * 2) THE EXECUTION STATES WHEN THE SCAN IS PERFORMED.
+ * - SCAN_NEXT_ORDERED
+ * - DELIVERED
+ * - QUEUED_DELIVERED
+ * 3) THE CLOSING STATE WHEN THE SCAN PROCESS IS CLOSING UP
+ * EVERYTHING.
+ * - CLOSING_SCAN
+ * INITIAL START WHEN SCAN_TABREQ RECEIVED
+ * -> WAIT_SCAN_TAB_INFO (IF ANY SCAN_TABINFO TO BE RECEIVED)
+ * -> WAIT_AI (IF NO SCAN_TAB_INFO BUT ATTRINFO IS RECEIVED)
+ * -> WAIT_FRAGMENT_COUNT (IF NEITHER SCAN_TABINFO OR ATTRINFO
+ * RECEIVED)
+ *
+ * WAIT_SCAN_TAB_INFO TRANSITIONS:
+ * -> WAIT_SCAN_TABINFO (WHEN MORE SCAN_TABINFO RECEIVED)
+ * -> WAIT_AI (WHEN ATTRINFO RECEIVED AFTER RECEIVING ALL
+ * SCAN_TABINFO)
+ * -> WAIT_FRAGMENT_COUNT (WHEN NO ATTRINFO RECEIVED AFTER
+ * RECEIVING ALL SCAN_TABINFO )
+ * WAIT_AI TRANSITIONS:
+ * -> WAIT_AI (WHEN MORE ATTRINFO RECEIVED)
+ * -> WAIT_FRAGMENT_COUNT (WHEN ALL ATTRINFO RECEIVED)
+ *
+ * WAIT_FRAGMENT_COUNT TRANSITIONS:
+ * -> SCAN_NEXT_ORDERED
+ *
+ * SCAN_NEXT_ORDERED TRANSITIONS:
+ * -> DELIVERED (WHEN FIRST SCAN_FRAGCONF ARRIVES WITH OPERATIONS
+ * TO REPORT IN IT)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
+ * ERROR)
+ *
+ * DELIVERED TRANSITIONS:
+ * -> SCAN_NEXT_ORDERED (IF SCAN_NEXTREQ ARRIVES BEFORE ANY NEW
+ * OPERATIONS TO REPORT ARRIVES)
+ * -> QUEUED_DELIVERED (IF NEW OPERATION TO REPORT ARRIVES BEFORE
+ * SCAN_NEXTREQ)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME
+ * ERROR)
+ *
+ * QUEUED_DELIVERED TRANSITIONS:
+ * -> DELIVERED (WHEN SCAN_NEXTREQ ARRIVES AND QUEUED OPERATIONS
+ * TO REPORT ARE SENT TO THE APPLICATION)
+ * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY
+ * SOME ERROR)
+ */
+ enum ScanState {
+ IDLE = 0,
+ WAIT_SCAN_TAB_INFO = 1,
+ WAIT_AI = 2,
+ WAIT_FRAGMENT_COUNT = 3,
+ RUNNING = 4,
+ CLOSING_SCAN = 5
+ };
+
+ // State of this scan
+ ScanState scanState;
+
+ DLList<ScanFragRec>::Head m_running_scan_frags; // Currently in LQH
+ union { Uint32 m_queued_count; Uint32 scanReceivedOperations; };
+ DLList<ScanFragRec>::Head m_queued_scan_frags; // In TC !sent to API
+ DLList<ScanFragRec>::Head m_delivered_scan_frags;// Delivered to API
+
+ // Id of the next fragment to be scanned. Used by scan fragment
+ // processes when they are ready for the next fragment
+ Uint32 scanNextFragId;
+
+ // Total number of fragments in the table we are scanning
+ Uint32 scanNoFrag;
+
+ // Index of the next ScanRecord when in free list
+ Uint32 nextScan;
+
+ // Length of expected attribute information
+ union { Uint32 scanAiLength; Uint32 m_booked_fragments_count; };
+
+ Uint32 scanKeyLen;
+
+ // Reference to ApiConnectRecord
+ Uint32 scanApiRec;
+
+ // Reference to TcConnectRecord
+ Uint32 scanTcrec;
+
+ // Number of scan frag processes that belong to this scan
+ Uint32 scanParallel;
+
+ // Schema version used by this scan
+ Uint32 scanSchemaVersion;
+
+ // Index of stored procedure belonging to this scan
+ Uint32 scanStoredProcId;
+
+ // The index of table that is scanned
+ Uint32 scanTableref;
+
+ // Number of operation records per scanned fragment
+ // Number of operations in first batch
+ // Max number of bytes per batch
+ union {
+ Uint16 first_batch_size_rows;
+ Uint16 batch_size_rows;
+ };
+ Uint32 batch_byte_size;
+
+ Uint32 scanRequestInfo; // ScanFrag format
+
+ // Close is ordered
+ bool m_close_scan_req;
+ };
+ typedef Ptr<ScanRecord> ScanRecordPtr;
+
+ /* **********************************************************************$ */
+ /* ******$ DATA BUFFER ******$ */
+ /* */
+ /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
+ /* **********************************************************************$ */
+ struct DatabufRecord {
+ UintR data[4];
+ /* 4 * 1 WORD = 4 WORD */
+ UintR nextDatabuf;
+ }; /* p2c: size = 20 bytes */
+
+ typedef Ptr<DatabufRecord> DatabufRecordPtr;
+
+ /* **********************************************************************$ */
+ /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
+ /*
+ * CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
+ * INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
+ * SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
+ * DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
+ * ZINBUF_DATA_LEN.
+ * NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXTFREE.
+ * PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
+ * (NOT USED YET).
+ * NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT. */
+ /* ******************************************************************** */
+ struct AttrbufRecord {
+ UintR attrbuf[32];
+ }; /* p2c: size = 128 bytes */
+
+ typedef Ptr<AttrbufRecord> AttrbufRecordPtr;
+
+ /*************************************************************************>*/
+ /* GLOBAL CHECKPOINT INFORMATION RECORD */
+ /* */
+ /* THIS RECORD IS USED TO STORE THE GLOBALCHECKPOINT NUMBER AND A
+ * COUNTER DURING THE COMPLETION PHASE OF THE TRANSACTION */
+ /*************************************************************************>*/
+ /* */
+ /* GCP RECORD ALIGNED TO BE 32 BYTES */
+ /*************************************************************************>*/
+ struct GcpRecord {
+ UintR gcpUnused1[2]; /* p2c: Not used */
+ UintR firstApiConnect;
+ UintR lastApiConnect;
+ UintR gcpId;
+ UintR nextGcp;
+ UintR gcpUnused2; /* p2c: Not used */
+ Uint16 gcpNomoretransRec;
+ }; /* p2c: size = 32 bytes */
+
+ typedef Ptr<GcpRecord> GcpRecordPtr;
+
+ /*************************************************************************>*/
+ /* TC_FAIL_RECORD */
+ /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED
+ * TC NODE. */
+ /*************************************************************************>*/
+ struct TcFailRecord {
+ Uint16 queueList[MAX_NDB_NODES];
+ Uint8 takeOverProcState[MAX_NDB_NODES];
+ UintR completedTakeOver;
+ UintR currentHashIndexTakeOver;
+ FailState failStatus;
+ Uint16 queueIndex;
+ Uint16 takeOverNode;
+ }; /* p2c: size = 64 bytes */
+
+ typedef Ptr<TcFailRecord> TcFailRecordPtr;
+
+public:
+ Dbtc(const class Configuration &);
+ virtual ~Dbtc();
+
+private:
+ BLOCK_DEFINES(Dbtc);
+
+ // Transit signals
+ void execPACKED_SIGNAL(Signal* signal);
+ void execABORTED(Signal* signal);
+ void execATTRINFO(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execKEYINFO(Signal* signal);
+ void execSCAN_NEXTREQ(Signal* signal);
+ void execSCAN_PROCREQ(Signal* signal);
+ void execSCAN_PROCCONF(Signal* signal);
+ void execTAKE_OVERTCREQ(Signal* signal);
+ void execTAKE_OVERTCCONF(Signal* signal);
+ void execLQHKEYREF(Signal* signal);
+ void execTRANSID_AI_R(Signal* signal);
+ void execKEYINFO20_R(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execCOMPLETED(Signal* signal);
+ void execCOMMITTED(Signal* signal);
+ void execDIGETNODESREF(Signal* signal);
+ void execDIGETPRIMCONF(Signal* signal);
+ void execDIGETPRIMREF(Signal* signal);
+ void execDISEIZECONF(Signal* signal);
+ void execDIVERIFYCONF(Signal* signal);
+ void execDI_FCOUNTCONF(Signal* signal);
+ void execDI_FCOUNTREF(Signal* signal);
+ void execGCP_NOMORETRANS(Signal* signal);
+ void execLQHKEYCONF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTC_COMMITREQ(Signal* signal);
+ void execTC_CLOPSIZEREQ(Signal* signal);
+ void execTCGETOPSIZEREQ(Signal* signal);
+ void execTCKEYREQ(Signal* signal);
+ void execTCRELEASEREQ(Signal* signal);
+ void execTCSEIZEREQ(Signal* signal);
+ void execTCROLLBACKREQ(Signal* signal);
+ void execTC_HBREP(Signal* signal);
+ void execTC_SCHVERREQ(Signal* signal);
+ void execSCAN_TABREQ(Signal* signal);
+ void execSCAN_TABINFO(Signal* signal);
+ void execSCAN_FRAGCONF(Signal* signal);
+ void execSCAN_FRAGREF(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execLQH_TRANSCONF(Signal* signal);
+ void execCOMPLETECONF(Signal* signal);
+ void execCOMMITCONF(Signal* signal);
+ void execABORTCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execTIME_SIGNAL(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal);
+ void execSCAN_HBREP(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+
+ void execABORT_ALL_REQ(Signal* signal);
+
+ void execCREATE_TRIG_REQ(Signal* signal);
+ void execDROP_TRIG_REQ(Signal* signal);
+ void execFIRE_TRIG_ORD(Signal* signal);
+ void execTRIG_ATTRINFO(Signal* signal);
+ void execCREATE_INDX_REQ(Signal* signal);
+ void execDROP_INDX_REQ(Signal* signal);
+ void execTCINDXREQ(Signal* signal);
+ void execINDXKEYINFO(Signal* signal);
+ void execINDXATTRINFO(Signal* signal);
+ void execALTER_INDX_REQ(Signal* signal);
+
+ // Index table lookup
+ void execTCKEYCONF(Signal* signal);
+ void execTCKEYREF(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execTCROLLBACKREP(Signal* signal);
+
+ void execCREATE_TAB_REQ(Signal* signal);
+ void execPREP_DROP_TAB_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execWAIT_DROP_TAB_REF(Signal* signal);
+ void execWAIT_DROP_TAB_CONF(Signal* signal);
+ void checkWaitDropTabFailedLqh(Signal*, Uint32 nodeId, Uint32 tableId);
+ void execALTER_TAB_REQ(Signal* signal);
+ void set_timeout_value(Uint32 timeOut);
+ void set_appl_timeout_value(Uint32 timeOut);
+ void set_no_parallel_takeover(Uint32);
+ void updateBuddyTimer(ApiConnectRecordPtr);
+
+ // Statement blocks
+ void updatePackedList(Signal* signal, HostRecord* ahostptr,
+ Uint16 ahostIndex);
+ void clearTcNodeData(Signal* signal,
+ UintR TLastLqhIndicator,
+ UintR Tstart);
+ void errorReport(Signal* signal, int place);
+ void warningReport(Signal* signal, int place);
+ void printState(Signal* signal, int place);
+ int seizeTcRecord(Signal* signal);
+ int seizeCacheRecord(Signal* signal);
+ void TCKEY_abort(Signal* signal, int place);
+ void copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR copyLen);
+ void reportNodeFailed(Signal* signal, Uint32 nodeId);
+ void sendPackedTCKEYCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId);
+ void sendPackedTCINDXCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId);
+ void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr);
+ void sendCommitLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr);
+ void sendCompleteLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr);
+ void sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord *);
+ void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *);
+ void checkStartTimeout(Signal* signal);
+ void checkStartFragTimeout(Signal* signal);
+ void timeOutFoundFragLab(Signal* signal, Uint32 TscanConPtr);
+ void timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr);
+ int releaseAndAbort(Signal* signal);
+ void findApiConnectFail(Signal* signal);
+ void findTcConnectFail(Signal* signal);
+ void initApiConnectFail(Signal* signal);
+ void initTcConnectFail(Signal* signal);
+ void initTcFail(Signal* signal);
+ void releaseTakeOver(Signal* signal);
+ void setupFailData(Signal* signal);
+ void updateApiStateFail(Signal* signal);
+ void updateTcStateFail(Signal* signal);
+ void handleApiFailState(Signal* signal, UintR anApiConnectptr);
+ void handleFailedApiNode(Signal* signal,
+ UintR aFailedNode,
+ UintR anApiConnectPtr);
+ void handleScanStop(Signal* signal, UintR aFailedNode);
+ void initScanTcrec(Signal* signal);
+ void initScanrec(ScanRecordPtr, const class ScanTabReq*,
+ const UintR scanParallel,
+ const UintR noOprecPerFrag);
+ void initScanfragrec(Signal* signal);
+ void releaseScanResources(ScanRecordPtr);
+ ScanRecordPtr seizeScanrec(Signal* signal);
+ void sendScanFragReq(Signal*, ScanRecord*, ScanFragRec*);
+ void sendScanTabConf(Signal* signal, ScanRecordPtr);
+ void close_scan_req(Signal*, ScanRecordPtr, bool received_req);
+ void close_scan_req_send_conf(Signal*, ScanRecordPtr);
+
+ void checkGcp(Signal* signal);
+ void commitGciHandling(Signal* signal, UintR Tgci);
+ void copyApi(Signal* signal);
+ void DIVER_node_fail_handling(Signal* signal, UintR Tgci);
+ void gcpTcfinished(Signal* signal);
+ void handleGcp(Signal* signal);
+ void hash(Signal* signal);
+ bool handle_special_hash(Uint32 dstHash[4],
+ Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI, bool distr);
+
+ void initApiConnect(Signal* signal);
+ void initApiConnectRec(Signal* signal,
+ ApiConnectRecord * const regApiPtr,
+ bool releaseIndexOperations = false);
+ void initattrbuf(Signal* signal);
+ void initdatabuf(Signal* signal);
+ void initgcp(Signal* signal);
+ void inithost(Signal* signal);
+ void initialiseScanrec(Signal* signal);
+ void initialiseScanFragrec(Signal* signal);
+ void initialiseScanOprec(Signal* signal);
+ void initTable(Signal* signal);
+ void initialiseTcConnect(Signal* signal);
+ void linkApiToGcp(Signal* signal);
+ void linkGciInGcilist(Signal* signal);
+ void linkKeybuf(Signal* signal);
+ void linkTcInConnectionlist(Signal* signal);
+ void releaseAbortResources(Signal* signal);
+ void releaseApiCon(Signal* signal, UintR aApiConnectPtr);
+ void releaseApiConCopy(Signal* signal);
+ void releaseApiConnectFail(Signal* signal);
+ void releaseAttrinfo();
+ void releaseGcp(Signal* signal);
+ void releaseKeys();
+ void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
+ void releaseDirtyWrite(Signal* signal);
+ void releaseTcCon();
+ void releaseTcConnectFail(Signal* signal);
+ void releaseTransResources(Signal* signal);
+ void saveAttrbuf(Signal* signal);
+ void seizeApiConnect(Signal* signal);
+ void seizeApiConnectCopy(Signal* signal);
+ void seizeApiConnectFail(Signal* signal);
+ void seizeDatabuf(Signal* signal);
+ void seizeGcp(Signal* signal);
+ void seizeTcConnect(Signal* signal);
+ void seizeTcConnectFail(Signal* signal);
+ void sendApiCommit(Signal* signal);
+ void sendAttrinfo(Signal* signal,
+ UintR TattrinfoPtr,
+ AttrbufRecord * const regAttrPtr,
+ UintR TBref);
+ void sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr);
+ void sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len);
+ void sendlqhkeyreq(Signal* signal, BlockReference TBRef);
+ void sendSystemError(Signal* signal);
+ void sendtckeyconf(Signal* signal, UintR TcommitFlag);
+ void sendTcIndxConf(Signal* signal, UintR TcommitFlag);
+ void unlinkApiConnect(Signal* signal);
+ void unlinkGcp(Signal* signal);
+ void unlinkReadyTcCon(Signal* signal);
+ void handleFailedOperation(Signal* signal,
+ const LqhKeyRef * const lqhKeyRef,
+ bool gotLqhKeyRef);
+ void markOperationAborted(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr);
+ void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr);
+ // Trigger and index handling
+ bool saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
+ bool saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllINDXATTRINFO(TcIndexOperation* indexOp);
+ bool saveTRANSID_AI(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
+ bool receivedAllTRANSID_AI(TcIndexOperation* indexOp);
+ void readIndexTable(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ void executeIndexOperation(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ bool seizeIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperationPtr& indexOpPtr);
+ void releaseIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp);
+ void releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr);
+ void setupIndexOpReturn(ApiConnectRecord* regApiPtr,
+ TcConnectRecord* regTcPtr);
+
+ void saveTriggeringOpState(Signal* signal,
+ TcConnectRecord* trigOp);
+ void restoreTriggeringOpState(Signal* signal,
+ TcConnectRecord* trigOp);
+ void continueTriggeringOp(Signal* signal,
+ TcConnectRecord* trigOp);
+
+ void scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr);
+ void executeTrigger(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void executeIndexTrigger(Signal* signal,
+ TcDefinedTriggerData* definedTriggerData,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr);
+ void insertIntoIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation = false);
+ void deleteFromIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation = false);
+ void releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers);
+ // Generated statement blocks
+ void warningHandlerLab(Signal* signal);
+ void systemErrorLab(Signal* signal);
+ void sendSignalErrorRefuseLab(Signal* signal);
+ void scanTabRefLab(Signal* signal, Uint32 errCode);
+ void diFcountReqLab(Signal* signal, ScanRecordPtr);
+ void signalErrorRefuseLab(Signal* signal);
+ void abort080Lab(Signal* signal);
+ void packKeyData000Lab(Signal* signal, BlockReference TBRef, Uint32 len);
+ void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode);
+ void sendAbortedAfterTimeout(Signal* signal, int Tcheck);
+ void abort010Lab(Signal* signal);
+ void abort015Lab(Signal* signal);
+ void packLqhkeyreq(Signal* signal, BlockReference TBRef);
+ void packLqhkeyreq040Lab(Signal* signal,
+ UintR anAttrBufIndex,
+ BlockReference TBRef);
+ void packLqhkeyreq040Lab(Signal* signal);
+ void returnFromQueuedDeliveryLab(Signal* signal);
+ void startTakeOverLab(Signal* signal);
+ void toCompleteHandlingLab(Signal* signal);
+ void toCommitHandlingLab(Signal* signal);
+ void toAbortHandlingLab(Signal* signal);
+ void abortErrorLab(Signal* signal);
+ void nodeTakeOverCompletedLab(Signal* signal);
+ void ndbsttorry010Lab(Signal* signal);
+ void commit020Lab(Signal* signal);
+ void complete010Lab(Signal* signal);
+ void releaseAtErrorLab(Signal* signal);
+ void seizeDatabuferrorLab(Signal* signal);
+ void scanAttrinfoLab(Signal* signal, UintR Tlen);
+ void seizeAttrbuferrorLab(Signal* signal);
+ void attrinfoDihReceivedLab(Signal* signal);
+ void aiErrorLab(Signal* signal);
+ void attrinfo020Lab(Signal* signal);
+ void scanReleaseResourcesLab(Signal* signal);
+ void scanCompletedLab(Signal* signal);
+ void scanError(Signal* signal, ScanRecordPtr, Uint32 errorCode);
+ void diverify010Lab(Signal* signal);
+ void intstartphase2x010Lab(Signal* signal);
+ void intstartphase3x010Lab(Signal* signal);
+ void sttorryLab(Signal* signal);
+ void abortBeginErrorLab(Signal* signal);
+ void tabStateErrorLab(Signal* signal);
+ void wrongSchemaVersionErrorLab(Signal* signal);
+ void noFreeConnectionErrorLab(Signal* signal);
+ void tckeyreq050Lab(Signal* signal);
+ void timeOutFoundLab(Signal* signal, UintR anAdd);
+ void completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd);
+ void completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd);
+ void completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd);
+ void timeOutLoopStartLab(Signal* signal, Uint32 apiConnectPtr);
+ void initialiseRecordsLab(Signal* signal, UintR Tdata0, Uint32, Uint32);
+ void tckeyreq020Lab(Signal* signal);
+ void intstartphase2x020Lab(Signal* signal);
+ void intstartphase1x010Lab(Signal* signal);
+ void startphase1x010Lab(Signal* signal);
+
+ void lqhKeyConf_checkTransactionState(Signal * signal,
+ ApiConnectRecord * const regApiPtr);
+
+ void checkDropTab(Signal* signal);
+
+ void checkScanActiveInFailedLqh(Signal* signal,
+ Uint32 scanPtrI,
+ Uint32 failedNodeId);
+ void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP,
+ LocalDLList<ScanFragRec>::Head&);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Transit signals
+
+
+ ApiConnectRecord *apiConnectRecord;
+ ApiConnectRecordPtr apiConnectptr;
+ UintR capiConnectFilesize;
+
+ TcConnectRecord *tcConnectRecord;
+ TcConnectRecordPtr tcConnectptr;
+ UintR ctcConnectFilesize;
+
+ CacheRecord *cacheRecord;
+ CacheRecordPtr cachePtr;
+ UintR ccacheFilesize;
+
+ AttrbufRecord *attrbufRecord;
+ AttrbufRecordPtr attrbufptr;
+ UintR cattrbufFilesize;
+
+ HostRecord *hostRecord;
+ HostRecordPtr hostptr;
+ UintR chostFilesize;
+
+ GcpRecord *gcpRecord;
+ GcpRecordPtr gcpPtr;
+ UintR cgcpFilesize;
+
+ TableRecord *tableRecord;
+ UintR ctabrecFilesize;
+
+ UintR thashValue;
+ UintR tdistrHashValue;
+
+ UintR ttransid_ptr;
+ UintR cfailure_nr;
+ UintR coperationsize;
+ UintR ctcTimer;
+
+ ApiConnectRecordPtr tmpApiConnectptr;
+ UintR tcheckGcpId;
+
+ struct TransCounters {
+ enum { Off, Timer, Started } c_trans_status;
+ UintR cattrinfoCount;
+ UintR ctransCount;
+ UintR ccommitCount;
+ UintR creadCount;
+ UintR csimpleReadCount;
+ UintR cwriteCount;
+ UintR cabortCount;
+ UintR cconcurrentOp;
+ Uint32 c_scan_count;
+ Uint32 c_range_scan_count;
+ void reset () {
+ cattrinfoCount = ctransCount = ccommitCount = creadCount =
+ csimpleReadCount = cwriteCount = cabortCount =
+ c_scan_count = c_range_scan_count = 0;
+ }
+ Uint32 report(Signal* signal){
+ signal->theData[0] = NDB_LE_TransReportCounters;
+ signal->theData[1] = ctransCount;
+ signal->theData[2] = ccommitCount;
+ signal->theData[3] = creadCount;
+ signal->theData[4] = csimpleReadCount;
+ signal->theData[5] = cwriteCount;
+ signal->theData[6] = cattrinfoCount;
+ signal->theData[7] = cconcurrentOp;
+ signal->theData[8] = cabortCount;
+ signal->theData[9] = c_scan_count;
+ signal->theData[10] = c_range_scan_count;
+ return 11;
+ }
+ } c_counters;
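+
+ /* Editor's note (grounded in DbtcMain.cpp in this same change): report()
+  * packs the per-second statistics into signal->theData and returns the
+  * number of words (11); execCONTINUEB forwards them roughly as
+  *   Uint32 len = c_counters.report(signal);
+  *   sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+  *   c_counters.reset();
+  * before rescheduling the next ZTRANS_EVENT_REP.
+  */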
+
+ Uint16 cownNodeid;
+ Uint16 terrorCode;
+
+ UintR cfirstfreeAttrbuf;
+ UintR cfirstfreeTcConnect;
+ UintR cfirstfreeApiConnectCopy;
+ UintR cfirstfreeCacheRec;
+
+ UintR cfirstgcp;
+ UintR clastgcp;
+ UintR cfirstfreeGcp;
+ UintR cfirstfreeScanrec;
+
+ TableRecordPtr tabptr;
+ UintR cfirstfreeApiConnectFail;
+ UintR cfirstfreeApiConnect;
+
+ UintR cfirstfreeDatabuf;
+ BlockReference cdihblockref;
+ BlockReference cownref; /* OWN BLOCK REFERENCE */
+
+ ApiConnectRecordPtr timeOutptr;
+
+ ScanRecord *scanRecord;
+ UintR cscanrecFileSize;
+
+ UnsafeArrayPool<ScanFragRec> c_scan_frag_pool;
+ ScanFragRecPtr scanFragptr;
+
+ UintR cscanFragrecFileSize;
+ UintR cdatabufFilesize;
+
+ BlockReference cdictblockref;
+ BlockReference cerrorBlockref;
+ BlockReference clqhblockref;
+ BlockReference cndbcntrblockref;
+
+ Uint16 csignalKey;
+ Uint16 csystemnodes;
+ Uint16 cnodes[4];
+ NodeId cmasterNodeId;
+ UintR cnoParallelTakeOver;
+ TimeOutCheckState ctimeOutCheckFragActive;
+
+ UintR ctimeOutCheckFragCounter;
+ UintR ctimeOutCheckCounter;
+ UintR ctimeOutValue;
+ UintR ctimeOutCheckDelay;
+ Uint32 ctimeOutCheckHeartbeat;
+ Uint32 ctimeOutCheckLastHeartbeat;
+ Uint32 ctimeOutMissedHeartbeats;
+ Uint32 c_appl_timeout_value;
+
+ SystemStartState csystemStart;
+ TimeOutCheckState ctimeOutCheckActive;
+
+ BlockReference capiFailRef;
+ UintR cpackedListIndex;
+ Uint16 cpackedList[MAX_NODES];
+ UintR capiConnectClosing[MAX_NODES];
+ UintR con_lineNodes;
+
+ DatabufRecord *databufRecord;
+ DatabufRecordPtr databufptr;
+ DatabufRecordPtr tmpDatabufptr;
+
+ UintR treqinfo;
+ UintR ttransid1;
+ UintR ttransid2;
+
+ UintR tabortInd;
+
+ NodeId tnodeid;
+ BlockReference tblockref;
+
+ LqhTransConf::OperationStatus ttransStatus;
+ UintR ttcOprec;
+ NodeId tfailedNodeId;
+ Uint8 tcurrentReplicaNo;
+ Uint8 tpad1;
+
+ UintR tgci;
+ UintR tapplRef;
+ UintR tapplOprec;
+
+ UintR tindex;
+ UintR tmaxData;
+ UintR tmp;
+
+ UintR tnodes;
+ BlockReference tusersblkref;
+ UintR tuserpointer;
+ UintR tloadCode;
+
+ UintR tconfig1;
+ UintR tconfig2;
+
+ UintR cdata[32];
+ UintR ctransidFailHash[512];
+ UintR ctcConnectFailHash[1024];
+
+ /**
+ * Commit Ack handling
+ */
+public:
+ struct CommitAckMarker {
+ Uint32 transid1;
+ Uint32 transid2;
+ union { Uint32 nextPool; Uint32 nextHash; };
+ Uint32 prevHash;
+ Uint32 apiConnectPtr;
+ Uint16 apiNodeId;
+ Uint16 noOfLqhs;
+ Uint16 lqhNodeId[MAX_REPLICAS];
+
+ inline bool equal(const CommitAckMarker & p) const {
+ return ((p.transid1 == transid1) && (p.transid2 == transid2));
+ }
+
+ inline Uint32 hashValue() const {
+ return transid1;
+ }
+ };
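+
+ /* Editor's sketch (not part of the original header): the DLHashTable
+  * declared just below relies on these two members; a lookup by transaction
+  * id would go along the lines of
+  *   CommitAckMarker key;
+  *   key.transid1 = tid1; key.transid2 = tid2;
+  *   CommitAckMarkerPtr markerPtr;
+  *   m_commitAckMarkerHash.find(markerPtr, key); // uses hashValue()+equal()
+  * where find() is assumed to have the usual NDB hash-table signature.
+  */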
+private:
+ typedef Ptr<CommitAckMarker> CommitAckMarkerPtr;
+ typedef DLHashTable<CommitAckMarker>::Iterator CommitAckMarkerIterator;
+
+ ArrayPool<CommitAckMarker> m_commitAckMarkerPool;
+ DLHashTable<CommitAckMarker> m_commitAckMarkerHash;
+
+ void execTC_COMMIT_ACK(Signal* signal);
+ void sendRemoveMarkers(Signal*, const CommitAckMarker *);
+ void sendRemoveMarker(Signal* signal,
+ NodeId nodeId,
+ Uint32 transid1,
+ Uint32 transid2);
+ void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket);
+
+ bool getAllowStartTransaction() const {
+ if(getNodeState().getSingleUserMode())
+ return true;
+ return getNodeState().startLevel < NodeState::SL_STOPPING_2;
+ }
+
+ void checkAbortAllTimeout(Signal* signal, Uint32 sleepTime);
+ struct AbortAllRecord {
+ AbortAllRecord(){ clientRef = 0; }
+ Uint32 clientData;
+ BlockReference clientRef;
+
+ Uint32 oldTimeOutValue;
+ };
+ AbortAllRecord c_abortRec;
+
+ /************************** API CONNECT RECORD ***********************/
+ /* *******************************************************************/
+ /* THE API CONNECT RECORD CONTAINS THE CONNECTION RECORD TO WHICH THE*/
+ /* APPLICATION CONNECTS. THE APPLICATION CAN SEND ONE OPERATION AT A */
+ /* TIME. IT CAN SEND A NEW OPERATION IMMEDIATELY AFTER SENDING THE */
+ /* PREVIOUS OPERATION. THEREBY SEVERAL OPERATIONS CAN BE ACTIVE IN */
+ /* ONE TRANSACTION WITHIN TC. THIS IS ACHIEVED BY USING THE API */
+ /* CONNECT RECORD. EACH ACTIVE OPERATION IS HANDLED BY THE TC */
+ /* CONNECT RECORD. AS SOON AS THE TC CONNECT RECORD HAS SENT THE */
+ /* REQUEST TO THE LQH IT IS READY TO RECEIVE NEW OPERATIONS. THE */
+ /* LQH CONNECT RECORD TAKES CARE OF WAITING FOR AN OPERATION TO */
+ /* COMPLETE. WHEN AN OPERATION HAS COMPLETED ON THE LQH CONNECT */
+ /* RECORD A NEW OPERATION CAN BE STARTED ON THIS LQH CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* */
+ /* API CONNECT RECORD ALIGNED TO BE 256 BYTES */
+ /*******************************************************************>*/
+ /************************** TC CONNECT RECORD ************************/
+ /* *******************************************************************/
+ /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/
+ /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */
+ /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */
+ /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */
+ /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */
+ /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */
+ /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */
+ /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */
+ /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */
+ /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */
+ /* EXECUTED WITH THE TC CONNECT RECORD. */
+ /*******************************************************************>*/
+ /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */
+ /*******************************************************************>*/
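+ /* Editor's sketch of the relationship described in the two comment blocks
+  * above (illustrative only):
+  *   ApiConnectRecord  -- one per transaction opened from the API
+  *     -> TcConnectRecord(s) -- one per operation within that transaction
+  *        -> each drives one LQHKEYREQ towards the LQH(s) holding the data
+  */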
+ UintR cfirstfreeTcConnectFail;
+
+ /* POINTER FOR THE LQH RECORD*/
+ /* ************************ HOST RECORD ********************************* */
+ /********************************************************/
+ /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/
+ /* SYSTEM */
+ /********************************************************/
+ /* THIS RECORD IS ALIGNED TO BE 8 BYTES. */
+ /********************************************************/
+ /* ************************ TABLE RECORD ******************************** */
+ /********************************************************/
+ /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
+ /* ALL TABLES IN THE SYSTEM. */
+ /********************************************************/
+ /*-------------------------------------------------------------------------*/
+ /* THE TC CONNECTION USED BY THIS SCAN. */
+ /*-------------------------------------------------------------------------*/
+ /*-------------------------------------------------------------------------*/
+ /* LENGTH READ FOR A PARTICULAR SCANNED OPERATION. */
+ /*-------------------------------------------------------------------------*/
+ /*-------------------------------------------------------------------------*/
+ /* REFERENCE TO THE SCAN RECORD FOR THIS SCAN PROCESS. */
+ /*-------------------------------------------------------------------------*/
+ /* *********************************************************************** */
+ /* ******$ DATA BUFFER ******$ */
+ /* */
+ /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */
+ /* *********************************************************************** */
+ /* *********************************************************************** */
+ /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */
+ /*
+ CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR.
+ INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY.
+ SOME OF THE ELEMENTS ARE USED FOR THE FOLLOWING THINGS:
+ DATA LENGTH IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY
+ ZINBUF_DATA_LEN.
+ NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY
+ PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV
+ (NOT USED YET).
+ NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT.
+ */
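+ /* Editor's note: reading the stored data length would, with the element
+  * names above, look roughly like
+  *   Uint32 len = attrbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+  * (the member name attrbuf is assumed here for illustration).
+  */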
+ /* ********************************************************************** */
+ /**************************************************************************/
+ /* GLOBAL CHECKPOINT INFORMATION RECORD */
+ /* */
+ /* THIS RECORD IS USED TO STORE THE GCP NUMBER AND A COUNTER */
+ /* DURING THE COMPLETION PHASE OF THE TRANSACTION */
+ /**************************************************************************/
+ /* */
+ /* GCP RECORD ALIGNED TO BE 32 BYTES */
+ /**************************************************************************/
+ /**************************************************************************/
+ /* TC_FAIL_RECORD */
+ /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED TC NODE.*/
+ /**************************************************************************/
+ TcFailRecord *tcFailRecord;
+ TcFailRecordPtr tcNodeFailptr;
+ /**************************************************************************/
+ // Temporary variables that must not be used for storage between signals.
+ // They can only be used within a signal to transfer values between
+ // subroutines. In the long run these variables should be removed and
+ // replaced by stack-variable communication.
+ /**************************************************************************/
+};
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
new file mode 100644
index 00000000000..59c8237f20a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -0,0 +1,368 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTC_C
+#include "Dbtc.hpp"
+#include <pc.hpp>
+#include <ndb_limits.h>
+#include <Properties.hpp>
+#include <Configuration.hpp>
+
+#define DEBUG(x) { ndbout << "TC::" << x << endl; }
+
+
+void Dbtc::initData()
+{
+ cattrbufFilesize = ZATTRBUF_FILESIZE;
+ capiConnectFilesize = ZAPI_CONNECT_FILESIZE;
+ ccacheFilesize = ZAPI_CONNECT_FILESIZE;
+ chostFilesize = MAX_NODES;
+ cdatabufFilesize = ZDATABUF_FILESIZE;
+ cgcpFilesize = ZGCP_FILESIZE;
+ cscanrecFileSize = ZSCANREC_FILE_SIZE;
+ cscanFragrecFileSize = ZSCAN_FRAGREC_FILE_SIZE;
+ ctabrecFilesize = ZTABREC_FILESIZE;
+ ctcConnectFilesize = ZTC_CONNECT_FILESIZE;
+ cdihblockref = DBDIH_REF;
+ cdictblockref = DBDICT_REF;
+ clqhblockref = DBLQH_REF;
+ cerrorBlockref = NDBCNTR_REF;
+
+ cacheRecord = 0;
+ apiConnectRecord = 0;
+ tcConnectRecord = 0;
+ hostRecord = 0;
+ tableRecord = 0;
+ scanRecord = 0;
+ databufRecord = 0;
+ attrbufRecord = 0;
+ gcpRecord = 0;
+ tcFailRecord = 0;
+ c_apiConTimer = 0;
+ c_apiConTimer_line = 0;
+ // Records with constant sizes
+ tcFailRecord = (TcFailRecord*)allocRecord("TcFailRecord",
+ sizeof(TcFailRecord), 1);
+
+ // Variables
+ ctcTimer = 0;
+
+ // Trigger and index pools
+ c_theDefinedTriggerPool.setSize(c_maxNumberOfDefinedTriggers);
+ c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers);
+ c_theIndexPool.setSize(c_maxNumberOfIndexes);
+ c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
+ c_theSeizedIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
+ c_theAttributeBufferPool.setSize(c_transactionBufferSpace);
+ c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10);
+}//Dbtc::initData()
+
+void Dbtc::initRecords()
+{
+ void *p;
+ // Records with dynamic sizes
+ cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
+ sizeof(CacheRecord),
+ ccacheFilesize);
+
+ apiConnectRecord = (ApiConnectRecord*)allocRecord("ApiConnectRecord",
+ sizeof(ApiConnectRecord),
+ capiConnectFilesize);
+
+ for(unsigned i = 0; i<capiConnectFilesize; i++) {
+ p = &apiConnectRecord[i];
+ new (p) ApiConnectRecord(c_theFiredTriggerPool,
+ c_theSeizedIndexOperationPool);
+ }
+ // Init all fired triggers
+ DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
+ FiredTriggerPtr tptr;
+ while(triggers.seize(tptr) == true) {
+ p= tptr.p;
+ new (p) TcFiredTriggerData();
+ }
+ triggers.release();
+
+ /*
+ // Init all index records
+ ArrayList<TcIndexData> indexes(c_theIndexPool);
+ TcIndexDataPtr iptr;
+ while(indexes.seize(iptr) == true) {
+ new (iptr.p) TcIndexData(c_theAttrInfoListPool);
+ }
+ indexes.release();
+ */
+
+ // Init all index operation records
+ ArrayList<TcIndexOperation> indexOps(c_theIndexOperationPool);
+ TcIndexOperationPtr ioptr;
+ while(indexOps.seize(ioptr) == true) {
+ p= ioptr.p;
+ new (p) TcIndexOperation(c_theAttributeBufferPool);
+ }
+ indexOps.release();
+
+ c_apiConTimer = (UintR*)allocRecord("ApiConTimer",
+ sizeof(UintR),
+ capiConnectFilesize);
+
+ c_apiConTimer_line = (UintR*)allocRecord("ApiConTimer_line",
+ sizeof(UintR),
+ capiConnectFilesize);
+
+ tcConnectRecord = (TcConnectRecord*)allocRecord("TcConnectRecord",
+ sizeof(TcConnectRecord),
+ ctcConnectFilesize);
+
+ m_commitAckMarkerPool.setSize(capiConnectFilesize);
+ m_commitAckMarkerHash.setSize(512);
+
+ hostRecord = (HostRecord*)allocRecord("HostRecord",
+ sizeof(HostRecord),
+ chostFilesize);
+
+ tableRecord = (TableRecord*)allocRecord("TableRecord",
+ sizeof(TableRecord),
+ ctabrecFilesize);
+
+ scanRecord = (ScanRecord*)allocRecord("ScanRecord",
+ sizeof(ScanRecord),
+ cscanrecFileSize);
+
+
+ c_scan_frag_pool.setSize(cscanFragrecFileSize);
+ {
+ ScanFragRecPtr ptr;
+ SLList<ScanFragRec> tmp(c_scan_frag_pool);
+ while(tmp.seize(ptr)) {
+ new (ptr.p) ScanFragRec();
+ }
+ tmp.release();
+ }
+
+ indexOps.release();
+
+ databufRecord = (DatabufRecord*)allocRecord("DatabufRecord",
+ sizeof(DatabufRecord),
+ cdatabufFilesize);
+
+ attrbufRecord = (AttrbufRecord*)allocRecord("AttrbufRecord",
+ sizeof(AttrbufRecord),
+ cattrbufFilesize);
+
+ gcpRecord = (GcpRecord*)allocRecord("GcpRecord",
+ sizeof(GcpRecord),
+ cgcpFilesize);
+
+}//Dbtc::initRecords()
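+
+/* Editor's note on the pattern above: allocRecord() only hands back raw
+ * memory, so record types with non-trivial constructors (ApiConnectRecord,
+ * TcFiredTriggerData, TcIndexOperation, ScanFragRec) are additionally
+ * initialised with placement new. Pool-backed records are seize()d through a
+ * temporary list so that every slot is visited, then release()d again, e.g.
+ *   SLList<ScanFragRec> tmp(c_scan_frag_pool);
+ *   while(tmp.seize(ptr)) { new (ptr.p) ScanFragRec(); }
+ *   tmp.release();
+ */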
+
+Dbtc::Dbtc(const class Configuration & conf):
+ SimulatedBlock(DBTC, conf),
+ c_theDefinedTriggers(c_theDefinedTriggerPool),
+ c_firedTriggerHash(c_theFiredTriggerPool),
+ c_maxNumberOfDefinedTriggers(0),
+ c_maxNumberOfFiredTriggers(0),
+ c_theIndexes(c_theIndexPool),
+ c_maxNumberOfIndexes(0),
+ c_theIndexOperations(c_theIndexOperationPool),
+ c_maxNumberOfIndexOperations(0),
+ m_commitAckMarkerHash(m_commitAckMarkerPool)
+{
+ BLOCK_CONSTRUCTOR(Dbtc);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 transactionBufferMemory = 0;
+ Uint32 maxNoOfIndexes = 0, maxNoOfConcurrentIndexOperations = 0;
+ Uint32 maxNoOfTriggers = 0, maxNoOfFiredTriggers = 0;
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
+ &transactionBufferMemory);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_UNIQUE_HASH_INDEXES,
+ &maxNoOfIndexes);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
+ &maxNoOfConcurrentIndexOperations);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
+ &maxNoOfTriggers);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGER_OPS,
+ &maxNoOfFiredTriggers);
+
+ c_transactionBufferSpace =
+ transactionBufferMemory / AttributeBuffer::getSegmentSize();
+ c_maxNumberOfIndexes = maxNoOfIndexes;
+ c_maxNumberOfIndexOperations = maxNoOfConcurrentIndexOperations;
+ c_maxNumberOfDefinedTriggers = maxNoOfTriggers;
+ c_maxNumberOfFiredTriggers = maxNoOfFiredTriggers;
+
+ // Transit signals
+ addRecSignal(GSN_PACKED_SIGNAL, &Dbtc::execPACKED_SIGNAL);
+ addRecSignal(GSN_ABORTED, &Dbtc::execABORTED);
+ addRecSignal(GSN_ATTRINFO, &Dbtc::execATTRINFO);
+ addRecSignal(GSN_CONTINUEB, &Dbtc::execCONTINUEB);
+ addRecSignal(GSN_KEYINFO, &Dbtc::execKEYINFO);
+ addRecSignal(GSN_SCAN_NEXTREQ, &Dbtc::execSCAN_NEXTREQ);
+ addRecSignal(GSN_TAKE_OVERTCREQ, &Dbtc::execTAKE_OVERTCREQ);
+ addRecSignal(GSN_TAKE_OVERTCCONF, &Dbtc::execTAKE_OVERTCCONF);
+ addRecSignal(GSN_LQHKEYREF, &Dbtc::execLQHKEYREF);
+
+ // Received signals
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbtc::execDUMP_STATE_ORD);
+ addRecSignal(GSN_SEND_PACKED, &Dbtc::execSEND_PACKED);
+ addRecSignal(GSN_SCAN_HBREP, &Dbtc::execSCAN_HBREP);
+ addRecSignal(GSN_COMPLETED, &Dbtc::execCOMPLETED);
+ addRecSignal(GSN_COMMITTED, &Dbtc::execCOMMITTED);
+ addRecSignal(GSN_DIGETPRIMCONF, &Dbtc::execDIGETPRIMCONF);
+ addRecSignal(GSN_DIGETPRIMREF, &Dbtc::execDIGETPRIMREF);
+ addRecSignal(GSN_DISEIZECONF, &Dbtc::execDISEIZECONF);
+ addRecSignal(GSN_DIVERIFYCONF, &Dbtc::execDIVERIFYCONF);
+ addRecSignal(GSN_DI_FCOUNTCONF, &Dbtc::execDI_FCOUNTCONF);
+ addRecSignal(GSN_DI_FCOUNTREF, &Dbtc::execDI_FCOUNTREF);
+ addRecSignal(GSN_GCP_NOMORETRANS, &Dbtc::execGCP_NOMORETRANS);
+ addRecSignal(GSN_LQHKEYCONF, &Dbtc::execLQHKEYCONF);
+ addRecSignal(GSN_NDB_STTOR, &Dbtc::execNDB_STTOR);
+ addRecSignal(GSN_READ_NODESCONF, &Dbtc::execREAD_NODESCONF);
+ addRecSignal(GSN_READ_NODESREF, &Dbtc::execREAD_NODESREF);
+ addRecSignal(GSN_STTOR, &Dbtc::execSTTOR);
+ addRecSignal(GSN_TC_COMMITREQ, &Dbtc::execTC_COMMITREQ);
+ addRecSignal(GSN_TC_CLOPSIZEREQ, &Dbtc::execTC_CLOPSIZEREQ);
+ addRecSignal(GSN_TCGETOPSIZEREQ, &Dbtc::execTCGETOPSIZEREQ);
+ addRecSignal(GSN_TCKEYREQ, &Dbtc::execTCKEYREQ);
+ addRecSignal(GSN_TCRELEASEREQ, &Dbtc::execTCRELEASEREQ);
+ addRecSignal(GSN_TCSEIZEREQ, &Dbtc::execTCSEIZEREQ);
+ addRecSignal(GSN_TCROLLBACKREQ, &Dbtc::execTCROLLBACKREQ);
+ addRecSignal(GSN_TC_HBREP, &Dbtc::execTC_HBREP);
+ addRecSignal(GSN_TC_SCHVERREQ, &Dbtc::execTC_SCHVERREQ);
+ addRecSignal(GSN_SCAN_TABREQ, &Dbtc::execSCAN_TABREQ);
+ addRecSignal(GSN_SCAN_FRAGCONF, &Dbtc::execSCAN_FRAGCONF);
+ addRecSignal(GSN_SCAN_FRAGREF, &Dbtc::execSCAN_FRAGREF);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbtc::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_LQH_TRANSCONF, &Dbtc::execLQH_TRANSCONF);
+ addRecSignal(GSN_COMPLETECONF, &Dbtc::execCOMPLETECONF);
+ addRecSignal(GSN_COMMITCONF, &Dbtc::execCOMMITCONF);
+ addRecSignal(GSN_ABORTCONF, &Dbtc::execABORTCONF);
+ addRecSignal(GSN_NODE_FAILREP, &Dbtc::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
+ addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
+ addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
+ addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ);
+
+ addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
+ addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);
+
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtc::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dbtc::execDROP_TRIG_REQ);
+ addRecSignal(GSN_FIRE_TRIG_ORD, &Dbtc::execFIRE_TRIG_ORD);
+ addRecSignal(GSN_TRIG_ATTRINFO, &Dbtc::execTRIG_ATTRINFO);
+
+ addRecSignal(GSN_CREATE_INDX_REQ, &Dbtc::execCREATE_INDX_REQ);
+ addRecSignal(GSN_DROP_INDX_REQ, &Dbtc::execDROP_INDX_REQ);
+ addRecSignal(GSN_TCINDXREQ, &Dbtc::execTCINDXREQ);
+ addRecSignal(GSN_INDXKEYINFO, &Dbtc::execINDXKEYINFO);
+ addRecSignal(GSN_INDXATTRINFO, &Dbtc::execINDXATTRINFO);
+ addRecSignal(GSN_ALTER_INDX_REQ, &Dbtc::execALTER_INDX_REQ);
+
+ addRecSignal(GSN_TRANSID_AI_R, &Dbtc::execTRANSID_AI_R);
+ addRecSignal(GSN_KEYINFO20_R, &Dbtc::execKEYINFO20_R);
+
+ // Index table lookup
+ addRecSignal(GSN_TCKEYCONF, &Dbtc::execTCKEYCONF);
+ addRecSignal(GSN_TCKEYREF, &Dbtc::execTCKEYREF);
+ addRecSignal(GSN_TRANSID_AI, &Dbtc::execTRANSID_AI);
+ addRecSignal(GSN_TCROLLBACKREP, &Dbtc::execTCROLLBACKREP);
+
+ //addRecSignal(GSN_CREATE_TAB_REQ, &Dbtc::execCREATE_TAB_REQ);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbtc::execDROP_TAB_REQ);
+ addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbtc::execPREP_DROP_TAB_REQ);
+ addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbtc::execWAIT_DROP_TAB_REF);
+ addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbtc::execWAIT_DROP_TAB_CONF);
+
+ addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ);
+
+ initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = { &apiConnectptr,
+ &tcConnectptr,
+ &cachePtr,
+ &attrbufptr,
+ &hostptr,
+ &gcpPtr,
+ &tmpApiConnectptr,
+ &timeOutptr,
+ &scanFragptr,
+ &databufptr,
+ &tmpDatabufptr };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
+}//Dbtc::Dbtc()
+
+Dbtc::~Dbtc()
+{
+ // Records with dynamic sizes
+ deallocRecord((void **)&cacheRecord, "CacheRecord",
+ sizeof(CacheRecord),
+ ccacheFilesize);
+
+ deallocRecord((void **)&apiConnectRecord, "ApiConnectRecord",
+ sizeof(ApiConnectRecord),
+ capiConnectFilesize);
+
+ deallocRecord((void **)&tcConnectRecord, "TcConnectRecord",
+ sizeof(TcConnectRecord),
+ ctcConnectFilesize);
+
+ deallocRecord((void **)&hostRecord, "HostRecord",
+ sizeof(HostRecord),
+ chostFilesize);
+
+ deallocRecord((void **)&tableRecord, "TableRecord",
+ sizeof(TableRecord),
+ ctabrecFilesize);
+
+ deallocRecord((void **)&scanRecord, "ScanRecord",
+ sizeof(ScanRecord),
+ cscanrecFileSize);
+
+ deallocRecord((void **)&databufRecord, "DatabufRecord",
+ sizeof(DatabufRecord),
+ cdatabufFilesize);
+
+ deallocRecord((void **)&attrbufRecord, "AttrbufRecord",
+ sizeof(AttrbufRecord),
+ cattrbufFilesize);
+
+ deallocRecord((void **)&gcpRecord, "GcpRecord",
+ sizeof(GcpRecord),
+ cgcpFilesize);
+
+ deallocRecord((void **)&tcFailRecord, "TcFailRecord",
+ sizeof(TcFailRecord), 1);
+
+ deallocRecord((void **)&c_apiConTimer, "ApiConTimer",
+ sizeof(UintR),
+ capiConnectFilesize);
+
+ deallocRecord((void **)&c_apiConTimer_line, "ApiConTimer",
+ sizeof(UintR),
+ capiConnectFilesize);
+}//Dbtc::~Dbtc()
+
+BLOCK_FUNCTIONS(Dbtc)
+
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
new file mode 100644
index 00000000000..17d55176830
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -0,0 +1,13098 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTC_C
+
+#include "Dbtc.hpp"
+#include "md5_hash.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <my_sys.h>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/TcKeyConf.hpp>
+#include <signaldata/TcKeyRef.hpp>
+#include <signaldata/KeyInfo.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/TcRollbackRep.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/LqhKey.hpp>
+#include <signaldata/TcCommit.hpp>
+#include <signaldata/TcContinueB.hpp>
+#include <signaldata/TcKeyFailConf.hpp>
+#include <signaldata/AbortAll.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/CreateIndx.hpp>
+#include <signaldata/DropIndx.hpp>
+#include <signaldata/AlterIndx.hpp>
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/SystemError.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/DisconnectRep.hpp>
+#include <signaldata/TcHbRep.hpp>
+
+#include <signaldata/PrepDropTab.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/TcIndx.hpp>
+#include <signaldata/IndxKeyInfo.hpp>
+#include <signaldata/IndxAttrInfo.hpp>
+#include <signaldata/PackedSignal.hpp>
+#include <AttributeHeader.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <AttributeDescriptor.hpp>
+#include <SectionReader.hpp>
+
+#include <NdbOut.hpp>
+#include <DebuggerNames.hpp>
+
+// Use DEBUG to print messages that should be
+// seen only when we debug the product
+#ifdef VM_TRACE
+#define DEBUG(x) ndbout << "DBTC: "<< x << endl;
+#else
+#define DEBUG(x)
+#endif
+
+#define INTERNAL_TRIGGER_TCKEYREQ_JBA 0
+
+#ifdef VM_TRACE
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ConnectionState state){
+ switch(state){
+ case Dbtc::CS_CONNECTED: out << "CS_CONNECTED"; break;
+ case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break;
+ case Dbtc::CS_STARTED: out << "CS_STARTED"; break;
+ case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break;
+ case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break;
+ case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break;
+ case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break;
+ case Dbtc::CS_RESTART: out << "CS_RESTART"; break;
+ case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break;
+ case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break;
+ case Dbtc::CS_COMPLETE_SENT: out << "CS_COMPLETE_SENT"; break;
+ case Dbtc::CS_PREPARE_TO_COMMIT: out << "CS_PREPARE_TO_COMMIT"; break;
+ case Dbtc::CS_COMMIT_SENT: out << "CS_COMMIT_SENT"; break;
+ case Dbtc::CS_START_COMMITTING: out << "CS_START_COMMITTING"; break;
+ case Dbtc::CS_COMMITTING: out << "CS_COMMITTING"; break;
+ case Dbtc::CS_REC_COMMITTING: out << "CS_REC_COMMITTING"; break;
+ case Dbtc::CS_WAIT_ABORT_CONF: out << "CS_WAIT_ABORT_CONF"; break;
+ case Dbtc::CS_WAIT_COMPLETE_CONF: out << "CS_WAIT_COMPLETE_CONF"; break;
+ case Dbtc::CS_WAIT_COMMIT_CONF: out << "CS_WAIT_COMMIT_CONF"; break;
+ case Dbtc::CS_FAIL_ABORTING: out << "CS_FAIL_ABORTING"; break;
+ case Dbtc::CS_FAIL_ABORTED: out << "CS_FAIL_ABORTED"; break;
+ case Dbtc::CS_FAIL_PREPARED: out << "CS_FAIL_PREPARED"; break;
+ case Dbtc::CS_FAIL_COMMITTING: out << "CS_FAIL_COMMITTING"; break;
+ case Dbtc::CS_FAIL_COMMITTED: out << "CS_FAIL_COMMITTED"; break;
+ case Dbtc::CS_FAIL_COMPLETED: out << "CS_FAIL_COMPLETED"; break;
+ case Dbtc::CS_START_SCAN: out << "CS_START_SCAN"; break;
+ default:
+ out << "Unknown: " << (int)state; break;
+ }
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::OperationState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::AbortState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ReturnSignal state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanRecord::ScanState state){
+ out << (int)state;
+ return out;
+}
+NdbOut &
+operator<<(NdbOut& out, Dbtc::ScanFragRec::ScanFragState state){
+ out << (int)state;
+ return out;
+}
+#endif
+
+void
+Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr)
+{
+ if (apiPtr.p->buddyPtr != RNIL) {
+ jam();
+ ApiConnectRecordPtr buddyApiPtr;
+ buddyApiPtr.i = apiPtr.p->buddyPtr;
+ ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
+ if (getApiConTimer(buddyApiPtr.i) != 0) {
+ if ((apiPtr.p->transid[0] == buddyApiPtr.p->transid[0]) &&
+ (apiPtr.p->transid[1] == buddyApiPtr.p->transid[1])) {
+ jam();
+ setApiConTimer(buddyApiPtr.i, ctcTimer, __LINE__);
+ } else {
+ jam();
+ // Not a buddy anymore since not the same transid
+ apiPtr.p->buddyPtr = RNIL;
+ }//if
+ }//if
+ }//if
+}
+
+void Dbtc::execCONTINUEB(Signal* signal)
+{
+ UintR tcase;
+
+ jamEntry();
+ tcase = signal->theData[0];
+ UintR Tdata0 = signal->theData[1];
+ UintR Tdata1 = signal->theData[2];
+ UintR Tdata2 = signal->theData[3];
+ switch (tcase) {
+ case TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY:
+ jam();
+ ndbrequire(false);
+ return;
+ case TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER:
+ jam();
+ tcNodeFailptr.i = Tdata0;
+ ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
+ completeTransAtTakeOverLab(signal, Tdata1);
+ return;
+ case TcContinueB::ZCONTINUE_TIME_OUT_CONTROL:
+ jam();
+ timeOutLoopStartLab(signal, Tdata0);
+ return;
+ case TcContinueB::ZNODE_TAKE_OVER_COMPLETED:
+ jam();
+ tnodeid = Tdata0;
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ nodeTakeOverCompletedLab(signal);
+ return;
+ case TcContinueB::ZINITIALISE_RECORDS:
+ jam();
+ initialiseRecordsLab(signal, Tdata0, Tdata2, signal->theData[4]);
+ return;
+ case TcContinueB::ZSEND_COMMIT_LOOP:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ tcConnectptr.i = Tdata1;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ commit020Lab(signal);
+ return;
+ case TcContinueB::ZSEND_COMPLETE_LOOP:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ tcConnectptr.i = Tdata1;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ complete010Lab(signal);
+ return;
+ case TcContinueB::ZHANDLE_FAILED_API_NODE:
+ jam();
+ handleFailedApiNode(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::ZTRANS_EVENT_REP:
+ jam();
+ /* -------------------------------------------------------------------- */
+ // Report information about transaction activity once per second.
+ /* -------------------------------------------------------------------- */
+ if (c_counters.c_trans_status == TransCounters::Timer){
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
+ return;
+ case TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL:
+ jam();
+ timeOutLoopStartFragLab(signal, Tdata0);
+ return;
+ case TcContinueB::ZABORT_BREAK:
+ jam();
+ tcConnectptr.i = Tdata0;
+ apiConnectptr.i = Tdata1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->counter--;
+ abort015Lab(signal);
+ return;
+ case TcContinueB::ZABORT_TIMEOUT_BREAK:
+ jam();
+ tcConnectptr.i = Tdata0;
+ apiConnectptr.i = Tdata1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->counter--;
+ sendAbortedAfterTimeout(signal, 1);
+ return;
+ case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS:
+ jam();
+ removeMarkerForFailedAPI(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::ZWAIT_ABORT_ALL:
+ jam();
+ checkAbortAllTimeout(signal, Tdata0);
+ return;
+ case TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH:
+ jam();
+ checkScanActiveInFailedLqh(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:
+ jam();
+ checkWaitDropTabFailedLqh(signal, Tdata0, Tdata1);
+ return;
+ case TcContinueB::TRIGGER_PENDING:
+ jam();
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = Tdata0;
+ ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
+ transPtr.p->triggerPending = false;
+ executeTriggers(signal, &transPtr);
+ return;
+ case TcContinueB::DelayTCKEYCONF:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ sendtckeyconf(signal, Tdata1);
+ return;
+ default:
+ ndbrequire(false);
+ }//switch
+}
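+
+/* Editor's note (illustrative, not part of the original block): CONTINUEB is
+ * the block's own "continue later" signal. A long-running job saves its state
+ * in signal->theData[], sends GSN_CONTINUEB to itself (optionally delayed) and
+ * resumes in the matching case above. For example, handleFailedApiNode()
+ * further down reschedules itself with:
+ *   signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE;
+ *   signal->theData[1] = TapiFailedNode;
+ *   signal->theData[2] = apiConnectptr.i;
+ *   sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ */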
+
+void Dbtc::execDIGETNODESREF(Signal* signal)
+{
+ jamEntry();
+ terrorCode = signal->theData[1];
+ releaseAtErrorLab(signal);
+}
+
+void Dbtc::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ tblockref = signal->theData[0];
+ hostptr.i = signal->theData[1];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ hostptr.p->hostStatus = HS_ALIVE;
+ hostptr.p->takeOverStatus = TOS_IDLE;
+ signal->theData[0] = cownref;
+ sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB);
+}
+
+void Dbtc::execREAD_NODESREF(Signal* signal)
+{
+ jamEntry();
+ ndbrequire(false);
+}
+
+void Dbtc::execTC_SCHVERREQ(Signal* signal)
+{
+ jamEntry();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
+ tabptr.i = signal->theData[0];
+ ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord);
+ tabptr.p->currentSchemaVersion = signal->theData[1];
+ tabptr.p->storedTable = (bool)signal->theData[2];
+ BlockReference retRef = signal->theData[3];
+ tabptr.p->tableType = (Uint8)signal->theData[4];
+ BlockReference retPtr = signal->theData[5];
+ Uint32 noOfKeyAttr = signal->theData[6];
+ ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
+ Uint32 hasCharAttr = 0;
+ Uint32 noOfDistrKeys = 0;
+ SegmentedSectionPtr s0Ptr;
+ signal->getSection(s0Ptr, 0);
+ SectionReader r0(s0Ptr, getSectionSegmentPool());
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ jam();
+ Uint32 attributeDescriptor = ~0;
+ Uint32 csNumber = ~0;
+ if (! r0.getWord(&attributeDescriptor) ||
+ ! r0.getWord(&csNumber)) {
+ jam();
+ break;
+ }
+ CHARSET_INFO* cs = 0;
+ if (csNumber != 0) {
+ cs = all_charsets[csNumber];
+ ndbrequire(cs != 0);
+ hasCharAttr = 1;
+ }
+
+ noOfDistrKeys += AttributeDescriptor::getDKey(attributeDescriptor);
+ tabptr.p->keyAttr[i].attributeDescriptor = attributeDescriptor;
+ tabptr.p->keyAttr[i].charsetInfo = cs;
+ i++;
+ }
+ ndbrequire(i == noOfKeyAttr);
+ releaseSections(signal);
+
+ ndbrequire(tabptr.p->enabled == false);
+ tabptr.p->enabled = true;
+ tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = noOfKeyAttr;
+ tabptr.p->hasCharAttr = hasCharAttr;
+ tabptr.p->noOfDistrKeys = noOfDistrKeys;
+
+ signal->theData[0] = tabptr.i;
+ signal->theData[1] = retPtr;
+ sendSignal(retRef, GSN_TC_SCHVERCONF, signal, 2, JBB);
+}//Dbtc::execTC_SCHVERREQ()
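+
+/* Editor's note: the section attached to TC_SCHVERREQ carries two words per
+ * key attribute (attribute descriptor, then charset number); the loop above
+ * only accepts an attribute once both SectionReader::getWord() calls succeed,
+ * and the final ndbrequire(i == noOfKeyAttr) verifies the section length.
+ */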
+
+void
+Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+
+ PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+
+ if(!tabPtr.p->enabled){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = PrepDropTabRef::NoSuchTable;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ if(tabPtr.p->dropping){
+ jam();
+ PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = PrepDropTabRef::DropInProgress;
+ sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal,
+ PrepDropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->dropping = true;
+ tabPtr.p->dropTable.senderRef = senderRef;
+ tabPtr.p->dropTable.senderData = senderData;
+
+ {
+ WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend();
+ req->tableId = tabPtr.i;
+ req->senderRef = reference();
+
+ HostRecordPtr hostPtr;
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor();
+ for (hostPtr.i = 1; hostPtr.i < MAX_NDB_NODES; hostPtr.i++) {
+ jam();
+ ptrAss(hostPtr, hostRecord);
+ if (hostPtr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tabPtr.p->dropTable.waitDropTabCount.setWaitingFor(hostPtr.i);
+ sendSignal(calcLqhBlockRef(hostPtr.i), GSN_WAIT_DROP_TAB_REQ,
+ signal, WaitDropTabReq::SignalLength, JBB);
+ }//if
+ }//for
+
+ ndbrequire(tabPtr.p->dropTable.waitDropTabCount.done() != true);
+ }
+}
+
+void
+Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal)
+{
+ jamEntry();
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = conf->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ ndbrequire(tabPtr.p->dropping == true);
+ Uint32 nodeId = refToNode(conf->senderRef);
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
+
+ if(!tabPtr.p->dropTable.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ {
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->dropTable.senderData;
+ sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->dropTable.senderRef = 0;
+ }
+}
+
+void
+Dbtc::execWAIT_DROP_TAB_REF(Signal* signal)
+{
+ jamEntry();
+ WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = ref->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ ndbrequire(tabPtr.p->dropping == true);
+ Uint32 nodeId = refToNode(ref->senderRef);
+ tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
+
+ ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable ||
+ ref->errorCode == WaitDropTabRef::NF_FakeErrorREF);
+
+ if(!tabPtr.p->dropTable.waitDropTabCount.done()){
+ jam();
+ return;
+ }
+
+ {
+ PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = tabPtr.p->dropTable.senderData;
+ sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+ tabPtr.p->dropTable.senderRef = 0;
+ }
+}
+
+void
+Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
+{
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = tableId;
+
+ WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr();
+ conf->tableId = tableId;
+
+ const Uint32 RT_BREAK = 16;
+ for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){
+ jam();
+ ptrAss(tabPtr, tableRecord);
+ if(tabPtr.p->enabled && tabPtr.p->dropping){
+ if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){
+ jam();
+ conf->senderRef = calcLqhBlockRef(nodeId);
+ execWAIT_DROP_TAB_CONF(signal);
+ tabPtr.i++;
+ break;
+ }
+ }
+ }
+
+ if(tabPtr.i == ctabrecFilesize){
+ /**
+ * Finished
+ */
+ jam();
+ return;
+ }
+
+ signal->theData[0] = TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
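+
+/* Editor's note: this is the usual TC real-time-break scan; at most RT_BREAK
+ * (16) table records are checked per invocation, after which the job
+ * reschedules itself via CONTINUEB with the next table id in theData[2], so a
+ * sweep over all tables after an LQH node failure never monopolises the
+ * signal execution thread.
+ */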
+
+void
+Dbtc::execDROP_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
+
+ if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){
+ jam();
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = DropTabRef::NoSuchTable;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){
+ jam();
+ DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->tableId = tabPtr.i;
+ ref->errorCode = DropTabRef::DropWoPrep;
+ sendSignal(senderRef, GSN_DROP_TAB_REF, signal,
+ DropTabRef::SignalLength, JBB);
+ return;
+ }
+
+ tabPtr.p->enabled = false;
+ tabPtr.p->dropping = false;
+
+ DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend();
+ conf->tableId = tabPtr.i;
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF, signal,
+ PrepDropTabConf::SignalLength, JBB);
+}
+
+void Dbtc::execALTER_TAB_REQ(Signal * signal)
+{
+ AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+ const Uint32 changeMask = req->changeMask;
+ const Uint32 tableId = req->tableId;
+ const Uint32 tableVersion = req->tableVersion;
+ const Uint32 gci = req->gci;
+ AlterTabReq::RequestType requestType =
+ (AlterTabReq::RequestType) req->requestType;
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+ tabPtr.p->currentSchemaVersion = tableVersion;
+
+ // Request handled successfully
+ AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->changeMask = changeMask;
+ conf->tableId = tableId;
+ conf->tableVersion = tableVersion;
+ conf->gci = gci;
+ conf->requestType = requestType;
+ sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength, JBB);
+}
+
+/* ***************************************************************************/
+/* START / RESTART */
+/* ***************************************************************************/
+void Dbtc::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ UintR apiConnect;
+ UintR tcConnect;
+ UintR tables;
+ UintR localScan;
+ UintR tcScan;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_API_CONNECT, &apiConnect));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TC_CONNECT, &tcConnect));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TABLE, &tables));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_LOCAL_SCAN, &localScan));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_SCAN, &tcScan));
+
+ ccacheFilesize = (apiConnect/3) + 1;
+ capiConnectFilesize = apiConnect;
+ ctcConnectFilesize = tcConnect;
+ ctabrecFilesize = tables;
+ cscanrecFileSize = tcScan;
+ cscanFragrecFileSize = localScan;
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ Uint32 val = 3000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val);
+ set_timeout_value(val);
+
+ val = 3000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val);
+ set_appl_timeout_value(val);
+
+ val = 1;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_TRANSACTION_TAKEOVER, &val);
+ set_no_parallel_takeover(val);
+
+ ctimeOutCheckDelay = 50; // 500ms
+}//Dbtc::execREAD_CONFIG_REQ()
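+
+/* Editor's note: the timeout members are kept in 10 ms timer ticks, which is
+ * why set_timeout_value()/set_appl_timeout_value() below divide the
+ * configured millisecond values by 10 and why ctimeOutCheckDelay = 50 means
+ * 500 ms. The 3000 ms default deadlock timeout therefore becomes
+ * ctimeOutValue = 300 ticks.
+ */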
+
+void Dbtc::execSTTOR(Signal* signal)
+{
+ Uint16 tphase;
+
+ jamEntry();
+ /* START CASE */
+ tphase = signal->theData[1];
+ csignalKey = signal->theData[6];
+ switch (tphase) {
+ case ZSPH1:
+ jam();
+ startphase1x010Lab(signal);
+ return;
+ default:
+ jam();
+ sttorryLab(signal); /* START PHASE 255 */
+ return;
+ }//switch
+}//Dbtc::execSTTOR()
+
+void Dbtc::sttorryLab(Signal* signal)
+{
+ signal->theData[0] = csignalKey;
+ signal->theData[1] = 3; /* BLOCK CATEGORY */
+ signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */
+ signal->theData[3] = ZSPH1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+}//Dbtc::sttorryLab()
+
+/* ***************************************************************************/
+/* INTERNAL START / RESTART */
+/*****************************************************************************/
+void Dbtc::execNDB_STTOR(Signal* signal)
+{
+ Uint16 tndbstartphase;
+ Uint16 tstarttype;
+
+ jamEntry();
+ tusersblkref = signal->theData[0];
+ tnodeid = signal->theData[1];
+ tndbstartphase = signal->theData[2]; /* START PHASE */
+ tstarttype = signal->theData[3]; /* START TYPE */
+ switch (tndbstartphase) {
+ case ZINTSPH1:
+ jam();
+ intstartphase1x010Lab(signal);
+ return;
+ case ZINTSPH2:
+ jam();
+ intstartphase2x010Lab(signal);
+ return;
+ case ZINTSPH3:
+ jam();
+ intstartphase3x010Lab(signal); /* SEIZE CONNECT RECORD IN EACH LQH*/
+// Start transaction event reporting.
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1);
+ return;
+ case ZINTSPH6:
+ jam();
+ csystemStart = SSS_TRUE;
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ ndbsttorry010Lab(signal);
+ return;
+}//Dbtc::execNDB_STTOR()
+
+void Dbtc::ndbsttorry010Lab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(cndbcntrblockref, GSN_NDB_STTORRY, signal, 1, JBB);
+}//Dbtc::ndbsttorry010Lab()
+
+void
+Dbtc::set_timeout_value(Uint32 timeOut)
+{
+ timeOut = timeOut / 10;
+ if (timeOut < 2) {
+ jam();
+ timeOut = 100;
+ }//if
+ ctimeOutValue = timeOut;
+}
+
+void
+Dbtc::set_appl_timeout_value(Uint32 timeOut)
+{
+ timeOut /= 10;
+ if (timeOut < ctimeOutValue) {
+ jam();
+ // Clamp: the application timeout should not be shorter than the
+ // deadlock timeout (the original assignment here was immediately
+ // overwritten below).
+ c_appl_timeout_value = ctimeOutValue;
+ return;
+ }//if
+ c_appl_timeout_value = timeOut;
+}
+
+void
+Dbtc::set_no_parallel_takeover(Uint32 noParallelTakeOver)
+{
+ if (noParallelTakeOver == 0) {
+ jam();
+ noParallelTakeOver = 1;
+ } else if (noParallelTakeOver > MAX_NDB_NODES) {
+ jam();
+ noParallelTakeOver = MAX_NDB_NODES;
+ }//if
+ cnoParallelTakeOver = noParallelTakeOver;
+}
+
+/* ***************************************************************************/
+/* S T A R T P H A S E 1 X */
+/* INITIALISE BLOCKREF AND BLOCKNUMBERS */
+/* ***************************************************************************/
+void Dbtc::startphase1x010Lab(Signal* signal)
+{
+ csystemStart = SSS_FALSE;
+ ctimeOutCheckCounter = 0;
+ ctimeOutCheckFragCounter = 0;
+ ctimeOutMissedHeartbeats = 0;
+ ctimeOutCheckHeartbeat = 0;
+ ctimeOutCheckLastHeartbeat = 0;
+ ctimeOutCheckActive = TOCS_FALSE;
+ ctimeOutCheckFragActive = TOCS_FALSE;
+ sttorryLab(signal);
+}//Dbtc::startphase1x010Lab()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 1 X */
+/* INITIALISE ALL RECORDS. */
+/*****************************************************************************/
+void Dbtc::intstartphase1x010Lab(Signal* signal)
+{
+ cownNodeid = tnodeid;
+ cownref = calcTcBlockRef(cownNodeid);
+ clqhblockref = calcLqhBlockRef(cownNodeid);
+ cdihblockref = calcDihBlockRef(cownNodeid);
+ cdictblockref = calcDictBlockRef(cownNodeid);
+ cndbcntrblockref = calcNdbCntrBlockRef(cownNodeid);
+ cerrorBlockref = calcNdbCntrBlockRef(cownNodeid);
+ coperationsize = 0;
+ cfailure_nr = 0;
+ ndbsttorry010Lab(signal);
+}//Dbtc::intstartphase1x010Lab()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 2 X */
+/* SET-UP LOCAL CONNECTIONS. */
+/*****************************************************************************/
+void Dbtc::intstartphase2x010Lab(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnect;
+ intstartphase2x020Lab(signal);
+}//Dbtc::intstartphase2x010Lab()
+
+void Dbtc::intstartphase2x020Lab(Signal* signal)
+{
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ ndbsttorry010Lab(signal);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_CONNECTING_DICT;
+/* ****************** */
+/* DISEIZEREQ < */
+/* ****************** */
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ sendSignal(cdihblockref, GSN_DISEIZEREQ, signal, 2, JBB);
+}//Dbtc::intstartphase2x020Lab()
+
+void Dbtc::execDISEIZECONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcConnectptr.p->dihConnectptr = signal->theData[1];
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ intstartphase2x020Lab(signal);
+}//Dbtc::execDISEIZECONF()
+
+/*****************************************************************************/
+/* I N T S T A R T P H A S E 3 X */
+/* PREPARE DISTRIBUTED CONNECTIONS */
+/*****************************************************************************/
+void Dbtc::intstartphase3x010Lab(Signal* signal)
+{
+ signal->theData[0] = cownref;
+ sendSignal(cndbcntrblockref, GSN_READ_NODESREQ, signal, 1, JBB);
+}//Dbtc::intstartphase3x010Lab()
+
+void Dbtc::execREAD_NODESCONF(Signal* signal)
+{
+ UintR guard0;
+
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+
+ csystemnodes = readNodes->noOfNodes;
+ cmasterNodeId = readNodes->masterNodeId;
+
+ con_lineNodes = 0;
+ arrGuard(csystemnodes, MAX_NDB_NODES);
+ guard0 = csystemnodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES); // Check not zero nodes
+
+ for (unsigned i = 1; i < MAX_NDB_NODES; i++) {
+ jam();
+ if (NodeBitmask::get(readNodes->allNodes, i)) {
+ hostptr.i = i;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+
+ hostptr.p->takeOverStatus = TOS_IDLE;
+
+ if (NodeBitmask::get(readNodes->inactiveNodes, i)) {
+ jam();
+ hostptr.p->hostStatus = HS_DEAD;
+ } else {
+ jam();
+ con_lineNodes++;
+ hostptr.p->hostStatus = HS_ALIVE;
+ }//if
+ }//if
+ }//for
+ ndbsttorry010Lab(signal);
+}//Dbtc::execREAD_NODESCONF()
+
+/*****************************************************************************/
+/* A P I _ F A I L R E Q */
+// An API node has failed for some reason. We need to disconnect all API
+// connections to the API node. This also includes aborting any transactions
+// the failed node had ongoing and removing its commit ack markers.
+/*****************************************************************************/
+void Dbtc::execAPI_FAILREQ(Signal* signal)
+{
+ /***************************************************************************
+ * Set the block reference to return API_FAILCONF to. Set the number of api
+ * connects currently closing to one to indicate that we are still in the
+ * process of going through the api connect records. Thus checking for zero
+ * can only be true after all api connect records have been checked.
+ **************************************************************************/
+ jamEntry();
+ capiFailRef = signal->theData[1];
+ arrGuard(signal->theData[0], MAX_NODES);
+ capiConnectClosing[signal->theData[0]] = 1;
+ handleFailedApiNode(signal, signal->theData[0], (UintR)0);
+}
+
+void
+Dbtc::handleFailedApiNode(Signal* signal,
+ UintR TapiFailedNode,
+ UintR TapiConnectPtr)
+{
+ UintR TloopCount = 0;
+ arrGuard(TapiFailedNode, MAX_NODES);
+ apiConnectptr.i = TapiConnectPtr;
+ do {
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ const UintR TapiNode = refToNode(apiConnectptr.p->ndbapiBlockref);
+ if (TapiNode == TapiFailedNode) {
+#ifdef VM_TRACE
+ if (apiConnectptr.p->apiFailState != ZFALSE) {
+ ndbout << "Error in previous API fail handling discovered" << endl
+ << " apiConnectptr.i = " << apiConnectptr.i << endl
+ << " apiConnectstate = " << apiConnectptr.p->apiConnectstate
+ << endl
+ << " ndbapiBlockref = " << hex
+ << apiConnectptr.p->ndbapiBlockref << endl
+ << " apiNode = " << refToNode(apiConnectptr.p->ndbapiBlockref)
+ << endl;
+ if (apiConnectptr.p->lastTcConnect != RNIL){
+ jam();
+ tcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ ndbout << " tcConnectptr.i = " << tcConnectptr.i << endl
+ << " tcConnectstate = " << tcConnectptr.p->tcConnectstate
+ << endl;
+ }
+ }//if
+#endif
+
+ apiConnectptr.p->returnsignal = RS_NO_RETURN;
+ /***********************************************************************/
+ // The connected node is the failed node.
+ /**********************************************************************/
+ switch(apiConnectptr.p->apiConnectstate) {
+ case CS_DISCONNECTED:
+ /*********************************************************************/
+ // These states do not need any special handling.
+ // Simply continue with the next.
+ /*********************************************************************/
+ jam();
+ break;
+ case CS_ABORTING:
+ /*********************************************************************/
+ // This could actually mean that the API connection is already
+ // ready to release if the abortState is IDLE.
+ /*********************************************************************/
+ if (apiConnectptr.p->abortState == AS_IDLE) {
+ jam();
+ releaseApiCon(signal, apiConnectptr.i);
+ } else {
+ jam();
+ capiConnectClosing[TapiFailedNode]++;
+ apiConnectptr.p->apiFailState = ZTRUE;
+ }//if
+ break;
+ case CS_WAIT_ABORT_CONF:
+ case CS_WAIT_COMMIT_CONF:
+ case CS_START_COMMITTING:
+ case CS_PREPARE_TO_COMMIT:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ /*********************************************************************/
+ // These states indicate that an abort process or commit process is
+ // already ongoing. We will set a state in the api record indicating
+ // that the API node has failed.
+ // Also we will increase the number of outstanding api records to
+ // wait for before we can respond with API_FAILCONF.
+ /*********************************************************************/
+ jam();
+ capiConnectClosing[TapiFailedNode]++;
+ apiConnectptr.p->apiFailState = ZTRUE;
+ break;
+ case CS_START_SCAN:
+ /*********************************************************************/
+ // The api record was performing a scan operation. We need to check
+ // on the scan state. Since completing a scan process might involve
+ // sending several signals we will increase the loop count by 64.
+ /*********************************************************************/
+ jam();
+
+ apiConnectptr.p->apiFailState = ZTRUE;
+ capiConnectClosing[TapiFailedNode]++;
+
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ close_scan_req(signal, scanPtr, true);
+
+ TloopCount += 64;
+ break;
+ case CS_CONNECTED:
+ /*********************************************************************/
+ // The api record is connected to failed node. We need to release the
+ // connection and set it in a disconnected state.
+ /*********************************************************************/
+ jam();
+ releaseApiCon(signal, apiConnectptr.i);
+ break;
+ case CS_REC_COMMITTING:
+ case CS_RECEIVING:
+ case CS_STARTED:
+ /*********************************************************************/
+ // The api record was in the process of performing a transaction but
+ // had not yet sent all information.
+ // We need to initiate an ABORT since the API will not provide any
+ // more information.
+ // Since the abort can send many signals we will insert a real-time
+ // break after checking this record.
+ /*********************************************************************/
+ jam();
+ apiConnectptr.p->apiFailState = ZTRUE;
+ capiConnectClosing[TapiFailedNode]++;
+ abort010Lab(signal);
+ TloopCount = 256;
+ break;
+ case CS_PREPARED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ case CS_START_PREPARING:
+ jam();
+ /*********************************************************************/
+ // Not implemented yet.
+ /*********************************************************************/
+ systemErrorLab(signal);
+ break;
+ case CS_RESTART:
+ jam();
+ case CS_COMPLETING:
+ jam();
+ case CS_COMPLETE_SENT:
+ jam();
+ case CS_WAIT_COMPLETE_CONF:
+ jam();
+ case CS_FAIL_ABORTING:
+ jam();
+ case CS_FAIL_ABORTED:
+ jam();
+ case CS_FAIL_PREPARED:
+ jam();
+ case CS_FAIL_COMMITTING:
+ jam();
+ case CS_FAIL_COMMITTED:
+ /*********************************************************************/
+ // These states are only valid on copy and fail API connections.
+ /*********************************************************************/
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ } else {
+ jam();
+ }//if
+ apiConnectptr.i++;
+ if (apiConnectptr.i > ((capiConnectFilesize / 3) - 1)) {
+ jam();
+ /**
+ * Finished with scanning connection record
+ *
+ * Now scan markers
+ */
+ removeMarkerForFailedAPI(signal, TapiFailedNode, 0);
+ return;
+ }//if
+ } while (TloopCount++ < 256);
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE;
+ signal->theData[1] = TapiFailedNode;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+}//Dbtc::handleFailedApiNode()
+
+void
+Dbtc::removeMarkerForFailedAPI(Signal* signal,
+ Uint32 nodeId,
+ Uint32 startBucket)
+{
+ TcFailRecordPtr node_fail_ptr;
+ node_fail_ptr.i = 0;
+ ptrAss(node_fail_ptr, tcFailRecord);
+ if(node_fail_ptr.p->failStatus != FS_IDLE) {
+ jam();
+ DEBUG("Restarting removeMarkerForFailedAPI");
+ /**
+ * TC take-over in progress; we need to restart later
+ * since the take-over creates new markers.
+ */
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = 0;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 500, 3);
+ return;
+ }
+
+ CommitAckMarkerIterator iter;
+ m_commitAckMarkerHash.next(startBucket, iter);
+
+ const Uint32 RT_BREAK = 256;
+ for(Uint32 i = 0; i<RT_BREAK || iter.bucket == startBucket; i++){
+ jam();
+
+ if(iter.curr.i == RNIL){
+ jam();
+ /**
+ * Done with iteration
+ */
+ capiConnectClosing[nodeId]--;
+ if (capiConnectClosing[nodeId] == 0) {
+ jam();
+ /********************************************************************/
+ // No outstanding ABORTs or COMMITs for this failed API node.
+ // We can respond with API_FAILCONF.
+ /********************************************************************/
+ signal->theData[0] = nodeId;
+ signal->theData[1] = cownref;
+ sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+ }
+ return;
+ }
+
+ if(iter.curr.p->apiNodeId == nodeId){
+ jam();
+
+ /**
+ * Check that the record is not still in use.
+ *
+ */
+ ApiConnectRecordPtr apiConnectPtr;
+ apiConnectPtr.i = iter.curr.p->apiConnectPtr;
+ ptrCheckGuard(apiConnectPtr, capiConnectFilesize, apiConnectRecord);
+ if(apiConnectPtr.p->commitAckMarker == iter.curr.i){
+ jam();
+ /**
+ * The record is still active.
+ *
+ * Don't remove it; continue via a CONTINUEB signal instead.
+ */
+ break;
+ }
+ sendRemoveMarkers(signal, iter.curr.p);
+ m_commitAckMarkerHash.release(iter.curr);
+
+ break;
+ }
+ m_commitAckMarkerHash.next(iter);
+ }
+
+ signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = iter.bucket;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void Dbtc::handleApiFailState(Signal* signal, UintR TapiConnectptr)
+{
+ ApiConnectRecordPtr TlocalApiConnectptr;
+ UintR TfailedApiNode;
+
+ TlocalApiConnectptr.i = TapiConnectptr;
+ ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ TfailedApiNode = refToNode(TlocalApiConnectptr.p->ndbapiBlockref);
+ arrGuard(TfailedApiNode, MAX_NODES);
+ capiConnectClosing[TfailedApiNode]--;
+ releaseApiCon(signal, TapiConnectptr);
+ TlocalApiConnectptr.p->apiFailState = ZFALSE;
+ if (capiConnectClosing[TfailedApiNode] == 0) {
+ jam();
+ signal->theData[0] = TfailedApiNode;
+ signal->theData[1] = cownref;
+ sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB);
+ }//if
+}//Dbtc::handleApiFailState()
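+
+ /* Editor's note (illustrative sketch, not part of DbtcMain.cpp):
+ * capiConnectClosing[] acts as a per-node count of api connections that
+ * still need cleanup after the node failed; API_FAILCONF may only be sent
+ * once that count drops to zero. A hypothetical standalone version of the
+ * "last one out confirms" rule used above:
+ */
+ #if 0
+ #include <cstdint>
+ #include <functional>
+
+ static void finishOneClose(uint32_t& closingCount,
+                            const std::function<void()>& sendApiFailConf)
+ {
+   if (--closingCount == 0)
+     sendApiFailConf();          // corresponds to GSN_API_FAILCONF
+ }
+ #endif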
+
+/****************************************************************************
+ * T C S E I Z E R E Q
+ * THE APPLICATION SENDS A REQUEST TO SEIZE A CONNECT RECORD TO CARRY OUT A
+ * TRANSACTION
+ * THE TC BLOCK TAKES A CONNECT RECORD FROM THE FREE LIST AND ESTABLISHES ALL
+ * NECESSARY CONNECTIONS BEFORE REPLYING TO THE APPLICATION BLOCK.
+ ****************************************************************************/
+void Dbtc::execTCSEIZEREQ(Signal* signal)
+{
+ UintR tapiPointer;
+ BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
+
+ jamEntry();
+ tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
+ tapiBlockref = signal->theData[1]; /* SENDERS BLOCK REFERENCE*/
+
+ const NodeState::StartLevel sl =
+ (NodeState::StartLevel)getNodeState().startLevel;
+
+ const NodeId senderNodeId = refToNode(tapiBlockref);
+ const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0;
+
+ if(!(senderNodeId == getNodeState().getSingleUserApi()) &&
+ !getNodeState().getSingleUserMode()) {
+ if(!(sl==NodeState::SL_SINGLEUSER &&
+ senderNodeId == getNodeState().getSingleUserApi())) {
+ if (!(sl == NodeState::SL_STARTED ||
+ (sl == NodeState::SL_STARTING && local == true))) {
+ jam();
+
+ Uint32 errCode;
+ if(!(sl == NodeState::SL_SINGLEUSER && local))
+ {
+ switch(sl){
+ case NodeState::SL_STARTING:
+ errCode = ZSYSTEM_NOT_STARTED_ERROR;
+ break;
+ case NodeState::SL_STOPPING_1:
+ case NodeState::SL_STOPPING_2:
+ case NodeState::SL_STOPPING_3:
+ case NodeState::SL_STOPPING_4:
+ if(getNodeState().stopping.systemShutdown)
+ errCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
+ else
+ errCode = ZNODE_SHUTDOWN_IN_PROGRESS;
+ break;
+ case NodeState::SL_SINGLEUSER:
+ errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ default:
+ errCode = ZWRONG_STATE;
+ break;
+ }
+ signal->theData[0] = tapiPointer;
+ signal->theData[1] = errCode;
+ sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
+ return;
+ }//if (!(sl == SL_SINGLEUSER))
+ } //if
+ }
+ }
+
+ seizeApiConnect(signal);
+ if (terrorCode == ZOK) {
+ jam();
+ apiConnectptr.p->ndbapiConnect = tapiPointer;
+ apiConnectptr.p->ndbapiBlockref = tapiBlockref;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.i;
+ sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 2, JBB);
+ return;
+ }
+
+ signal->theData[0] = tapiPointer;
+ signal->theData[1] = terrorCode;
+ sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
+}//Dbtc::execTCSEIZEREQ()
+
+/****************************************************************************/
+ /* T C R E L E A S E R E Q */
+/* REQUEST TO RELEASE A CONNECT RECORD */
+/****************************************************************************/
+void Dbtc::execTCRELEASEREQ(Signal* signal)
+{
+ UintR tapiPointer;
+ BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/
+
+ jamEntry();
+ tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/
+ tapiBlockref = signal->theData[1];/* SENDERS BLOCK REFERENCE*/
+ tuserpointer = signal->theData[2];
+ if (tapiPointer >= capiConnectFilesize) {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 3, JBB);
+ return;
+ } else {
+ jam();
+ apiConnectptr.i = tapiPointer;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate == CS_DISCONNECTED) {
+ jam();
+ signal->theData[0] = tuserpointer;
+ sendSignal(tapiBlockref, GSN_TCRELEASECONF, signal, 1, JBB);
+ } else {
+ if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) {
+ if (apiConnectptr.p->apiConnectstate == CS_CONNECTED ||
+ (apiConnectptr.p->apiConnectstate == CS_ABORTING &&
+ apiConnectptr.p->abortState == AS_IDLE) ||
+ (apiConnectptr.p->apiConnectstate == CS_STARTED &&
+ apiConnectptr.p->firstTcConnect == RNIL))
+ {
+ jam(); /* JUST REPLY OK */
+ releaseApiCon(signal, apiConnectptr.i);
+ signal->theData[0] = tuserpointer;
+ sendSignal(tapiBlockref,
+ GSN_TCRELEASECONF, signal, 1, JBB);
+ } else {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = apiConnectptr.p->apiConnectstate;
+ sendSignal(tapiBlockref,
+ GSN_TCRELEASEREF, signal, 4, JBB);
+ }
+ } else {
+ jam();
+ signal->theData[0] = tuserpointer;
+ signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = tapiBlockref;
+ signal->theData[4] = apiConnectptr.p->ndbapiBlockref;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 5, JBB);
+ }//if
+ }//if
+}//Dbtc::execTCRELEASEREQ()
+
+/****************************************************************************/
+// Error Handling for TCKEYREQ messages
+/****************************************************************************/
+void Dbtc::signalErrorRefuseLab(Signal* signal)
+{
+ ptrGuard(apiConnectptr);
+ if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
+ jam();
+ apiConnectptr.p->abortState = AS_IDLE;
+ apiConnectptr.p->apiConnectstate = CS_ABORTING;
+ }//if
+ sendSignalErrorRefuseLab(signal);
+}//Dbtc::signalErrorRefuseLab()
+
+void Dbtc::sendSignalErrorRefuseLab(Signal* signal)
+{
+ ndbassert(false);
+ ptrGuard(apiConnectptr);
+ if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
+ jam();
+ ndbrequire(false);
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = signal->theData[ttransid_ptr];
+ signal->theData[2] = signal->theData[ttransid_ptr + 1];
+ signal->theData[3] = ZSIGNAL_ERROR;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
+ signal, 4, JBB);
+ }
+}//Dbtc::sendSignalErrorRefuseLab()
+
+void Dbtc::abortBeginErrorLab(Signal* signal)
+{
+ apiConnectptr.p->transid[0] = signal->theData[ttransid_ptr];
+ apiConnectptr.p->transid[1] = signal->theData[ttransid_ptr + 1];
+ abortErrorLab(signal);
+}//Dbtc::abortBeginErrorLab()
+
+void Dbtc::printState(Signal* signal, int place)
+{
+#ifdef VM_TRACE // Change to if 0 to disable these printouts
+ ndbout << "-- Dbtc::printState -- " << endl;
+ ndbout << "Received from place = " << place
+ << " apiConnectptr.i = " << apiConnectptr.i
+ << " apiConnectstate = " << apiConnectptr.p->apiConnectstate << endl;
+ ndbout << "ctcTimer = " << ctcTimer
+ << " ndbapiBlockref = " << hex <<apiConnectptr.p->ndbapiBlockref
+ << " Transid = " << apiConnectptr.p->transid[0]
+ << " " << apiConnectptr.p->transid[1] << endl;
+ ndbout << " apiTimer = " << getApiConTimer(apiConnectptr.i)
+ << " counter = " << apiConnectptr.p->counter
+ << " lqhkeyconfrec = " << apiConnectptr.p->lqhkeyconfrec
+ << " lqhkeyreqrec = " << apiConnectptr.p->lqhkeyreqrec << endl;
+ ndbout << "abortState = " << apiConnectptr.p->abortState
+ << " apiScanRec = " << apiConnectptr.p->apiScanRec
+ << " returncode = " << apiConnectptr.p->returncode << endl;
+ ndbout << "tckeyrec = " << apiConnectptr.p->tckeyrec
+ << " returnsignal = " << apiConnectptr.p->returnsignal
+ << " apiFailState = " << apiConnectptr.p->apiFailState << endl;
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ jam();
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = apiConnectptr.p->cachePtr;
+ if (TcachePtr < TcacheFilesize) {
+ jam();
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ ndbout << "currReclenAi = " << regCachePtr->currReclenAi
+ << " attrlength = " << regCachePtr->attrlength
+ << " tableref = " << regCachePtr->tableref
+ << " keylen = " << regCachePtr->keylen << endl;
+ } else {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ }//if
+#endif
+ return;
+}//Dbtc::printState()
+
+void
+Dbtc::TCKEY_abort(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ printState(signal, 4);
+ abortBeginErrorLab(signal);
+ return;
+ case 1:
+ jam();
+ printState(signal, 3);
+ sendSignalErrorRefuseLab(signal);
+ return;
+ case 2:{
+ printState(signal, 6);
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+ const Uint32 t1 = tcKeyReq->transId1;
+ const Uint32 t2 = tcKeyReq->transId2;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = t1;
+ signal->theData[2] = t2;
+ signal->theData[3] = ZABORT_ERROR;
+ ndbrequire(false);
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
+ signal, 4, JBB);
+ return;
+ }
+ case 3:
+ jam();
+ printState(signal, 7);
+ noFreeConnectionErrorLab(signal);
+ return;
+ case 4:
+ jam();
+ terrorCode = ZERO_KEYLEN_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+ case 5:
+ jam();
+ terrorCode = ZNO_AI_WITH_UPDATE;
+ releaseAtErrorLab(signal);
+ return;
+ case 6:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 7:
+ jam();
+ tabStateErrorLab(signal);
+ return;
+
+ case 8:
+ jam();
+ wrongSchemaVersionErrorLab(signal);
+ return;
+
+ case 9:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 10:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 11:
+ jam();
+ terrorCode = ZMORE_AI_IN_TCKEYREQ_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 12:
+ jam();
+ terrorCode = ZSIMPLE_READ_WITHOUT_AI;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 13:
+ jam();
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_KEYINFO:
+ jam();
+ printState(signal, 8);
+ terrorCode = ZSTATE_ERROR;
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ /********************************************************************/
+ /* MISMATCH BETWEEN STATE ON API CONNECTION AND THIS */
+ /* PARTICULAR TC CONNECT RECORD. THIS MUST BE CAUSED BY NDB */
+ /* INTERNAL ERROR. */
+ /********************************************************************/
+ systemErrorLab(signal);
+ return;
+ }//switch
+ return;
+
+ case 15:
+ jam();
+ terrorCode = ZSCAN_NODE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 16:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 17:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 18:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 19:
+ jam();
+ return;
+
+ case 20:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 21:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 22:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 23:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 24:
+ jam();
+ seizeAttrbuferrorLab(signal);
+ return;
+
+ case 25:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+ case 26:
+ jam();
+ return;
+
+ case 27:
+ systemErrorLab(signal);
+ jam();
+ return;
+
+ case 28:
+ jam();
+ // NOT USED
+ return;
+
+ case 29:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 30:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 31:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 32:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 33:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 34:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 35:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 36:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 37:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 38:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 39:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 40:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 41:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 42:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 43:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 44:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 45:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 46:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 47:
+ jam();
+ terrorCode = apiConnectptr.p->returncode;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 48:
+ jam();
+ terrorCode = ZCOMMIT_TYPE_ERROR;
+ releaseAtErrorLab(signal);
+ return;
+
+ case 49:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 50:
+ jam();
+ systemErrorLab(signal);
+ return;
+
+ case 51:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 52:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 53:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 54:
+ jam();
+ abortErrorLab(signal);
+ return;
+
+ case 55:
+ jam();
+ printState(signal, 5);
+ sendSignalErrorRefuseLab(signal);
+ return;
+
+ case 56:{
+ jam();
+ terrorCode = ZNO_FREE_TC_MARKER;
+ abortErrorLab(signal);
+ return;
+ }
+ case 57:{
+ jam();
+ /**
+ * Initialize object before starting error handling
+ */
+ initApiConnectRec(signal, apiConnectptr.p, true);
+ switch(getNodeState().startLevel){
+ case NodeState::SL_STOPPING_2:
+ case NodeState::SL_STOPPING_3:
+ case NodeState::SL_STOPPING_4:
+ if(getNodeState().stopping.systemShutdown)
+ terrorCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS;
+ else
+ terrorCode = ZNODE_SHUTDOWN_IN_PROGRESS;
+ break;
+ case NodeState::SL_SINGLEUSER:
+ terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ default:
+ terrorCode = ZWRONG_STATE;
+ break;
+ }
+ abortErrorLab(signal);
+ return;
+ }
+
+ case 58:{
+ jam();
+ releaseAtErrorLab(signal);
+ return;
+ }
+
+ case 59:{
+ jam();
+ terrorCode = ZABORTINPROGRESS;
+ abortErrorLab(signal);
+ return;
+ }
+
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}
+
+void Dbtc::execKEYINFO(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ tmaxData = 20;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ ttransid_ptr = 1;
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ printState(signal, 10);
+ sendSignalErrorRefuseLab(signal);
+ return;
+ }//if
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_RECEIVING:
+ case CS_REC_COMMITTING:
+ case CS_START_SCAN:
+ jam();
+ /*empty*/;
+ break;
+ /* OK */
+ case CS_ABORTING:
+ jam();
+ return; /* IGNORE */
+ case CS_CONNECTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. */
+ /****************************************************************>*/
+ printState(signal, 11);
+ signalErrorRefuseLab(signal);
+ return;
+ case CS_STARTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
+ /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
+ /****************************************************************>*/
+ terrorCode = ZSIGNAL_ERROR;
+ printState(signal, 2);
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//switch
+
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = apiConnectptr.p->cachePtr;
+ UintR TtcTimer = ctcTimer;
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ if (TcachePtr >= TcacheFilesize) {
+ TCKEY_abort(signal, 42);
+ return;
+ }//if
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ cachePtr.i = TcachePtr;
+ cachePtr.p = regCachePtr;
+
+ tcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_KEYINFO:
+ jam();
+ tckeyreq020Lab(signal);
+ return;
+ case OS_WAIT_SCAN:
+ break;
+ default:
+ jam();
+ terrorCode = ZSTATE_ERROR;
+ abortErrorLab(signal);
+ return;
+ }//switch
+
+ UintR TdataPos = 0;
+ UintR TkeyLen = regCachePtr->keylen;
+ UintR Tlen = regCachePtr->save1;
+
+ do {
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ abort();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ linkKeybuf(signal);
+ arrGuard(TdataPos, 19);
+ databufptr.p->data[0] = signal->theData[TdataPos + 3];
+ databufptr.p->data[1] = signal->theData[TdataPos + 4];
+ databufptr.p->data[2] = signal->theData[TdataPos + 5];
+ databufptr.p->data[3] = signal->theData[TdataPos + 6];
+ Tlen = Tlen + 4;
+ TdataPos = TdataPos + 4;
+ if (Tlen < TkeyLen) {
+ jam();
+ if (TdataPos >= tmaxData) {
+ jam();
+ /*----------------------------------------------------*/
+ /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
+ /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
+ /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
+ /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
+ /*----------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = Tlen;
+ return;
+ }//if
+ } else {
+ jam();
+ return;
+ }//if
+ } while (1);
+ return;
+}//Dbtc::execKEYINFO()
+
+/*---------------------------------------------------------------------------*/
+/* */
+/* MORE THAN FOUR WORDS OF KEY DATA. WE NEED TO PACK THIS IN KEYINFO SIGNALS.*/
+/* WE WILL ALWAYS PACK 4 WORDS AT A TIME. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::packKeyData000Lab(Signal* signal,
+ BlockReference TBRef,
+ Uint32 totalLen)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+
+ jam();
+ Uint32 len = 0;
+ databufptr.i = regCachePtr->firstKeybuf;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ Uint32 * dst = signal->theData+3;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+
+ do {
+ jam();
+ databufptr.i = databufptr.p->nextDatabuf;
+ dst[len + 0] = databufptr.p->data[0];
+ dst[len + 1] = databufptr.p->data[1];
+ dst[len + 2] = databufptr.p->data[2];
+ dst[len + 3] = databufptr.p->data[3];
+ len += 4;
+ if (totalLen <= 4) {
+ jam();
+ /*---------------------------------------------------------------------*/
+ /* LAST PACK OF KEY DATA HAS BEEN SENT */
+ /*---------------------------------------------------------------------*/
+ /* THERE WAS UNSENT INFORMATION, SEND IT. */
+ /*---------------------------------------------------------------------*/
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
+ return;
+ } else if(len == KeyInfo::DataLength){
+ jam();
+ len = 0;
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + KeyInfo::DataLength, JBB);
+ }
+ totalLen -= 4;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ } while (1);
+}//Dbtc::packKeyData000Lab()
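+
+ /* Editor's note (illustrative sketch, not part of DbtcMain.cpp):
+ * packKeyData000Lab() drains the key buffers four words at a time and
+ * flushes a KEYINFO signal whenever KeyInfo::DataLength words have been
+ * collected. The same chunking idea in standalone form; sendChunk is a
+ * hypothetical stand-in for building and sending one KEYINFO:
+ */
+ #if 0
+ #include <algorithm>
+ #include <cstddef>
+ #include <cstdint>
+ #include <functional>
+ #include <vector>
+
+ static void packKeyWords(const std::vector<uint32_t>& key, std::size_t chunk,
+                          const std::function<void(const uint32_t*, std::size_t)>& sendChunk)
+ {
+   for (std::size_t off = 0; off < key.size(); off += chunk)
+     sendChunk(key.data() + off, std::min(chunk, key.size() - off));
+ }
+ #endif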
+
+void Dbtc::tckeyreq020Lab(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TdataPos = 0;
+ UintR TkeyLen = regCachePtr->keylen;
+ UintR Tlen = regCachePtr->save1;
+
+ do {
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ linkKeybuf(signal);
+ arrGuard(TdataPos, 19);
+ databufptr.p->data[0] = signal->theData[TdataPos + 3];
+ databufptr.p->data[1] = signal->theData[TdataPos + 4];
+ databufptr.p->data[2] = signal->theData[TdataPos + 5];
+ databufptr.p->data[3] = signal->theData[TdataPos + 6];
+ Tlen = Tlen + 4;
+ TdataPos = TdataPos + 4;
+ if (Tlen < TkeyLen) {
+ jam();
+ if (TdataPos >= tmaxData) {
+ jam();
+ /*----------------------------------------------------*/
+ /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
+ /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
+ /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
+ /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
+ /*----------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = Tlen;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_KEYINFO;
+ return;
+ }//if
+ } else {
+ jam();
+ tckeyreq050Lab(signal);
+ return;
+ }//if
+ } while (1);
+ return;
+}//Dbtc::tckeyreq020Lab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SAVE ATTRIBUTE INFORMATION IN OPERATION RECORD ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::saveAttrbuf(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ UintR TattrbufFilesize = cattrbufFilesize;
+ UintR TTcfirstAttrbuf = regCachePtr->firstAttrbuf;
+ UintR Tlen = signal->length() - 3;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+
+ AttrbufRecord * const regAttrPtr = &localAttrbufRecord[TfirstfreeAttrbuf];
+ if (TfirstfreeAttrbuf >= TattrbufFilesize) {
+ TCKEY_abort(signal, 21);
+ return;
+ }//if
+ UintR Tnext = regAttrPtr->attrbuf[ZINBUF_NEXT];
+ if (TTcfirstAttrbuf == RNIL) {
+ jam();
+ regCachePtr->firstAttrbuf = TfirstfreeAttrbuf;
+ } else {
+ AttrbufRecordPtr saAttrbufptr;
+
+ saAttrbufptr.i = regCachePtr->lastAttrbuf;
+ jam();
+ if (saAttrbufptr.i >= TattrbufFilesize) {
+ TCKEY_abort(signal, 22);
+ return;
+ }//if
+ saAttrbufptr.p = &localAttrbufRecord[saAttrbufptr.i];
+ saAttrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
+ }//if
+
+ cfirstfreeAttrbuf = Tnext;
+ regAttrPtr->attrbuf[ZINBUF_NEXT] = RNIL;
+ regCachePtr->lastAttrbuf = TfirstfreeAttrbuf;
+ regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = Tlen;
+
+ UintR Tdata1 = signal->theData[3];
+ UintR Tdata2 = signal->theData[4];
+ UintR Tdata3 = signal->theData[5];
+ UintR Tdata4 = signal->theData[6];
+ UintR Tdata5 = signal->theData[7];
+ UintR Tdata6 = signal->theData[8];
+ UintR Tdata7 = signal->theData[9];
+ UintR Tdata8 = signal->theData[10];
+
+ regAttrPtr->attrbuf[0] = Tdata1;
+ regAttrPtr->attrbuf[1] = Tdata2;
+ regAttrPtr->attrbuf[2] = Tdata3;
+ regAttrPtr->attrbuf[3] = Tdata4;
+ regAttrPtr->attrbuf[4] = Tdata5;
+ regAttrPtr->attrbuf[5] = Tdata6;
+ regAttrPtr->attrbuf[6] = Tdata7;
+ regAttrPtr->attrbuf[7] = Tdata8;
+
+ if (Tlen > 8) {
+
+ Tdata1 = signal->theData[11];
+ Tdata2 = signal->theData[12];
+ Tdata3 = signal->theData[13];
+ Tdata4 = signal->theData[14];
+ Tdata5 = signal->theData[15];
+ Tdata6 = signal->theData[16];
+ Tdata7 = signal->theData[17];
+
+ regAttrPtr->attrbuf[8] = Tdata1;
+ regAttrPtr->attrbuf[9] = Tdata2;
+ regAttrPtr->attrbuf[10] = Tdata3;
+ regAttrPtr->attrbuf[11] = Tdata4;
+ regAttrPtr->attrbuf[12] = Tdata5;
+ regAttrPtr->attrbuf[13] = Tdata6;
+ regAttrPtr->attrbuf[14] = Tdata7;
+ jam();
+ if (Tlen > 15) {
+
+ Tdata1 = signal->theData[18];
+ Tdata2 = signal->theData[19];
+ Tdata3 = signal->theData[20];
+ Tdata4 = signal->theData[21];
+ Tdata5 = signal->theData[22];
+ Tdata6 = signal->theData[23];
+ Tdata7 = signal->theData[24];
+
+ jam();
+ regAttrPtr->attrbuf[15] = Tdata1;
+ regAttrPtr->attrbuf[16] = Tdata2;
+ regAttrPtr->attrbuf[17] = Tdata3;
+ regAttrPtr->attrbuf[18] = Tdata4;
+ regAttrPtr->attrbuf[19] = Tdata5;
+ regAttrPtr->attrbuf[20] = Tdata6;
+ regAttrPtr->attrbuf[21] = Tdata7;
+ }//if
+ }//if
+}//Dbtc::saveAttrbuf()
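+
+ /* Editor's note (illustrative sketch, not part of DbtcMain.cpp):
+ * saveAttrbuf() appends a fixed-size attrinfo buffer to an intrusive
+ * singly linked list kept as firstAttrbuf/lastAttrbuf (indices with RNIL
+ * as the null value). A pointer-based standalone version of that append
+ * step, with hypothetical names:
+ */
+ #if 0
+ struct Buf   { Buf* next; };
+ struct Cache { Buf* first; Buf* last; };
+
+ static void appendBuf(Cache& c, Buf* b)
+ {
+   b->next = nullptr;
+   if (c.first == nullptr)
+     c.first = b;                // list was empty
+   else
+     c.last->next = b;           // link behind the current tail
+   c.last = b;
+ }
+ #endif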
+
+void Dbtc::execATTRINFO(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ UintR Tdata1 = signal->theData[0];
+ UintR Tlength = signal->length();
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ jamEntry();
+ apiConnectptr.i = Tdata1;
+ ttransid_ptr = 1;
+ if (Tdata1 >= TapiConnectFilesize) {
+ DEBUG("Drop ATTRINFO, wrong apiConnectptr");
+ TCKEY_abort(signal, 18);
+ return;
+ }//if
+
+ UintR Tdata2 = signal->theData[1];
+ UintR Tdata3 = signal->theData[2];
+ ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
+ compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
+ compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
+ apiConnectptr.p = regApiPtr;
+ compare_transid1 = compare_transid1 | compare_transid2;
+
+ if (compare_transid1 != 0) {
+ DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
+ << " transid("<<hex<<Tdata2<<", "<<Tdata3);
+ TCKEY_abort(signal, 19);
+ return;
+ }//if
+ if (Tlength < 4) {
+ DEBUG("Drop ATTRINFO, wrong length = " << Tlength);
+ TCKEY_abort(signal, 20);
+ return;
+ }
+ Tlength -= 3;
+ UintR TcompREC_COMMIT = (regApiPtr->apiConnectstate == CS_REC_COMMITTING);
+ UintR TcompRECEIVING = (regApiPtr->apiConnectstate == CS_RECEIVING);
+ UintR TcompBOTH = TcompREC_COMMIT | TcompRECEIVING;
+
+ if (TcompBOTH) {
+ jam();
+ if (ERROR_INSERTED(8015)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8016)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ CacheRecord *localCacheRecord = cacheRecord;
+ UintR TcacheFilesize = ccacheFilesize;
+ UintR TcachePtr = regApiPtr->cachePtr;
+ UintR TtcTimer = ctcTimer;
+ CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
+ if (TcachePtr >= TcacheFilesize) {
+ TCKEY_abort(signal, 43);
+ return;
+ }//if
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ UintR TcurrReclenAi = regCachePtr->currReclenAi;
+ UintR TattrLen = regCachePtr->attrlength;
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ cachePtr.i = TcachePtr;
+ cachePtr.p = regCachePtr;
+ TcurrReclenAi = TcurrReclenAi + Tlength;
+ regCachePtr->currReclenAi = TcurrReclenAi;
+ int TattrlengthRemain = TattrLen - TcurrReclenAi;
+
+ if (TfirstfreeAttrbuf == RNIL) {
+ DEBUG("No more attrinfo buffers");
+ TCKEY_abort(signal, 24);
+ return;
+ }//if
+ saveAttrbuf(signal);
+ if (TattrlengthRemain == 0) {
+ /****************************************************************>*/
+ /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */
+ /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */
+ /* THE API CONNECT RECORD. HOWEVER IF PREPARE OR COMMIT HAS BEEN */
+ /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */
+ /* OPERATIONS. */
+ /****************************************************************>*/
+ UintR TlastConnect = regApiPtr->lastTcConnect;
+ if (TcompRECEIVING) {
+ jam();
+ regApiPtr->apiConnectstate = CS_STARTED;
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_START_COMMITTING;
+ }//if
+ tcConnectptr.i = TlastConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ attrinfoDihReceivedLab(signal);
+ } else if (TattrlengthRemain < 0) {
+ jam();
+ DEBUG("ATTRINFO wrong total length="<<Tlength
+ <<", TattrlengthRemain="<<TattrlengthRemain
+ <<", TattrLen="<<TattrLen
+ <<", TcurrReclenAi="<<TcurrReclenAi);
+ tcConnectptr.i = regApiPtr->lastTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ aiErrorLab(signal);
+ }//if
+ return;
+ } else if (regApiPtr->apiConnectstate == CS_START_SCAN) {
+ jam();
+ scanAttrinfoLab(signal, Tlength);
+ return;
+ } else {
+ switch (regApiPtr->apiConnectstate) {
+ case CS_ABORTING:
+ jam();
+ /* JUST IGNORE THE SIGNAL*/
+ // DEBUG("Drop ATTRINFO, CS_ABORTING");
+ return;
+ case CS_CONNECTED:
+ jam();
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL.*/
+ // DEBUG("Drop ATTRINFO, CS_CONNECTED");
+ return;
+ case CS_STARTED:
+ jam();
+ /****************************************************************>*/
+ /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */
+ /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */
+ /* WE ALSO NEED TO ABORT THIS TRANSACTION. */
+ /****************************************************************>*/
+ terrorCode = ZSIGNAL_ERROR;
+ printState(signal, 1);
+ abortErrorLab(signal);
+ return;
+ default:
+ jam();
+ /****************************************************************>*/
+ /* SIGNAL RECEIVED IN AN UNEXPECTED STATE. WE IGNORE SIGNAL */
+ /* SINCE WE DO NOT REALLY KNOW WHERE THE ERROR OCCURRED. */
+ /****************************************************************>*/
+ DEBUG("Drop ATTRINFO, illegal state="<<regApiPtr->apiConnectstate);
+ printState(signal, 9);
+ return;
+ }//switch
+ }//if
+}//Dbtc::execATTRINFO()
+
+/* *********************************************************************>> */
+/* */
+/* MODULE: HASH MODULE */
+/* DESCRIPTION: CONTAINS THE HASH VALUE CALCULATION */
+/* *********************************************************************> */
+void Dbtc::hash(Signal* signal)
+{
+ DatabufRecordPtr locDatabufptr;
+ UintR ti;
+ UintR Tdata0;
+ UintR Tdata1;
+ UintR Tdata2;
+ UintR Tdata3;
+ UintR* Tdata32;
+
+ CacheRecord * const regCachePtr = cachePtr.p;
+ Tdata32 = signal->theData;
+
+ Tdata0 = regCachePtr->keydata[0];
+ Tdata1 = regCachePtr->keydata[1];
+ Tdata2 = regCachePtr->keydata[2];
+ Tdata3 = regCachePtr->keydata[3];
+ Tdata32[0] = Tdata0;
+ Tdata32[1] = Tdata1;
+ Tdata32[2] = Tdata2;
+ Tdata32[3] = Tdata3;
+ if (regCachePtr->keylen > 4) {
+ locDatabufptr.i = regCachePtr->firstKeybuf;
+ ti = 4;
+ while (locDatabufptr.i != RNIL) {
+ ptrCheckGuard(locDatabufptr, cdatabufFilesize, databufRecord);
+ Tdata0 = locDatabufptr.p->data[0];
+ Tdata1 = locDatabufptr.p->data[1];
+ Tdata2 = locDatabufptr.p->data[2];
+ Tdata3 = locDatabufptr.p->data[3];
+ Tdata32[ti ] = Tdata0;
+ Tdata32[ti + 1] = Tdata1;
+ Tdata32[ti + 2] = Tdata2;
+ Tdata32[ti + 3] = Tdata3;
+ locDatabufptr.i = locDatabufptr.p->nextDatabuf;
+ ti += 4;
+ }//while
+ }//if
+
+ UintR keylen = (UintR)regCachePtr->keylen;
+ Uint32 distKey = regCachePtr->distributionKeyIndicator;
+
+ Uint32 tmp[4];
+ if(!regCachePtr->m_special_hash)
+ {
+ md5_hash(tmp, (Uint64*)&Tdata32[0], keylen);
+ }
+ else
+ {
+ handle_special_hash(tmp, Tdata32, keylen, regCachePtr->tableref, !distKey);
+ }
+
+ thashValue = tmp[0];
+ if (distKey){
+ jam();
+ tdistrHashValue = regCachePtr->distributionKey;
+ } else {
+ jam();
+ tdistrHashValue = tmp[1];
+ }//if
+}//Dbtc::hash()
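+
+ /* Editor's note (illustrative sketch, not part of DbtcMain.cpp):
+ * hash() computes one digest over the key words and uses word 0 as the
+ * hash value and word 1 as the distribution hash, unless an explicit
+ * distribution key was supplied. FNV-1a below is only a stand-in for the
+ * real md5_hash(); the shape of the result handling is the point:
+ */
+ #if 0
+ #include <cstddef>
+ #include <cstdint>
+
+ static void computeHashes(const uint32_t* key, std::size_t words,
+                           bool hasDistKey, uint32_t distKey,
+                           uint32_t& hashValue, uint32_t& distrHash)
+ {
+   uint64_t h = 1469598103934665603ull;     // FNV-1a offset basis (stand-in digest)
+   for (std::size_t i = 0; i < words; i++) {
+     h ^= key[i];
+     h *= 1099511628211ull;                 // FNV prime
+   }
+   hashValue = (uint32_t)h;                                  // plays the role of tmp[0]
+   distrHash = hasDistKey ? distKey : (uint32_t)(h >> 32);   // plays the role of tmp[1]
+ }
+ #endif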
+
+bool
+Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI,
+ bool distr)
+{
+ Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * 4 * MAX_XFRM_MULTIPLY];
+ const Uint32 dstSize = sizeof(Tmp) / 4;
+ const TableRecord* tabPtrP = &tableRecord[tabPtrI];
+ const Uint32 noOfKeyAttr = tabPtrP->noOfKeyAttr;
+ Uint32 noOfDistrKeys = tabPtrP->noOfDistrKeys;
+ const bool hasCharAttr = tabPtrP->hasCharAttr;
+
+ Uint32 *dst = (Uint32*)Tmp;
+ Uint32 dstPos = 0;
+ Uint32 srcPos = 0;
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
+ if(hasCharAttr){
+ Uint32 i = 0;
+ while (i < noOfKeyAttr) {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+
+ Uint32 srcBytes =
+ AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ Uint32 dstWords = ~0;
+ uchar* dstPtr = (uchar*)&dst[dstPos];
+ const uchar* srcPtr = (const uchar*)&src[srcPos];
+ CHARSET_INFO* cs = keyAttr.charsetInfo;
+
+ if (cs == NULL) {
+ jam();
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId =
+ AttributeDescriptor::getType(keyAttr.attributeDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ ndbrequire(ok);
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ /*
+ * Varchar is really Char. End spaces do not matter. To get
+ * same hash we blank-pad to maximum length via strnxfrm.
+ * TODO use MySQL charset-aware hash function instead
+ */
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ dstWords = (n >> 2);
+ }
+ dstPos += dstWords;
+ srcPos += srcWords;
+ keyPartLen[i++] = dstWords;
+ }
+ }
+ else
+ {
+ dst = src;
+ dstPos = srcLen;
+ }
+
+ md5_hash(dstHash, (Uint64*)dst, dstPos);
+
+ if(distr && noOfDistrKeys)
+ {
+ jam();
+ src = dst;
+ dstPos = 0;
+ Uint32 i = 0;
+ if(hasCharAttr)
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+ Uint32 len = keyPartLen[i];
+ if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ else
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ const TableRecord::KeyAttr& keyAttr = tabPtrP->keyAttr[i];
+ Uint32 len =
+ AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ len = (len + 3) / 4;
+ if(AttributeDescriptor::getDKey(keyAttr.attributeDescriptor))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ Uint32 tmp[4];
+ md5_hash(tmp, (Uint64*)dst, dstPos);
+ dstHash[1] = tmp[1];
+ }
+ return true; // success
+}
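+
+ /* Editor's note (illustrative sketch, not part of DbtcMain.cpp):
+ * handle_special_hash() runs char-type key parts through the collation's
+ * strnxfrm() weight transform before hashing, so values that compare equal
+ * (for instance differing only in trailing spaces) hash identically. A
+ * deliberately simplified standalone illustration, with trailing-space
+ * stripping standing in for the real charset-aware transform:
+ */
+ #if 0
+ #include <cstddef>
+ #include <functional>
+ #include <string>
+
+ static std::size_t charKeyHash(std::string key)
+ {
+   while (!key.empty() && key.back() == ' ')
+     key.pop_back();             // "abc" and "abc   " now hash alike
+   return std::hash<std::string>{}(key);
+ }
+ #endif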
+
+/*
+INIT_API_CONNECT_REC
+---------------------------
+*/
+/* ========================================================================= */
+/* ======= INIT_API_CONNECT_REC ======= */
+/* */
+/* ========================================================================= */
+void Dbtc::initApiConnectRec(Signal* signal,
+ ApiConnectRecord * const regApiPtr,
+ bool releaseIndexOperations)
+{
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+ UintR TfailureNr = cfailure_nr;
+ UintR TtransCount = c_counters.ctransCount;
+ UintR Ttransid0 = tcKeyReq->transId1;
+ UintR Ttransid1 = tcKeyReq->transId2;
+
+ regApiPtr->m_exec_flag = 0;
+ regApiPtr->returncode = 0;
+ regApiPtr->returnsignal = RS_TCKEYCONF;
+ ndbassert(regApiPtr->firstTcConnect == RNIL);
+ regApiPtr->firstTcConnect = RNIL;
+ regApiPtr->lastTcConnect = RNIL;
+ regApiPtr->globalcheckpointid = 0;
+ regApiPtr->lqhkeyconfrec = 0;
+ regApiPtr->lqhkeyreqrec = 0;
+ regApiPtr->tckeyrec = 0;
+ regApiPtr->tcindxrec = 0;
+ regApiPtr->failureNr = TfailureNr;
+ regApiPtr->transid[0] = Ttransid0;
+ regApiPtr->transid[1] = Ttransid1;
+ regApiPtr->commitAckMarker = RNIL;
+ regApiPtr->buddyPtr = RNIL;
+ regApiPtr->currSavePointId = 0;
+ // Trigger data
+ releaseFiredTriggerData(&regApiPtr->theFiredTriggers);
+ // Index data
+ regApiPtr->indexOpReturn = false;
+ regApiPtr->noIndexOp = 0;
+ if(releaseIndexOperations)
+ releaseAllSeizedIndexOperations(regApiPtr);
+
+ c_counters.ctransCount = TtransCount + 1;
+}//Dbtc::initApiConnectRec()
+
+int
+Dbtc::seizeTcRecord(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ tcConnectptr.i = TfirstfreeTcConnect;
+ if (TfirstfreeTcConnect >= TtcConnectFilesize) {
+ int place = 3;
+ if (TfirstfreeTcConnect != RNIL) {
+ place = 10;
+ }//if
+ TCKEY_abort(signal, place);
+ return 1;
+ }//if
+ //--------------------------------------------------------------------------
+ // Optimised version of ptrAss(tcConnectptr, tcConnectRecord)
+ //--------------------------------------------------------------------------
+ TcConnectRecord * const regTcPtr =
+ &localTcConnectRecord[TfirstfreeTcConnect];
+
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
+ UintR TlastTcConnect = regApiPtr->lastTcConnect;
+ UintR TtcConnectptrIndex = tcConnectptr.i;
+ TcConnectRecordPtr tmpTcConnectptr;
+
+ cfirstfreeTcConnect = regTcPtr->nextTcConnect;
+ tcConnectptr.p = regTcPtr;
+
+ c_counters.cconcurrentOp = TconcurrentOp + 1;
+ regTcPtr->prevTcConnect = TlastTcConnect;
+ regTcPtr->nextTcConnect = RNIL;
+ regTcPtr->accumulatingTriggerData.i = RNIL;
+ regTcPtr->accumulatingTriggerData.p = NULL;
+ regTcPtr->noFiredTriggers = 0;
+ regTcPtr->noReceivedTriggers = 0;
+ regTcPtr->triggerExecutionCount = 0;
+ regTcPtr->triggeringOperation = RNIL;
+ regTcPtr->isIndexOp = false;
+ regTcPtr->indexOp = RNIL;
+ regTcPtr->currentIndexId = RNIL;
+
+ regApiPtr->lastTcConnect = TtcConnectptrIndex;
+
+ if (TlastTcConnect == RNIL) {
+ jam();
+ regApiPtr->firstTcConnect = TtcConnectptrIndex;
+ } else {
+ tmpTcConnectptr.i = TlastTcConnect;
+ jam();
+ ptrCheckGuard(tmpTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ tmpTcConnectptr.p->nextTcConnect = TtcConnectptrIndex;
+ }//if
+ return 0;
+}//Dbtc::seizeTcRecord()
+
+int
+Dbtc::seizeCacheRecord(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
+ UintR TcacheFilesize = ccacheFilesize;
+ CacheRecord *localCacheRecord = cacheRecord;
+ if (TfirstfreeCacheRec >= TcacheFilesize) {
+ TCKEY_abort(signal, 41);
+ return 1;
+ }//if
+ CacheRecord * const regCachePtr = &localCacheRecord[TfirstfreeCacheRec];
+
+ regApiPtr->cachePtr = TfirstfreeCacheRec;
+ cfirstfreeCacheRec = regCachePtr->nextCacheRec;
+ cachePtr.i = TfirstfreeCacheRec;
+ cachePtr.p = regCachePtr;
+
+#ifdef VM_TRACE
+ // This is a good place to check that resources have
+ // been properly released from CacheRecord
+ ndbrequire(regCachePtr->firstKeybuf == RNIL);
+ ndbrequire(regCachePtr->lastKeybuf == RNIL);
+#endif
+ regCachePtr->firstKeybuf = RNIL;
+ regCachePtr->lastKeybuf = RNIL;
+ regCachePtr->firstAttrbuf = RNIL;
+ regCachePtr->lastAttrbuf = RNIL;
+ regCachePtr->currReclenAi = 0;
+ return 0;
+}//Dbtc::seizeCacheRecord()
+
+/*****************************************************************************/
+/* T C K E Y R E Q */
+/* AFTER HAVING ESTABLISHED THE CONNECT, THE APPLICATION BLOCK SENDS AN */
+/* OPERATION REQUEST TO TC. ALL NECESSARY INFORMATION TO CARRY OUT REQUEST */
+/* IS FURNISHED IN PARAMETERS. TC STORES THIS INFORMATION AND ENQUIRES */
+/* FROM DIH ABOUT THE NODES WHICH MAY HAVE THE REQUESTED DATA */
+/*****************************************************************************/
+void Dbtc::execTCKEYREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+ UintR titcLenAiInTckeyreq;
+ UintR TkeyLength;
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtr();
+ UintR Treqinfo;
+
+ jamEntry();
+ /*-------------------------------------------------------------------------
+ * Common error routines are used for several signals, they need to know
+ * where to find the transaction identifier in the signal.
+ *-------------------------------------------------------------------------*/
+ const UintR TapiIndex = tcKeyReq->apiConnectPtr;
+ const UintR TapiMaxIndex = capiConnectFilesize;
+ const UintR TtabIndex = tcKeyReq->tableId;
+ const UintR TtabMaxIndex = ctabrecFilesize;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ ttransid_ptr = 6;
+ apiConnectptr.i = TapiIndex;
+ if (TapiIndex >= TapiMaxIndex) {
+ TCKEY_abort(signal, 6);
+ return;
+ }//if
+ if (TtabIndex >= TtabMaxIndex) {
+ TCKEY_abort(signal, 7);
+ return;
+ }//if
+
+ Treqinfo = tcKeyReq->requestInfo;
+ //--------------------------------------------------------------------------
+ // Optimised version of ptrAss(tabptr, tableRecord)
+ // Optimised version of ptrAss(apiConnectptr, apiConnectRecord)
+ //--------------------------------------------------------------------------
+ ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex];
+ apiConnectptr.p = regApiPtr;
+
+ Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo);
+ Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo);
+
+ bool isIndexOp = regApiPtr->isIndexOp;
+ bool isIndexOpReturn = regApiPtr->indexOpReturn;
+ regApiPtr->isIndexOp = false; // Reset marker
+ regApiPtr->m_exec_flag |= TexecFlag;
+ switch (regApiPtr->apiConnectstate) {
+ case CS_CONNECTED:{
+ if (TstartFlag == 1 && getAllowStartTransaction() == true){
+ //---------------------------------------------------------------------
+ // Initialise API connect record if transaction is started.
+ //---------------------------------------------------------------------
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else {
+ if(getAllowStartTransaction() == true){
+ /*------------------------------------------------------------------
+ * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN
+ * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO
+ * ENSURE PROPER OPERATION OF THE COMMON ABORT HANDLING.
+ *-----------------------------------------------------------------*/
+ TCKEY_abort(signal, 0);
+ return;
+ } else {
+ /**
+ * getAllowStartTransaction() == false
+ */
+ TCKEY_abort(signal, 57);
+ return;
+ }//if
+ }
+ }
+ break;
+ case CS_STARTED:
+ if(TstartFlag == 1 && regApiPtr->firstTcConnect == RNIL)
+ {
+ /**
+ * If the last operation in the last transaction was a simple/dirty read
+ * it does not have to be committed or rolled back; hence
+ * the state will be CS_STARTED.
+ */
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else {
+ //----------------------------------------------------------------------
+ // Transaction is started already.
+ // Check that the operation is on the same transaction.
+ //-----------------------------------------------------------------------
+ compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1;
+ compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2;
+ jam();
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ TCKEY_abort(signal, 1);
+ return;
+ }//if
+ }
+ break;
+ case CS_ABORTING:
+ if (regApiPtr->abortState == AS_IDLE) {
+ if (TstartFlag == 1) {
+ //--------------------------------------------------------------------
+ // Previous transaction had been aborted and the abort was completed.
+ // It is then OK to start a new transaction again.
+ //--------------------------------------------------------------------
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ } else {
+ //--------------------------------------------------------------------
+ // The current transaction was aborted successfully.
+ // We will not do anything before we receive an operation
+ // with a start indicator. We will ignore this signal.
+ //--------------------------------------------------------------------
+ jam();
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE");
+ return;
+ }//if
+ } else {
+ //----------------------------------------------------------------------
+ // Previous transaction is still aborting
+ //----------------------------------------------------------------------
+ jam();
+ if (TstartFlag == 1) {
+ //--------------------------------------------------------------------
+ // If a new transaction tries to start while the old is
+ // still aborting, we will report this to the starting API.
+ //--------------------------------------------------------------------
+ TCKEY_abort(signal, 2);
+ return;
+ } else if(TexecFlag) {
+ TCKEY_abort(signal, 59);
+ return;
+ }
+ //----------------------------------------------------------------------
+ // Ignore signals without start indicator set when aborting transaction.
+ //----------------------------------------------------------------------
+ DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE");
+ return;
+ }//if
+ break;
+ case CS_START_COMMITTING:
+ jam();
+ if(isIndexOpReturn || TcKeyReq::getExecutingTrigger(Treqinfo)){
+ break;
+ }
+ default:
+ jam();
+ /*----------------------------------------------------------------------
+ * IN THIS CASE THE NDBAPI IS AN UNTRUSTED ENTITY THAT HAS SENT A SIGNAL
+ * WHEN IT WAS NOT EXPECTED TO.
+ * WE MIGHT BE IN A PROCESS TO RECEIVE, PREPARE,
+ * COMMIT OR COMPLETE AND OBVIOUSLY THIS IS NOT A DESIRED EVENT.
+ * WE WILL ALWAYS COMPLETE THE ABORT HANDLING BEFORE WE ALLOW
+ * ANYTHING TO HAPPEN ON THIS CONNECTION AGAIN.
+ * THUS THERE IS NO ACTION FROM THE API THAT CAN SPEED UP THIS PROCESS.
+ *---------------------------------------------------------------------*/
+ TCKEY_abort(signal, 55);
+ return;
+ }//switch
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = TtabIndex;
+ localTabptr.p = &tableRecord[TtabIndex];
+ if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) {
+ ;
+ } else {
+ /*-----------------------------------------------------------------------*/
+ /* THE API IS WORKING WITH AN OLD SCHEMA VERSION. IT NEEDS REPLACEMENT. */
+ /* COULD ALSO BE THAT THE TABLE IS NOT DEFINED. */
+ /*-----------------------------------------------------------------------*/
+ TCKEY_abort(signal, 8);
+ return;
+ }//if
+
+ //-------------------------------------------------------------------------
+ // Error Insertion for testing purposes. Test to see what happens when no
+ // more TC records available.
+ //-------------------------------------------------------------------------
+ if (ERROR_INSERTED(8032)) {
+ TCKEY_abort(signal, 3);
+ return;
+ }//if
+
+ if (seizeTcRecord(signal) != 0) {
+ return;
+ }//if
+
+ if (seizeCacheRecord(signal) != 0) {
+ return;
+ }//if
+
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+
+ /*
+ INIT_TC_CONNECT_REC
+ -------------------------
+ */
+ /* ---------------------------------------------------------------------- */
+ /* ------- INIT OPERATION RECORD WITH SIGNAL DATA AND RNILS ------- */
+ /* */
+ /* ---------------------------------------------------------------------- */
+
+ UintR TapiVersionNo = tcKeyReq->getAPIVersion(tcKeyReq->attrLen);
+ UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
+ regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1;
+ regCachePtr->apiVersionNo = TapiVersionNo;
+
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR TsenderData = tcKeyReq->senderData;
+ UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen);
+ UintR TattrinfoCount = c_counters.cattrinfoCount;
+
+ regTcPtr->apiConnect = TapiConnectptrIndex;
+ regTcPtr->clientData = TsenderData;
+ regTcPtr->commitAckMarker = RNIL;
+ regTcPtr->isIndexOp = isIndexOp;
+ regTcPtr->indexOp = regApiPtr->executingIndexOp;
+ regTcPtr->savePointId = regApiPtr->currSavePointId;
+ regApiPtr->executingIndexOp = RNIL;
+
+ if (TcKeyReq::getExecutingTrigger(Treqinfo)) {
+ // Save the TcOperationPtr for the firing operation
+ regTcPtr->triggeringOperation = TsenderData;
+ }
+
+ if (TexecFlag){
+ Uint32 currSPId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = ++currSPId;
+ }
+
+ regCachePtr->attrlength = TattrLen;
+ c_counters.cattrinfoCount = TattrinfoCount + TattrLen;
+
+ UintR TtabptrIndex = localTabptr.i;
+ UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion;
+ Uint8 TOperationType = tcKeyReq->getOperationType(Treqinfo);
+ regCachePtr->tableref = TtabptrIndex;
+ regCachePtr->schemaVersion = TtableSchemaVersion;
+ regTcPtr->operation = TOperationType;
+
+ Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
+ Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo);
+ Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo);
+ Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
+ Uint8 TexecuteFlag = TexecFlag;
+
+ regCachePtr->opSimple = TSimpleFlag;
+ regCachePtr->opExec = TInterpretedFlag;
+ regTcPtr->dirtyOp = TDirtyFlag;
+ regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
+
+ //-------------------------------------------------------------
+ // The next step is to read the up to three conditional words.
+ //-------------------------------------------------------------
+ Uint32 TkeyIndex;
+ Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo;
+ {
+ Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo);
+ Uint32 TDistrKeyIndex = TDistrGHIndex;
+
+ Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
+
+ regCachePtr->scanTakeOverInd = TDistrGHIndex;
+ regCachePtr->scanInfo = TscanInfo;
+
+ regCachePtr->distributionKey = TOptionalDataPtr[TDistrKeyIndex];
+
+ TkeyIndex = TDistrKeyIndex + TDistrKeyFlag;
+ }
+ Uint32* TkeyDataPtr = &TOptionalDataPtr[TkeyIndex];
+
+ UintR Tdata1 = TkeyDataPtr[0];
+ UintR Tdata2 = TkeyDataPtr[1];
+ UintR Tdata3 = TkeyDataPtr[2];
+ UintR Tdata4 = TkeyDataPtr[3];
+ UintR Tdata5;
+
+ regCachePtr->keydata[0] = Tdata1;
+ regCachePtr->keydata[1] = Tdata2;
+ regCachePtr->keydata[2] = Tdata3;
+ regCachePtr->keydata[3] = Tdata4;
+
+ TkeyLength = tcKeyReq->getKeyLength(Treqinfo);
+ Uint32 TAIDataIndex;
+ if (TkeyLength > 8) {
+ TAIDataIndex = TkeyIndex + 8;
+ } else {
+ if (TkeyLength == 0) {
+ TCKEY_abort(signal, 4);
+ return;
+ }//if
+ TAIDataIndex = TkeyIndex + TkeyLength;
+ }//if
+ Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex];
+
+ titcLenAiInTckeyreq = tcKeyReq->getAIInTcKeyReq(Treqinfo);
+ regCachePtr->keylen = TkeyLength;
+ regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq;
+ regCachePtr->currReclenAi = titcLenAiInTckeyreq;
+ regCachePtr->m_special_hash =
+ localTabptr.p->hasCharAttr | (localTabptr.p->noOfDistrKeys > 0);
+ Tdata1 = TAIDataPtr[0];
+ Tdata2 = TAIDataPtr[1];
+ Tdata3 = TAIDataPtr[2];
+ Tdata4 = TAIDataPtr[3];
+ Tdata5 = TAIDataPtr[4];
+
+ regCachePtr->attrinfo0 = Tdata1;
+ regCachePtr->attrinfo15[0] = Tdata2;
+ regCachePtr->attrinfo15[1] = Tdata3;
+ regCachePtr->attrinfo15[2] = Tdata4;
+ regCachePtr->attrinfo15[3] = Tdata5;
+
+ if (TOperationType == ZREAD) {
+ Uint32 TreadCount = c_counters.creadCount;
+ jam();
+ regCachePtr->opLock = 0;
+ c_counters.creadCount = TreadCount + 1;
+ } else if(TOperationType == ZREAD_EX){
+ Uint32 TreadCount = c_counters.creadCount;
+ jam();
+ TOperationType = ZREAD;
+ regTcPtr->operation = ZREAD;
+ regCachePtr->opLock = ZUPDATE;
+ c_counters.creadCount = TreadCount + 1;
+ } else {
+ if(regApiPtr->commitAckMarker == RNIL){
+ jam();
+ CommitAckMarkerPtr tmp;
+ if(!m_commitAckMarkerHash.seize(tmp)){
+ TCKEY_abort(signal, 56);
+ return;
+ } else {
+ regTcPtr->commitAckMarker = tmp.i;
+ regApiPtr->commitAckMarker = tmp.i;
+ tmp.p->transid1 = tcKeyReq->transId1;
+ tmp.p->transid2 = tcKeyReq->transId2;
+ tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref);
+ tmp.p->apiConnectPtr = TapiIndex;
+ tmp.p->noOfLqhs = 0;
+ m_commitAckMarkerHash.add(tmp);
+ }
+ }
+
+ UintR TwriteCount = c_counters.cwriteCount;
+ UintR Toperationsize = coperationsize;
+ /* --------------------------------------------------------------------
+ * IF THIS IS A TEMPORARY TABLE, DON'T UPDATE coperationsize.
+ * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND
+ * TEMP TABLES DON'T PARTICIPATE.
+ * -------------------------------------------------------------------- */
+ if (localTabptr.p->storedTable) {
+ coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17;
+ }
+ c_counters.cwriteCount = TwriteCount + 1;
+ switch (TOperationType) {
+ case ZUPDATE:
+ jam();
+ if (TattrLen == 0) {
+ //TCKEY_abort(signal, 5);
+ //return;
+ }//if
+ /*---------------------------------------------------------------------*/
+ // The missing break is intentional since we want to set the opLock
+ // variable for updates as well.
+ /*---------------------------------------------------------------------*/
+ case ZINSERT:
+ case ZDELETE:
+ jam();
+ regCachePtr->opLock = TOperationType;
+ break;
+ case ZWRITE:
+ jam();
+ // A write operation is originally an insert operation.
+ regCachePtr->opLock = ZINSERT;
+ break;
+ default:
+ TCKEY_abort(signal, 9);
+ return;
+ }//switch
+ }//if
+
+ Uint32 TabortOption = tcKeyReq->getAbortOption(Treqinfo);
+ regTcPtr->m_execAbortOption = TabortOption;
+
+ /*-------------------------------------------------------------------------
+ * Check error handling per operation
+ * If CommitFlag is set state accordingly and check for early abort
+ *------------------------------------------------------------------------*/
+ if (tcKeyReq->getCommitFlag(Treqinfo) == 1) {
+ ndbrequire(TexecuteFlag);
+ regApiPtr->apiConnectstate = CS_REC_COMMITTING;
+ } else {
+ /* ---------------------------------------------------------------------
+ * PREPARE TRANSACTION IS NOT IMPLEMENTED YET.
+ * ---------------------------------------------------------------------
+ * ELSIF (TREQINFO => 3) (*) 1 = 1 THEN
+ * IF PREPARE TRANSACTION THEN
+ * API_CONNECTPTR:API_CONNECTSTATE = REC_PREPARING
+ * SET STATE TO PREPARING
+ * --------------------------------------------------------------------- */
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ jam();
+ // Trigger execution at commit
+ regApiPtr->apiConnectstate = CS_REC_COMMITTING;
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_RECEIVING;
+ }//if
+ }//if
+ if (TkeyLength <= 4) {
+ tckeyreq050Lab(signal);
+ return;
+ } else {
+ if (cfirstfreeDatabuf != RNIL) {
+ jam();
+ linkKeybuf(signal);
+ Tdata1 = TkeyDataPtr[4];
+ Tdata2 = TkeyDataPtr[5];
+ Tdata3 = TkeyDataPtr[6];
+ Tdata4 = TkeyDataPtr[7];
+
+ DatabufRecord * const regDataPtr = databufptr.p;
+ regDataPtr->data[0] = Tdata1;
+ regDataPtr->data[1] = Tdata2;
+ regDataPtr->data[2] = Tdata3;
+ regDataPtr->data[3] = Tdata4;
+ } else {
+ jam();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ if (TkeyLength <= 8) {
+ jam();
+ tckeyreq050Lab(signal);
+ return;
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * THE TCKEYREQ DIDN'T CONTAIN ALL KEY DATA,
+ * SAVE STATE AND WAIT FOR KEYINFO
+ * --------------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = 8;
+ regTcPtr->tcConnectstate = OS_WAIT_KEYINFO;
+ return;
+ }//if
+ }//if
+ return;
+}//Dbtc::execTCKEYREQ()
+
+void Dbtc::tckeyreq050Lab(Signal* signal)
+{
+ UintR tnoOfBackup;
+ UintR tnoOfStandby;
+ UintR tnodeinfo;
+
+ hash(signal); /* NOW IT IS TIME TO CALCULATE THE HASH VALUE*/
+
+ CacheRecord * const regCachePtr = cachePtr.p;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ UintR TtcTimer = ctcTimer;
+ UintR ThashValue = thashValue;
+ UintR TdistrHashValue = tdistrHashValue;
+ UintR TdihConnectptr = regTcPtr->dihConnectptr;
+ UintR Ttableref = regCachePtr->tableref;
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = Ttableref;
+ localTabptr.p = &tableRecord[localTabptr.i];
+ Uint32 schemaVersion = regCachePtr->schemaVersion;
+ if(localTabptr.p->checkTable(schemaVersion)){
+ ;
+ } else {
+ terrorCode = localTabptr.p->getErrorCode(schemaVersion);
+ TCKEY_abort(signal, 58);
+ return;
+ }
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ regCachePtr->hashValue = ThashValue;
+
+ signal->theData[0] = TdihConnectptr;
+ signal->theData[1] = Ttableref;
+ signal->theData[2] = TdistrHashValue;
+
+ /*-------------------------------------------------------------*/
+ /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */
+ /* PROCEED IMMEDIATELY TO DIH. IN MULTI-THREADED VERSIONS WE */
+ /* HAVE TO INSERT A MUTEX ON DIH TO ENSURE PROPER OPERATION. */
+ /* SINCE THIS SIGNAL AND DIVERIFYREQ ARE THE ONLY SIGNALS SENT */
+ /* TO DIH IN TRAFFIC IT SHOULD BE OK (3% OF THE EXECUTION TIME */
+ /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB). */
+ /*-------------------------------------------------------------*/
+ EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
+ UintR TerrorIndicator = signal->theData[0];
+ jamEntry();
+ if (TerrorIndicator != 0) {
+ execDIGETNODESREF(signal);
+ return;
+ }
+
+ if(ERROR_INSERTED(8050) && signal->theData[3] != getOwnNodeId())
+ {
+ ndbassert(false);
+ signal->theData[1] = 626;
+ execDIGETNODESREF(signal);
+ return;
+ }
+
+ /****************>>*/
+ /* DIGETNODESCONF >*/
+ /* ***************>*/
+
+ UintR Tdata1 = signal->theData[1];
+ UintR Tdata2 = signal->theData[2];
+ UintR Tdata3 = signal->theData[3];
+ UintR Tdata4 = signal->theData[4];
+ UintR Tdata5 = signal->theData[5];
+ UintR Tdata6 = signal->theData[6];
+
+ regCachePtr->fragmentid = Tdata1;
+ tnodeinfo = Tdata2;
+
+ regTcPtr->tcNodedata[0] = Tdata3;
+ regTcPtr->tcNodedata[1] = Tdata4;
+ regTcPtr->tcNodedata[2] = Tdata5;
+ regTcPtr->tcNodedata[3] = Tdata6;
+
+ Uint8 Toperation = regTcPtr->operation;
+ Uint8 Tdirty = regTcPtr->dirtyOp;
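+ // tnodeinfo from DIGETNODESCONF packs the replica counts and the
+ // distribution key: bits 0-1 = backups, bits 8-9 = standbys, bits 16-23 = key.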
+ tnoOfBackup = tnodeinfo & 3;
+ tnoOfStandby = (tnodeinfo >> 8) & 3;
+
+ regCachePtr->fragmentDistributionKey = (tnodeinfo >> 16) & 255;
+ if (Toperation == ZREAD) {
+ if (Tdirty == 1) {
+ jam();
+ /*-------------------------------------------------------------*/
+ /* A SIMPLE READ CAN SELECT ANY OF THE PRIMARY AND */
+ /* BACKUP NODES TO READ. WE WILL TRY TO SELECT THIS */
+ /* NODE IF POSSIBLE TO AVOID UNNECESSARY COMMUNICATION */
+ /* WITH SIMPLE READS. */
+ /*-------------------------------------------------------------*/
+ arrGuard(tnoOfBackup, 4);
+ UintR Tindex;
+ UintR TownNode = cownNodeid;
+ for (Tindex = 1; Tindex <= tnoOfBackup; Tindex++) {
+ UintR Tnode = regTcPtr->tcNodedata[Tindex];
+ jam();
+ if (Tnode == TownNode) {
+ jam();
+ regTcPtr->tcNodedata[0] = Tnode;
+ }//if
+ }//for
+ if(ERROR_INSERTED(8048) || ERROR_INSERTED(8049))
+ {
+ for (Tindex = 0; Tindex <= tnoOfBackup; Tindex++)
+ {
+ UintR Tnode = regTcPtr->tcNodedata[Tindex];
+ jam();
+ if (Tnode != TownNode) {
+ jam();
+ regTcPtr->tcNodedata[0] = Tnode;
+ ndbout_c("Choosing %d", Tnode);
+ }//if
+ }//for
+ }
+ }//if
+ jam();
+ regTcPtr->lastReplicaNo = 0;
+ regTcPtr->noOfNodes = 1;
+ } else {
+ UintR TlastReplicaNo;
+ jam();
+ TlastReplicaNo = tnoOfBackup + tnoOfStandby;
+ regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo;
+ regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1);
+ }//if
+ if (regCachePtr->lenAiInTckeyreq == regCachePtr->attrlength) {
+ /****************************************************************>*/
+ /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */
+ /* OPERATION HAS BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */
+ /* THE API CONNECT RECORD. HOWEVER IF PREPARE OR COMMIT HAS BEEN */
+ /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */
+ /* OPERATIONS. WE KNOW THAT WE WILL WAIT FOR DICT NEXT. IT IS NOT */
+ /* POSSIBLE FOR THE TC CONNECTION TO BE READY YET. */
+ /****************************************************************>*/
+ switch (regApiPtr->apiConnectstate) {
+ case CS_RECEIVING:
+ jam();
+ regApiPtr->apiConnectstate = CS_STARTED;
+ break;
+ case CS_REC_COMMITTING:
+ jam();
+ regApiPtr->apiConnectstate = CS_START_COMMITTING;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+ attrinfoDihReceivedLab(signal);
+ return;
+ } else {
+ if (regCachePtr->lenAiInTckeyreq < regCachePtr->attrlength) {
+ TtcTimer = ctcTimer;
+ jam();
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ regTcPtr->tcConnectstate = OS_WAIT_ATTR;
+ return;
+ } else {
+ TCKEY_abort(signal, 11);
+ return;
+ }//if
+ }//if
+ return;
+}//Dbtc::tckeyreq050Lab()
+
+void Dbtc::attrinfoDihReceivedLab(Signal* signal)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ Uint16 Tnode = regTcPtr->tcNodedata[0];
+
+ TableRecordPtr localTabptr;
+ localTabptr.i = regCachePtr->tableref;
+ localTabptr.p = &tableRecord[localTabptr.i];
+
+ if(localTabptr.p->checkTable(regCachePtr->schemaVersion)){
+ ;
+ } else {
+ terrorCode = localTabptr.p->getErrorCode(regCachePtr->schemaVersion);
+ TCKEY_abort(signal, 58);
+ return;
+ }
+ arrGuard(Tnode, MAX_NDB_NODES);
+ packLqhkeyreq(signal, calcLqhBlockRef(Tnode));
+}//Dbtc::attrinfoDihReceivedLab()
+
+void Dbtc::packLqhkeyreq(Signal* signal,
+ BlockReference TBRef)
+{
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR Tkeylen = regCachePtr->keylen;
+ UintR TfirstAttrbuf = regCachePtr->firstAttrbuf;
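+ // Send the LQHKEYREQ itself first, then any key data beyond the four
+ // inlined words, and finally the attrinfo buffered for this operation.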
+ sendlqhkeyreq(signal, TBRef);
+ if (Tkeylen > 4) {
+ packKeyData000Lab(signal, TBRef, Tkeylen - 4);
+ releaseKeys();
+ }//if
+ packLqhkeyreq040Lab(signal,
+ TfirstAttrbuf,
+ TBRef);
+}//Dbtc::packLqhkeyreq()
+
+void Dbtc::sendlqhkeyreq(Signal* signal,
+ BlockReference TBRef)
+{
+ UintR tslrAttrLen;
+ UintR Tdata10;
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8002)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8007)) {
+ if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8008)) {
+ if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8009)) {
+ if (apiConnectptr.p->apiConnectstate == CS_STARTED) {
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8010)) {
+ if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) {
+ return;
+ }//if
+ }//if
+#endif
+
+ tslrAttrLen = 0;
+ LqhKeyReq::setAttrLen(tslrAttrLen, regCachePtr->attrlength);
+ /* ---------------------------------------------------------------------- */
+ // Bit16 == 0 since StoredProcedures are not yet supported.
+ /* ---------------------------------------------------------------------- */
+ LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->fragmentDistributionKey);
+ LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
+
+ Tdata10 = 0;
+ LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
+ LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
+ LqhKeyReq::setLockType(Tdata10, regCachePtr->opLock);
+ /* ---------------------------------------------------------------------- */
+ // Indicate Application Reference is present in bit 15
+ /* ---------------------------------------------------------------------- */
+ LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
+ LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
+ LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
+ LqhKeyReq::setSimpleFlag(Tdata10, regCachePtr->opSimple);
+ LqhKeyReq::setOperation(Tdata10, regTcPtr->operation);
+ /* -----------------------------------------------------------------------
+ * Sequential Number of first LQH = 0, bit 22-23
+ * IF ATTRIBUTE INFORMATION IS SENT IN TCKEYREQ,
+ * IT IS ALSO SENT IN LQHKEYREQ
+ * ----------------------------------------------------------------------- */
+ LqhKeyReq::setAIInLqhKeyReq(Tdata10, regCachePtr->lenAiInTckeyreq);
+ /* -----------------------------------------------------------------------
+ * Bit 27 == 0 since TC record is the same as the client record.
+ * Bit 28 == 0 since readLenAi can only be set after reading in LQH.
+ * ----------------------------------------------------------------------- */
+ //LqhKeyReq::setAPIVersion(Tdata10, regCachePtr->apiVersionNo);
+ Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if(commitAckMarker != RNIL){
+ jam();
+
+ LqhKeyReq::setMarkerFlag(Tdata10, 1);
+
+ CommitAckMarker * tmp;
+ tmp = m_commitAckMarkerHash.getPtr(commitAckMarker);
+
+ /**
+ * Populate LQH array
+ */
+ const Uint32 noOfLqhs = regTcPtr->noOfNodes;
+ tmp->noOfLqhs = noOfLqhs;
+ for(Uint32 i = 0; i<noOfLqhs; i++){
+ tmp->lqhNodeId[i] = regTcPtr->tcNodedata[i];
+ }
+ }
+
+ /* ************************************************************> */
+ /* NO READ LENGTH SENT FROM TC. SEQUENTIAL NUMBER IS 1 AND IT */
+ /* IS SENT TO A PRIMARY NODE. */
+ /* ************************************************************> */
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6;
+
+ LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtrSend();
+
+ sig0 = tcConnectptr.i;
+ sig2 = regCachePtr->hashValue;
+ sig4 = cownref;
+ sig5 = regTcPtr->savePointId;
+
+ lqhKeyReq->clientConnectPtr = sig0;
+ lqhKeyReq->attrLen = tslrAttrLen;
+ lqhKeyReq->hashValue = sig2;
+ lqhKeyReq->requestInfo = Tdata10;
+ lqhKeyReq->tcBlockref = sig4;
+ lqhKeyReq->savePointId = sig5;
+
+ sig0 = regCachePtr->tableref + (regCachePtr->schemaVersion << 16);
+ sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16);
+ sig2 = regApiPtr->transid[0];
+ sig3 = regApiPtr->transid[1];
+ sig4 = regApiPtr->ndbapiBlockref;
+ sig5 = regTcPtr->clientData;
+ sig6 = regCachePtr->scanInfo;
+
+ lqhKeyReq->tableSchemaVersion = sig0;
+ lqhKeyReq->fragmentData = sig1;
+ lqhKeyReq->transId1 = sig2;
+ lqhKeyReq->transId2 = sig3;
+ lqhKeyReq->scanInfo = sig6;
+
+ lqhKeyReq->variableData[0] = sig4;
+ lqhKeyReq->variableData[1] = sig5;
+
+ UintR nextPos = 2;
+
+ if (regTcPtr->lastReplicaNo > 1) {
+ sig0 = (UintR)regTcPtr->tcNodedata[2] +
+ (UintR)(regTcPtr->tcNodedata[3] << 16);
+ lqhKeyReq->variableData[nextPos] = sig0;
+ nextPos++;
+ }//if
+
+ sig0 = regCachePtr->keydata[0];
+ sig1 = regCachePtr->keydata[1];
+ sig2 = regCachePtr->keydata[2];
+ sig3 = regCachePtr->keydata[3];
+ UintR Tkeylen = regCachePtr->keylen;
+
+ lqhKeyReq->variableData[nextPos + 0] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+
+ if (Tkeylen < 4) {
+ nextPos += Tkeylen;
+ } else {
+ nextPos += 4;
+ }//if
+
+ sig0 = regCachePtr->attrinfo0;
+ sig1 = regCachePtr->attrinfo15[0];
+ sig2 = regCachePtr->attrinfo15[1];
+ sig3 = regCachePtr->attrinfo15[2];
+ sig4 = regCachePtr->attrinfo15[3];
+ UintR TlenAi = regCachePtr->lenAiInTckeyreq;
+
+ lqhKeyReq->variableData[nextPos + 0] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+ lqhKeyReq->variableData[nextPos + 4] = sig4;
+
+ nextPos += TlenAi;
+
+ // Reset trigger count
+ regTcPtr->accumulatingTriggerData.i = RNIL;
+ regTcPtr->accumulatingTriggerData.p = NULL;
+ regTcPtr->noFiredTriggers = 0;
+ regTcPtr->triggerExecutionCount = 0;
+
+ sendSignal(TBRef, GSN_LQHKEYREQ, signal,
+ nextPos + LqhKeyReq::FixedSignalLength, JBB);
+}//Dbtc::sendlqhkeyreq()
+
+void Dbtc::packLqhkeyreq040Lab(Signal* signal,
+ UintR anAttrBufIndex,
+ BlockReference TBRef)
+{
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ CacheRecord * const regCachePtr = cachePtr.p;
+#ifdef ERROR_INSERT
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ if (ERROR_INSERTED(8009)) {
+ if (regApiPtr->apiConnectstate == CS_STARTED) {
+ attrbufptr.i = RNIL;
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8010)) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ attrbufptr.i = RNIL;
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+#endif
+
+ UintR TattrbufFilesize = cattrbufFilesize;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+ while (1) {
+ if (anAttrBufIndex == RNIL) {
+ UintR TtcTimer = ctcTimer;
+ UintR Tread = (regTcPtr->operation == ZREAD);
+ UintR Tsimple = (regCachePtr->opSimple == ZTRUE);
+ UintR Tboth = Tread & Tsimple;
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+ jam();
+ /*--------------------------------------------------------------------
+ * WE HAVE SENT ALL THE SIGNALS OF THIS OPERATION. SET STATE AND EXIT.
+ *---------------------------------------------------------------------*/
+ releaseAttrinfo();
+ if (Tboth) {
+ jam();
+ releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
+ return;
+ }//if
+ regTcPtr->tcConnectstate = OS_OPERATING;
+ return;
+ }//if
+ if (anAttrBufIndex < TattrbufFilesize) {
+ AttrbufRecord * const regAttrPtr = &localAttrbufRecord[anAttrBufIndex];
+ anAttrBufIndex = regAttrPtr->attrbuf[ZINBUF_NEXT];
+ sendAttrinfo(signal,
+ tcConnectptr.i,
+ regAttrPtr,
+ TBRef);
+ } else {
+ TCKEY_abort(signal, 17);
+ return;
+ }//if
+ }//while
+}//Dbtc::packLqhkeyreq040Lab()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL ATTRINFO RECORDS IN AN OPERATION RECORD ------- */
+/* ========================================================================= */
+void Dbtc::releaseAttrinfo()
+{
+ UintR Tmp;
+ AttrbufRecordPtr Tattrbufptr;
+ CacheRecord * const regCachePtr = cachePtr.p;
+ UintR TattrbufFilesize = cattrbufFilesize;
+ UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf;
+ Tattrbufptr.i = regCachePtr->firstAttrbuf;
+ AttrbufRecord *localAttrbufRecord = attrbufRecord;
+
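+ // Walk the operation's attrinfo chain and push every buffer back onto
+ // the free list; a chain that does not end in RNIL is a system error.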
+ while (Tattrbufptr.i < TattrbufFilesize) {
+ Tattrbufptr.p = &localAttrbufRecord[Tattrbufptr.i];
+ Tmp = Tattrbufptr.p->attrbuf[ZINBUF_NEXT];
+ Tattrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf;
+ TfirstfreeAttrbuf = Tattrbufptr.i;
+ Tattrbufptr.i = Tmp;
+ jam();
+ }//while
+ if (Tattrbufptr.i == RNIL) {
+//---------------------------------------------------
+// Now we will release the cache record at the same
+// time as releasing the attrinfo records.
+//---------------------------------------------------
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeCacheRec = cfirstfreeCacheRec;
+ UintR TCacheIndex = cachePtr.i;
+ cfirstfreeAttrbuf = TfirstfreeAttrbuf;
+ regCachePtr->nextCacheRec = TfirstfreeCacheRec;
+ cfirstfreeCacheRec = TCacheIndex;
+ regApiPtr->cachePtr = RNIL;
+ return;
+ }//if
+ systemErrorLab(0);
+ return;
+}//Dbtc::releaseAttrinfo()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
+/* ========================================================================= */
+void Dbtc::releaseSimpleRead(Signal* signal,
+ ApiConnectRecordPtr regApiPtr,
+ TcConnectRecord* regTcPtr)
+{
+ Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
+ Uint32 TclientData = regTcPtr->clientData;
+ Uint32 Tnode = regTcPtr->tcNodedata[0];
+ Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+ Uint32 TsimpleReadCount = c_counters.csimpleReadCount;
+ ConnectionState state = regApiPtr.p->apiConnectstate;
+
+ regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
+ regApiPtr.p->tckeyrec = Ttckeyrec + 2;
+
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+
+ /**
+ * No LQHKEYCONF in Simple/Dirty read
+ * Therefore decrease the number of LQHKEYCONF(REF) we are waiting for
+ */
+ c_counters.csimpleReadCount = TsimpleReadCount + 1;
+ regApiPtr.p->lqhkeyreqrec = --Tlqhkeyreqrec;
+
+ if(Tlqhkeyreqrec == 0)
+ {
+ /**
+ * Special case of lqhKeyConf_checkTransactionState:
+ * - commit with zero operations: handle only for simple read
+ */
+ sendtckeyconf(signal, state == CS_START_COMMITTING);
+ regApiPtr.p->apiConnectstate =
+ (state == CS_START_COMMITTING ? CS_CONNECTED : state);
+ setApiConTimer(regApiPtr.i, 0, __LINE__);
+
+ return;
+ }
+
+ /**
+ * Emulate LQHKEYCONF
+ */
+ lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
+}//Dbtc::releaseSimpleRead()
+
+/* ------------------------------------------------------------------------- */
+/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::unlinkReadyTcCon(Signal* signal)
+{
+ TcConnectRecordPtr urtTcConnectptr;
+
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
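+ // Unlink this TC connect record from the doubly linked list of
+ // operations anchored in the API connect record.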
+ if (regTcPtr->prevTcConnect != RNIL) {
+ jam();
+ urtTcConnectptr.i = regTcPtr->prevTcConnect;
+ ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ urtTcConnectptr.p->nextTcConnect = regTcPtr->nextTcConnect;
+ } else {
+ jam();
+ regApiPtr->firstTcConnect = regTcPtr->nextTcConnect;
+ }//if
+ if (regTcPtr->nextTcConnect != RNIL) {
+ jam();
+ urtTcConnectptr.i = regTcPtr->nextTcConnect;
+ ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ urtTcConnectptr.p->prevTcConnect = regTcPtr->prevTcConnect;
+ } else {
+ jam();
+ regApiPtr->lastTcConnect = tcConnectptr.p->prevTcConnect;
+ }//if
+}//Dbtc::unlinkReadyTcCon()
+
+void Dbtc::releaseTcCon()
+{
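+ // Reset the TC connect record, push it onto the free list and
+ // decrement the concurrent operation counter.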
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
+ UintR TtcConnectptrIndex = tcConnectptr.i;
+
+ regTcPtr->tcConnectstate = OS_CONNECTED;
+ regTcPtr->nextTcConnect = TfirstfreeTcConnect;
+ regTcPtr->apiConnect = RNIL;
+ regTcPtr->isIndexOp = false;
+ regTcPtr->indexOp = RNIL;
+ cfirstfreeTcConnect = TtcConnectptrIndex;
+ c_counters.cconcurrentOp = TconcurrentOp - 1;
+}//Dbtc::releaseTcCon()
+
+void Dbtc::execPACKED_SIGNAL(Signal* signal)
+{
+ LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+
+ UintR Ti;
+ UintR Tstep = 0;
+ UintR Tlength;
+ UintR TpackedData[28];
+ UintR Tdata1, Tdata2, Tdata3, Tdata4;
+
+ jamEntry();
+ Tlength = signal->length();
+ if (Tlength > 25) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ Uint32* TpackDataPtr;
+ for (Ti = 0; Ti < Tlength; Ti += 4) {
+ Uint32* TsigDataPtr = &signal->theData[Ti];
+ Tdata1 = TsigDataPtr[0];
+ Tdata2 = TsigDataPtr[1];
+ Tdata3 = TsigDataPtr[2];
+ Tdata4 = TsigDataPtr[3];
+
+ TpackDataPtr = &TpackedData[Ti];
+ TpackDataPtr[0] = Tdata1;
+ TpackDataPtr[1] = Tdata2;
+ TpackDataPtr[2] = Tdata3;
+ TpackDataPtr[3] = Tdata4;
+ }//for
+ while (Tlength > Tstep) {
+
+ TpackDataPtr = &TpackedData[Tstep];
+ Tdata1 = TpackDataPtr[0];
+ Tdata2 = TpackDataPtr[1];
+ Tdata3 = TpackDataPtr[2];
+
+ lqhKeyConf->connectPtr = Tdata1 & 0x0FFFFFFF;
+ lqhKeyConf->opPtr = Tdata2;
+ lqhKeyConf->userRef = Tdata3;
+
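+ // The top four bits of the first packed word select the signal type;
+ // the low 28 bits were extracted above as the connect pointer.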
+ switch (Tdata1 >> 28) {
+ case ZCOMMITTED:
+ signal->header.theLength = 3;
+ execCOMMITTED(signal);
+ Tstep += 3;
+ break;
+ case ZCOMPLETED:
+ signal->header.theLength = 3;
+ execCOMPLETED(signal);
+ Tstep += 3;
+ break;
+ case ZLQHKEYCONF:
+ jam();
+ Tdata1 = TpackDataPtr[3];
+ Tdata2 = TpackDataPtr[4];
+ Tdata3 = TpackDataPtr[5];
+ Tdata4 = TpackDataPtr[6];
+
+ lqhKeyConf->readLen = Tdata1;
+ lqhKeyConf->transId1 = Tdata2;
+ lqhKeyConf->transId2 = Tdata3;
+ lqhKeyConf->noFiredTriggers = Tdata4;
+ signal->header.theLength = LqhKeyConf::SignalLength;
+ execLQHKEYCONF(signal);
+ Tstep += LqhKeyConf::SignalLength;
+ break;
+ default:
+ systemErrorLab(signal);
+ return;
+ }//switch
+ }//while
+ return;
+}//Dbtc::execPACKED_SIGNAL()
+
+void Dbtc::execLQHKEYCONF(Signal* signal)
+{
+ const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ UintR compare_transid1, compare_transid2;
+ BlockReference tlastLqhBlockref;
+ UintR tlastLqhConnect;
+ UintR treadlenAi;
+ UintR TtcConnectptrIndex;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+
+ tlastLqhConnect = lqhKeyConf->connectPtr;
+ TtcConnectptrIndex = lqhKeyConf->opPtr;
+ tlastLqhBlockref = lqhKeyConf->userRef;
+ treadlenAi = lqhKeyConf->readLen;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ /*------------------------------------------------------------------------
+ * NUMBER OF EXTERNAL TRIGGERS FIRED IN DATA[6]
+ * OPERATION IS NOW COMPLETED. CHECK FOR CORRECT OPERATION POINTER
+ * TO ENSURE NO CRASHES BECAUSE OF ERRONEOUS NODES. CHECK STATE OF
+ * OPERATION. THEN SET OPERATION STATE AND RETRIEVE ALL POINTERS
+ * OF THIS OPERATION. PUT COMPLETED OPERATION IN LIST OF COMPLETED
+ * OPERATIONS ON THE LQH CONNECT RECORD.
+ *------------------------------------------------------------------------
+ * THIS SIGNAL ALWAYS ARRIVES BEFORE THE ABORTED SIGNAL ARRIVES SINCE IT USES
+ * THE SAME PATH BACK TO TC AS THE ABORTED SIGNAL DOES. WE DO HOWEVER HAVE A
+ * PROBLEM WHEN WE ENCOUNTER A TIME-OUT WAITING FOR THE ABORTED SIGNAL.
+ * THEN THIS SIGNAL MIGHT ARRIVE WHEN THE TC CONNECT RECORD HAS BEEN REUSED
+ * BY ANOTHER TRANSACTION, THUS WE CHECK THE TRANSACTION ID OF THE SIGNAL
+ * BEFORE ACCEPTING THIS SIGNAL.
+ * Due to packing of LQHKEYCONF the ABORTED signal can now arrive before
+ * this.
+ * This is more reason to ignore the signal if not all states are correct.
+ *------------------------------------------------------------------------*/
+ if (TtcConnectptrIndex >= TtcConnectFilesize) {
+ TCKEY_abort(signal, 25);
+ return;
+ }//if
+ TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
+ OperationState TtcConnectstate = regTcPtr->tcConnectstate;
+ tcConnectptr.i = TtcConnectptrIndex;
+ tcConnectptr.p = regTcPtr;
+ if (TtcConnectstate != OS_OPERATING) {
+ warningReport(signal, 23);
+ return;
+ }//if
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ UintR TapiConnectptrIndex = regTcPtr->apiConnect;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR Ttrans1 = lqhKeyConf->transId1;
+ UintR Ttrans2 = lqhKeyConf->transId2;
+ Uint32 noFired = lqhKeyConf->noFiredTriggers;
+
+ if (TapiConnectptrIndex >= TapiConnectFilesize) {
+ TCKEY_abort(signal, 29);
+ return;
+ }//if
+ ApiConnectRecord * const regApiPtr =
+ &localApiConnectRecord[TapiConnectptrIndex];
+ apiConnectptr.i = TapiConnectptrIndex;
+ apiConnectptr.p = regApiPtr;
+ compare_transid1 = regApiPtr->transid[0] ^ Ttrans1;
+ compare_transid2 = regApiPtr->transid[1] ^ Ttrans2;
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 24);
+ return;
+ }//if
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8029)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8003)) {
+ if (regApiPtr->apiConnectstate == CS_STARTED) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8004)) {
+ if (regApiPtr->apiConnectstate == CS_RECEIVING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8005)) {
+ if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8006)) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ }//if
+ if (ERROR_INSERTED(8023)) {
+ SET_ERROR_INSERT_VALUE(8024);
+ return;
+ }//if
+#endif
+ UintR TtcTimer = ctcTimer;
+ regTcPtr->lastLqhCon = tlastLqhConnect;
+ regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
+ regTcPtr->noFiredTriggers = noFired;
+
+ UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec;
+ UintR TclientData = regTcPtr->clientData;
+ UintR TdirtyOp = regTcPtr->dirtyOp;
+ ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
+ if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
+ TCKEY_abort(signal, 30);
+ return;
+ }
+ if (TapiConnectstate == CS_ABORTING) {
+ warningReport(signal, 27);
+ return;
+ }//if
+
+ setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
+
+ if (regTcPtr->isIndexOp) {
+ jam();
+ // This was an internal TCKEYREQ
+ // will be returned unpacked
+ regTcPtr->attrInfoLen = treadlenAi;
+ } else {
+ if (noFired == 0 && regTcPtr->triggeringOperation == RNIL) {
+ jam();
+ /*
+ * Skip counting triggering operations the first round
+ * since they will enter execLQHKEYCONF a second time
+ * Skip counting internally generated TcKeyReq
+ */
+ regApiPtr->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi;
+ regApiPtr->tckeyrec = Ttckeyrec + 2;
+ }//if
+ }//if
+ if (TdirtyOp == ZTRUE) {
+ UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
+ jam();
+ releaseDirtyWrite(signal);
+ regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
+ } else {
+ jam();
+ if (noFired == 0) {
+ jam();
+ // No triggers to execute
+ UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
+ regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1;
+ regTcPtr->tcConnectstate = OS_PREPARED;
+ }
+ }//if
+
+ /**
+ * And now decide what to do next
+ */
+ if (regTcPtr->triggeringOperation != RNIL) {
+ jam();
+ // This operation was created by a trigger executing operation
+ // Restart it if we have executed all its triggers
+ TcConnectRecordPtr opPtr;
+
+ opPtr.i = regTcPtr->triggeringOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ opPtr.p->triggerExecutionCount--;
+ if (opPtr.p->triggerExecutionCount == 0) {
+ /*
+ We have completed the current trigger execution
+ Continue the triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opPtr.p);
+ }
+ } else if (noFired == 0) {
+ // This operation did not fire any triggers, finish operation
+ jam();
+ if (regTcPtr->isIndexOp) {
+ jam();
+ setupIndexOpReturn(regApiPtr, regTcPtr);
+ }
+ lqhKeyConf_checkTransactionState(signal, regApiPtr);
+ } else {
+ // We have fired triggers
+ jam();
+ saveTriggeringOpState(signal, regTcPtr);
+ if (regTcPtr->noReceivedTriggers == noFired) {
+ ApiConnectRecordPtr transPtr;
+
+ // We have received all data
+ jam();
+ transPtr.i = TapiConnectptrIndex;
+ transPtr.p = regApiPtr;
+ executeTriggers(signal, &transPtr);
+ }
+ // else wait for more trigger data
+ }
+}//Dbtc::execLQHKEYCONF()
+
+void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr,
+ TcConnectRecord* regTcPtr)
+{
+ regApiPtr->indexOpReturn = true;
+ regApiPtr->indexOp = regTcPtr->indexOp;
+ regApiPtr->clientData = regTcPtr->clientData;
+ regApiPtr->attrInfoLen = regTcPtr->attrInfoLen;
+}
+
+/**
+ * lqhKeyConf_checkTransactionState
+ *
+ * This function checks state variables and
+ * decides whether it should wait for more LQHKEYCONF signals
+ * or start committing
+ */
+void
+Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
+ ApiConnectRecord * const apiConnectPtrP)
+{
+/*---------------------------------------------------------------*/
+/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */
+/* SEND TCKEYCONF FOR ALL OPERATIONS EXCEPT THE LAST ONE. WHEN */
+/* THE TRANSACTION THEN IS COMMITTED TCKEYCONF IS SENT FOR THE */
+/* WHOLE TRANSACTION */
+/* IF THE COMMIT FLAG IS NOT RECEIVED DBTC WILL SEND TCKEYCONF */
+/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */
+/* TRANSACTION */
+/*---------------------------------------------------------------*/
+ ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate;
+ UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec;
+ UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec;
+ int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;
+
+ switch (TapiConnectstate) {
+ case CS_START_COMMITTING:
+ if (TnoOfOutStanding == 0) {
+ jam();
+ diverify010Lab(signal);
+ return;
+ } else if (TnoOfOutStanding > 0) {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ return;
+ } else {
+ TCKEY_abort(signal, 44);
+ return;
+ }//if
+ return;
+ case CS_STARTED:
+ case CS_RECEIVING:
+ if (TnoOfOutStanding == 0) {
+ jam();
+ sendtckeyconf(signal, 2);
+ return;
+ } else {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ }//if
+ return;
+ case CS_REC_COMMITTING:
+ if (TnoOfOutStanding > 0) {
+ if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ } else if (apiConnectPtrP->indexOpReturn) {
+ jam();
+ sendtckeyconf(signal, 0);
+ return;
+ }//if
+ jam();
+ return;
+ }//if
+ TCKEY_abort(signal, 45);
+ return;
+ case CS_CONNECTED:
+ jam();
+/*---------------------------------------------------------------*/
+/* WE HAVE CONCLUDED THE TRANSACTION SINCE IT CONSISTED */
+/* ONLY OF DIRTY WRITES AND ALL OF THOSE WERE */
+/* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */
+/*---------------------------------------------------------------*/
+ apiConnectPtrP->tckeyrec = 0;
+ return;
+ default:
+ TCKEY_abort(signal, 46);
+ return;
+ }//switch
+}//Dbtc::lqhKeyConf_checkTransactionState()
+
+void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
+{
+ if(ERROR_INSERTED(8049)){
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = TcContinueB::DelayTCKEYCONF;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = TcommitFlag;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 3000, 3);
+ return;
+ }
+
+ HostRecordPtr localHostptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ const UintR TopWords = (UintR)regApiPtr->tckeyrec;
+ localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
+ const Uint32 type = getNodeInfo(localHostptr.i).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
+ const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 0 : 1;
+ ptrAss(localHostptr, hostRecord);
+ UintR TcurrLen = localHostptr.p->noOfWordsTCKEYCONF;
+ UintR confInfo = 0;
+ TcKeyConf::setCommitFlag(confInfo, TcommitFlag == 1);
+ TcKeyConf::setMarkerFlag(confInfo, Tmarker);
+ const UintR TpacketLen = 6 + TopWords;
+ regApiPtr->tckeyrec = 0;
+
+ if (regApiPtr->indexOpReturn) {
+ jam();
+ // Return internally generated TCKEY
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
+ TcKeyConf::setNoOfOperations(confInfo, 1);
+ tcKeyConf->apiConnectPtr = regApiPtr->indexOp;
+ tcKeyConf->gci = regApiPtr->globalcheckpointid;
+ tcKeyConf->confInfo = confInfo;
+ tcKeyConf->transId1 = regApiPtr->transid[0];
+ tcKeyConf->transId2 = regApiPtr->transid[1];
+ tcKeyConf->operations[0].apiOperationPtr = regApiPtr->clientData;
+ tcKeyConf->operations[0].attrInfoLen = regApiPtr->attrInfoLen;
+ Uint32 sigLen = TcKeyConf::StaticLength + TcKeyConf::OperationLength;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYCONF, signal, sigLen);
+ regApiPtr->indexOpReturn = false;
+ if (TopWords == 0) {
+ jam();
+ return; // No queued TcKeyConf
+ }//if
+ }//if
+ if(TcommitFlag){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
+ TcKeyConf::setNoOfOperations(confInfo, (TopWords >> 1));
+ if ((TpacketLen > 25) || !is_api){
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend();
+
+ jam();
+ tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ tcKeyConf->gci = regApiPtr->globalcheckpointid;
+ tcKeyConf->confInfo = confInfo;
+ tcKeyConf->transId1 = regApiPtr->transid[0];
+ tcKeyConf->transId2 = regApiPtr->transid[1];
+ copyFromToLen(&regApiPtr->tcSendArray[0],
+ (UintR*)&tcKeyConf->operations,
+ (UintR)ZTCOPCONF_SIZE);
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEYCONF, signal, (TpacketLen - 1), JBB);
+ return;
+ } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
+ jam();
+ sendPackedTCKEYCONF(signal, localHostptr.p, localHostptr.i);
+ TcurrLen = 0;
+ } else {
+ jam();
+ updatePackedList(signal, localHostptr.p, localHostptr.i);
+ }//if
+ // -------------------------------------------------------------------------
+ // The header contains the block reference of receiver plus the real signal
+ // length - 3, since we have the real signal length plus one additional word
+ // for the header we have to do - 4.
+ // -------------------------------------------------------------------------
+ UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
+ UintR Tpack1 = regApiPtr->ndbapiConnect;
+ UintR Tpack2 = regApiPtr->globalcheckpointid;
+ UintR Tpack3 = confInfo;
+ UintR Tpack4 = regApiPtr->transid[0];
+ UintR Tpack5 = regApiPtr->transid[1];
+
+ localHostptr.p->noOfWordsTCKEYCONF = TcurrLen + TpacketLen;
+
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 0] = Tpack0;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 1] = Tpack1;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 2] = Tpack2;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 3] = Tpack3;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 4] = Tpack4;
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 5] = Tpack5;
+
+ UintR Ti;
+ for (Ti = 6; Ti < TpacketLen; Ti++) {
+ localHostptr.p->packedWordsTCKEYCONF[TcurrLen + Ti] =
+ regApiPtr->tcSendArray[Ti - 6];
+ }//for
+}//Dbtc::sendtckeyconf()
+
+void Dbtc::copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR Tlen)
+{
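+ // Copy Tlen words from sourceBuffer to destBuffer, four words per
+ // iteration, with the remaining 0-3 words copied one at a time.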
+ UintR Tindex = 0;
+ UintR Ti;
+ while (Tlen >= 4) {
+ UintR Tdata0 = sourceBuffer[Tindex + 0];
+ UintR Tdata1 = sourceBuffer[Tindex + 1];
+ UintR Tdata2 = sourceBuffer[Tindex + 2];
+ UintR Tdata3 = sourceBuffer[Tindex + 3];
+ Tlen -= 4;
+ destBuffer[Tindex + 0] = Tdata0;
+ destBuffer[Tindex + 1] = Tdata1;
+ destBuffer[Tindex + 2] = Tdata2;
+ destBuffer[Tindex + 3] = Tdata3;
+ Tindex += 4;
+ }//while
+ for (Ti = 0; Ti < Tlen; Ti++, Tindex++) {
+ destBuffer[Tindex] = sourceBuffer[Tindex];
+ }//for
+}//Dbtc::copyFromToLen()
+
+void Dbtc::execSEND_PACKED(Signal* signal)
+{
+ HostRecordPtr Thostptr;
+ HostRecord *localHostRecord = hostRecord;
+ UintR i;
+ UintR TpackedListIndex = cpackedListIndex;
+ jamEntry();
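+ // Flush the buffered packed signals (LQH requests, TCKEYCONF and
+ // TCINDXCONF) for every host on the packed list, then clear the list.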
+ for (i = 0; i < TpackedListIndex; i++) {
+ Thostptr.i = cpackedList[i];
+ ptrAss(Thostptr, localHostRecord);
+ arrGuard(Thostptr.i - 1, MAX_NODES - 1);
+ UintR TnoOfPackedWordsLqh = Thostptr.p->noOfPackedWordsLqh;
+ UintR TnoOfWordsTCKEYCONF = Thostptr.p->noOfWordsTCKEYCONF;
+ UintR TnoOfWordsTCINDXCONF = Thostptr.p->noOfWordsTCINDXCONF;
+ jam();
+ if (TnoOfPackedWordsLqh > 0) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ }//if
+ if (TnoOfWordsTCKEYCONF > 0) {
+ jam();
+ sendPackedTCKEYCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
+ }//if
+ if (TnoOfWordsTCINDXCONF > 0) {
+ jam();
+ sendPackedTCINDXCONF(signal, Thostptr.p, (Uint32)Thostptr.i);
+ }//if
+ Thostptr.p->inPackedList = false;
+ }//for
+ cpackedListIndex = 0;
+ return;
+}//Dbtc::execSEND_PACKED()
+
+void
+Dbtc::updatePackedList(Signal* signal, HostRecord* ahostptr, Uint16 ahostIndex)
+{
+ if (ahostptr->inPackedList == false) {
+ UintR TpackedListIndex = cpackedListIndex;
+ jam();
+ ahostptr->inPackedList = true;
+ cpackedList[TpackedListIndex] = ahostIndex;
+ cpackedListIndex = TpackedListIndex + 1;
+ }//if
+}//Dbtc::updatePackedList()
+
+void Dbtc::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfPackedWordsLqh;
+ for (Tj = 0; Tj < TnoOfWords; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsLqh[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsLqh[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsLqh[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsLqh[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfPackedWordsLqh = 0;
+ sendSignal(ahostptr->hostLqhBlockRef,
+ GSN_PACKED_SIGNAL,
+ signal,
+ TnoOfWords,
+ JBB);
+}//Dbtc::sendPackedSignalLqh()
+
+void Dbtc::sendPackedTCKEYCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfWordsTCKEYCONF;
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+ for (Tj = 0; Tj < ahostptr->noOfWordsTCKEYCONF; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsTCKEYCONF[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsTCKEYCONF[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsTCKEYCONF[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsTCKEYCONF[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfWordsTCKEYCONF = 0;
+ sendSignal(TBref, GSN_TCKEYCONF, signal, TnoOfWords, JBB);
+}//Dbtc::sendPackedTCKEYCONF()
+
+void Dbtc::sendPackedTCINDXCONF(Signal* signal,
+ HostRecord * ahostptr,
+ UintR hostId)
+{
+ UintR Tj;
+ UintR TnoOfWords = ahostptr->noOfWordsTCINDXCONF;
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+ for (Tj = 0; Tj < ahostptr->noOfWordsTCINDXCONF; Tj += 4) {
+ UintR sig0 = ahostptr->packedWordsTCINDXCONF[Tj + 0];
+ UintR sig1 = ahostptr->packedWordsTCINDXCONF[Tj + 1];
+ UintR sig2 = ahostptr->packedWordsTCINDXCONF[Tj + 2];
+ UintR sig3 = ahostptr->packedWordsTCINDXCONF[Tj + 3];
+ signal->theData[Tj + 0] = sig0;
+ signal->theData[Tj + 1] = sig1;
+ signal->theData[Tj + 2] = sig2;
+ signal->theData[Tj + 3] = sig3;
+ }//for
+ ahostptr->noOfWordsTCINDXCONF = 0;
+ sendSignal(TBref, GSN_TCINDXCONF, signal, TnoOfWords, JBB);
+}//Dbtc::sendPackedTCINDXCONF()
+
+/*
+4.3.11 DIVERIFY
+---------------
+*/
+/*****************************************************************************/
+/* D I V E R I F Y */
+/* */
+/*****************************************************************************/
+void Dbtc::diverify010Lab(Signal* signal)
+{
+ UintR TfirstfreeApiConnectCopy = cfirstfreeApiConnectCopy;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ signal->theData[0] = apiConnectptr.i;
+ if (ERROR_INSERTED(8022)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ if (TfirstfreeApiConnectCopy != RNIL) {
+ seizeApiConnectCopy(signal);
+ regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
+ /*-----------------------------------------------------------------------
+ * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS.
+ * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
+ * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
+ * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
+ *-----------------------------------------------------------------------*/
+ EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
+ if (signal->theData[2] == 0) {
+ execDIVERIFYCONF(signal);
+ }
+ return;
+ } else {
+ /*-----------------------------------------------------------------------
+ * There were no free copy connections available. We must abort the
+ * transaction since otherwise we will have a problem with the report
+ * to the application.
+ * This should more or less never happen, but if it does we do not want to
+ * crash, and we do not want to add code to handle it properly since
+ * it is difficult to test and would add complexity for a problem that
+ * more or less never occurs.
+ *-----------------------------------------------------------------------*/
+ terrorCode = ZSEIZE_API_COPY_ERROR;
+ abortErrorLab(signal);
+ return;
+ }//if
+}//Dbtc::diverify010Lab()
+
+/* ------------------------------------------------------------------------- */
+/* ------- SEIZE_API_CONNECT ------- */
+/* SEIZE CONNECT RECORD FOR A REQUEST */
+/* ------------------------------------------------------------------------- */
+void Dbtc::seizeApiConnectCopy(Signal* signal)
+{
+ ApiConnectRecordPtr locApiConnectptr;
+
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ locApiConnectptr.i = cfirstfreeApiConnectCopy;
+ ptrCheckGuard(locApiConnectptr, TapiConnectFilesize, localApiConnectRecord);
+ cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect;
+ locApiConnectptr.p->nextApiConnect = RNIL;
+ regApiPtr->apiCopyRecord = locApiConnectptr.i;
+ regApiPtr->triggerPending = false;
+ regApiPtr->isIndexOp = false;
+}//Dbtc::seizeApiConnectCopy()
+
+void Dbtc::execDIVERIFYCONF(Signal* signal)
+{
+ UintR TapiConnectptrIndex = signal->theData[0];
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR Tgci = signal->theData[1];
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ jamEntry();
+ if (ERROR_INSERTED(8017)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (TapiConnectptrIndex >= TapiConnectFilesize) {
+ TCKEY_abort(signal, 31);
+ return;
+ }//if
+ ApiConnectRecord * const regApiPtr =
+ &localApiConnectRecord[TapiConnectptrIndex];
+ ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
+ UintR TApifailureNr = regApiPtr->failureNr;
+ UintR Tfailure_nr = cfailure_nr;
+ apiConnectptr.i = TapiConnectptrIndex;
+ apiConnectptr.p = regApiPtr;
+ if (TapiConnectstate != CS_PREPARE_TO_COMMIT) {
+ TCKEY_abort(signal, 32);
+ return;
+ }//if
+ /*--------------------------------------------------------------------------
+ * THIS IS THE COMMIT POINT. IF WE ARRIVE HERE THE TRANSACTION IS COMMITTED
+ * UNLESS EVERYTHING CRASHES BEFORE WE HAVE BEEN ABLE TO REPORT THE COMMIT
+ * DECISION. THERE IS NO TURNING BACK FROM THIS DECISION FROM HERE ON.
+ * WE WILL INSERT THE TRANSACTION INTO ITS PROPER QUEUE OF
+ * TRANSACTIONS FOR ITS GLOBAL CHECKPOINT.
+ *-------------------------------------------------------------------------*/
+ if (TApifailureNr != Tfailure_nr) {
+ DIVER_node_fail_handling(signal, Tgci);
+ return;
+ }//if
+ commitGciHandling(signal, Tgci);
+
+ /**************************************************************************
+ * C O M M I T
+ * THE TRANSACTION HAS NOW BEEN VERIFIED AND THE COMMIT PHASE CAN START
+ **************************************************************************/
+
+ UintR TtcConnectptrIndex = regApiPtr->firstTcConnect;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ regApiPtr->counter = regApiPtr->lqhkeyconfrec;
+ regApiPtr->apiConnectstate = CS_COMMITTING;
+ if (TtcConnectptrIndex >= TtcConnectFilesize) {
+ TCKEY_abort(signal, 33);
+ return;
+ }//if
+ TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex];
+ tcConnectptr.i = TtcConnectptrIndex;
+ tcConnectptr.p = regTcPtr;
+ commit020Lab(signal);
+}//Dbtc::execDIVERIFYCONF()
+
+/*--------------------------------------------------------------------------*/
+/* COMMIT_GCI_HANDLING */
+/* SET UP GLOBAL CHECKPOINT DATA STRUCTURE AT THE COMMIT POINT. */
+/*--------------------------------------------------------------------------*/
+void Dbtc::commitGciHandling(Signal* signal, UintR Tgci)
+{
+ GcpRecordPtr localGcpPointer;
+
+ UintR TgcpFilesize = cgcpFilesize;
+ UintR Tfirstgcp = cfirstgcp;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ GcpRecord *localGcpRecord = gcpRecord;
+
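+ // Find the GCP record for this transaction's global checkpoint id,
+ // seizing a new one if needed, and link the API connect record to it.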
+ regApiPtr->globalcheckpointid = Tgci;
+ if (Tfirstgcp != RNIL) {
+ /* IF THIS GLOBAL CHECKPOINT ALREADY EXISTS */
+ localGcpPointer.i = Tfirstgcp;
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ do {
+ if (regApiPtr->globalcheckpointid == localGcpPointer.p->gcpId) {
+ jam();
+ gcpPtr.i = localGcpPointer.i;
+ gcpPtr.p = localGcpPointer.p;
+ linkApiToGcp(signal);
+ return;
+ } else {
+ localGcpPointer.i = localGcpPointer.p->nextGcp;
+ jam();
+ if (localGcpPointer.i != RNIL) {
+ jam();
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ continue;
+ }//if
+ }//if
+ seizeGcp(signal);
+ linkApiToGcp(signal);
+ return;
+ } while (1);
+ } else {
+ jam();
+ seizeGcp(signal);
+ linkApiToGcp(signal);
+ }//if
+}//Dbtc::commitGciHandling()
+
+/* --------------------------------------------------------------------------*/
+/* -LINK AN API CONNECT RECORD IN STATE PREPARED INTO THE LIST WITH GLOBAL - */
+/* CHECKPOINTS. WHEN THE TRANSACTION IS COMPLETED THE API CONNECT RECORD IS */
+/* LINKED OUT OF THE LIST. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::linkApiToGcp(Signal* signal)
+{
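+ // Append this API connect record to the tail of the GCP record's list
+ // of transactions waiting for this global checkpoint.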
+ ApiConnectRecordPtr localApiConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ GcpRecord * const regGcpPtr = gcpPtr.p;
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ regApiPtr->nextGcpConnect = RNIL;
+ if (regGcpPtr->firstApiConnect == RNIL) {
+ regGcpPtr->firstApiConnect = TapiConnectptrIndex;
+ jam();
+ } else {
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ localApiConnectptr.i = regGcpPtr->lastApiConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->nextGcpConnect = TapiConnectptrIndex;
+ }//if
+ UintR TlastApiConnect = regGcpPtr->lastApiConnect;
+ regApiPtr->gcpPointer = gcpPtr.i;
+ regApiPtr->prevGcpConnect = TlastApiConnect;
+ regGcpPtr->lastApiConnect = TapiConnectptrIndex;
+}//Dbtc::linkApiToGcp()
+
+void Dbtc::seizeGcp(Signal* signal)
+{
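+ // Take a GCP record from the free list, initialise it for the current
+ // global checkpoint id and append it to the tail of the GCP list.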
+ GcpRecordPtr tmpGcpPointer;
+ GcpRecordPtr localGcpPointer;
+
+ UintR Tfirstgcp = cfirstgcp;
+ UintR Tglobalcheckpointid = apiConnectptr.p->globalcheckpointid;
+ UintR TgcpFilesize = cgcpFilesize;
+ GcpRecord *localGcpRecord = gcpRecord;
+
+ localGcpPointer.i = cfirstfreeGcp;
+ ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord);
+ UintR TfirstfreeGcp = localGcpPointer.p->nextGcp;
+ localGcpPointer.p->gcpId = Tglobalcheckpointid;
+ localGcpPointer.p->nextGcp = RNIL;
+ localGcpPointer.p->firstApiConnect = RNIL;
+ localGcpPointer.p->lastApiConnect = RNIL;
+ localGcpPointer.p->gcpNomoretransRec = ZFALSE;
+ cfirstfreeGcp = TfirstfreeGcp;
+
+ if (Tfirstgcp == RNIL) {
+ jam();
+ cfirstgcp = localGcpPointer.i;
+ } else {
+ tmpGcpPointer.i = clastgcp;
+ jam();
+ ptrCheckGuard(tmpGcpPointer, TgcpFilesize, localGcpRecord);
+ tmpGcpPointer.p->nextGcp = localGcpPointer.i;
+ }//if
+ clastgcp = localGcpPointer.i;
+ gcpPtr = localGcpPointer;
+}//Dbtc::seizeGcp()
+
+/*---------------------------------------------------------------------------*/
+// Send COMMIT messages to all LQH operations involved in the transaction.
+/*---------------------------------------------------------------------------*/
+void Dbtc::commit020Lab(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.p = tcConnectptr.p;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ UintR Tcount = 0;
+ do {
+ /*-----------------------------------------------------------------------
+ * WE ARE NOW READY TO RELEASE ALL OPERATIONS ON THE LQH
+ *-----------------------------------------------------------------------*/
+ /* *********< */
+ /* COMMIT < */
+ /* *********< */
+ localTcConnectptr.i = localTcConnectptr.p->nextTcConnect;
+ localTcConnectptr.p->tcConnectstate = OS_COMMITTING;
+ sendCommitLqh(signal, localTcConnectptr.p);
+
+ if (localTcConnectptr.i != RNIL) {
+ Tcount = Tcount + 1;
+ if (Tcount < 16) {
+ ptrCheckGuard(localTcConnectptr,
+ TtcConnectFilesize, localTcConnectRecord);
+ jam();
+ continue;
+ } else {
+ jam();
+ if (ERROR_INSERTED(8014)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ signal->theData[0] = TcContinueB::ZSEND_COMMIT_LOOP;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = localTcConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_COMMIT_SENT;
+ return;
+ }//if
+ } while (1);
+}//Dbtc::commit020Lab()
+
+void Dbtc::sendCommitLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr)
+{
+ HostRecordPtr Thostptr;
+ UintR ThostFilesize = chostFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ Thostptr.i = regTcPtr->lastLqhNodeId;
+ ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 21) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+ UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
+ UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
+ UintR Tdata1 = regTcPtr->lastLqhCon;
+ UintR Tdata2 = regApiPtr->globalcheckpointid;
+ UintR Tdata3 = regApiPtr->transid[0];
+ UintR Tdata4 = regApiPtr->transid[1];
+
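+ // Pack a 4-word COMMIT request: ZCOMMIT tag in the top four bits of
+ // word 0 (with the LQH connect pointer in the low bits), followed by
+ // the GCI and the two transaction id words.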
+ TDataPtr[0] = Tdata1 | (ZCOMMIT << 28);
+ TDataPtr[1] = Tdata2;
+ TDataPtr[2] = Tdata3;
+ TDataPtr[3] = Tdata4;
+ Thostptr.p->noOfPackedWordsLqh = Tindex + 4;
+}//Dbtc::sendCommitLqh()
+
+void
+Dbtc::DIVER_node_fail_handling(Signal* signal, UintR Tgci)
+{
+ /*------------------------------------------------------------------------
+ * AT LEAST ONE NODE HAS FAILED DURING THE TRANSACTION. WE NEED TO CHECK IF
+ * THIS IS SO SERIOUS THAT WE NEED TO ABORT THE TRANSACTION. IN BOTH THE
+ * ABORT AND THE COMMIT CASES WE NEED TO SET-UP THE DATA FOR THE
+ * ABORT/COMMIT/COMPLETE HANDLING AS ALSO USED BY TAKE OVER FUNCTIONALITY.
+ *------------------------------------------------------------------------*/
+ tabortInd = ZFALSE;
+ setupFailData(signal);
+ if (tabortInd == ZFALSE) {
+ jam();
+ commitGciHandling(signal, Tgci);
+ toCommitHandlingLab(signal);
+ } else {
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
+ apiConnectptr.p->returncode = ZNODEFAIL_BEFORE_COMMIT;
+ toAbortHandlingLab(signal);
+ }//if
+ return;
+}//Dbtc::DIVER_node_fail_handling()
+
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER COMMITTED ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dbtc::execCOMMITTED(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8018)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8030)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8025)) {
+ SET_ERROR_INSERT_VALUE(8026);
+ return;
+ }//if
+ if (ERROR_INSERTED(8041)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(8042)) {
+ SET_ERROR_INSERT_VALUE(8046);
+ sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 4);
+ return;
+ }//if
+#endif
+ localTcConnectptr.i = signal->theData[0];
+ jamEntry();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ localApiConnectptr.i = localTcConnectptr.p->apiConnect;
+ if (localTcConnectptr.p->tcConnectstate != OS_COMMITTING) {
+ warningReport(signal, 4);
+ return;
+ }//if
+ ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
+ localApiConnectRecord);
+ UintR Tcounter = localApiConnectptr.p->counter - 1;
+ ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
+ UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
+ UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
+ Tdata1 = Tdata1 | Tdata2;
+ bool TcheckCondition =
+ (TapiConnectstate != CS_COMMIT_SENT) || (Tcounter != 0);
+
+ setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
+ localApiConnectptr.p->counter = Tcounter;
+ localTcConnectptr.p->tcConnectstate = OS_COMMITTED;
+ if (Tdata1 != 0) {
+ warningReport(signal, 5);
+ return;
+ }//if
+ if (TcheckCondition) {
+ jam();
+ /*-------------------------------------------------------*/
+ // We have not sent all COMMIT requests yet. We could be
+ // in the state that all sent are COMMITTED but we are
+ // still waiting for a CONTINUEB to send the rest of the
+ // COMMIT requests.
+ /*-------------------------------------------------------*/
+ return;
+ }//if
+ if (ERROR_INSERTED(8020)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ /*-------------------------------------------------------*/
+ /* THE ENTIRE TRANSACTION IS NOW COMMITTED */
+ /* NOW WE NEED TO SEND THE RESPONSE TO THE APPLICATION. */
+ /* THE APPLICATION CAN THEN REUSE THE API CONNECTION AND */
+ /* THEREFORE WE NEED TO MOVE THE API CONNECTION TO A */
+ /* NEW API CONNECT RECORD. */
+ /*-------------------------------------------------------*/
+
+ apiConnectptr = localApiConnectptr;
+ sendApiCommit(signal);
+
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ localTcConnectptr.i = regApiPtr->firstTcConnect;
+ UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ regApiPtr->counter = Tlqhkeyconfrec;
+
+ tcConnectptr = localTcConnectptr;
+ complete010Lab(signal);
+ return;
+
+}//Dbtc::execCOMMITTED()
+
+/*-------------------------------------------------------*/
+/* SEND_API_COMMIT */
+/* SEND COMMIT DECISION TO THE API. */
+/*-------------------------------------------------------*/
+void Dbtc::sendApiCommit(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ if (regApiPtr->returnsignal == RS_TCKEYCONF) {
+ sendtckeyconf(signal, 1);
+ } else if (regApiPtr->returnsignal == RS_TC_COMMITCONF) {
+ jam();
+ TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
+ if(regApiPtr->commitAckMarker == RNIL){
+ jam();
+ commitConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ } else {
+ jam();
+ commitConf->apiConnectPtr = regApiPtr->ndbapiConnect | 1;
+ }
+ commitConf->transId1 = regApiPtr->transid[0];
+ commitConf->transId2 = regApiPtr->transid[1];
+
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal, 3, JBB);
+ } else if (regApiPtr->returnsignal == RS_NO_RETURN) {
+ jam();
+ } else {
+ TCKEY_abort(signal, 37);
+ return;
+ }//if
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR TcommitCount = c_counters.ccommitCount;
+ UintR TapiIndex = apiConnectptr.i;
+ UintR TnewApiIndex = regApiPtr->apiCopyRecord;
+ UintR TapiFailState = regApiPtr->apiFailState;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ tmpApiConnectptr.p = apiConnectptr.p;
+ tmpApiConnectptr.i = TapiIndex;
+ c_counters.ccommitCount = TcommitCount + 1;
+ apiConnectptr.i = TnewApiIndex;
+ ptrCheckGuard(apiConnectptr, TapiConnectFilesize, localApiConnectRecord);
+ copyApi(signal);
+ if (TapiFailState != ZTRUE) {
+ return;
+ } else {
+ jam();
+ handleApiFailState(signal, tmpApiConnectptr.i);
+ return;
+ }//if
+}//Dbtc::sendApiCommit()
+
+/* ========================================================================= */
+/* ======= COPY_API ======= */
+/* COPY API RECORD; ALSO RESET THE OLD API RECORD SO THAT IT */
+/* IS PREPARED TO RECEIVE A NEW TRANSACTION. */
+/*===========================================================================*/
+void Dbtc::copyApi(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
+
+ UintR TndbapiConnect = regTmpApiPtr->ndbapiConnect;
+ UintR TfirstTcConnect = regTmpApiPtr->firstTcConnect;
+ UintR Ttransid1 = regTmpApiPtr->transid[0];
+ UintR Ttransid2 = regTmpApiPtr->transid[1];
+ UintR Tlqhkeyconfrec = regTmpApiPtr->lqhkeyconfrec;
+ UintR TgcpPointer = regTmpApiPtr->gcpPointer;
+ UintR TgcpFilesize = cgcpFilesize;
+ UintR TcommitAckMarker = regTmpApiPtr->commitAckMarker;
+ GcpRecord *localGcpRecord = gcpRecord;
+
+ regApiPtr->ndbapiBlockref = regTmpApiPtr->ndbapiBlockref;
+ regApiPtr->ndbapiConnect = TndbapiConnect;
+ regApiPtr->firstTcConnect = TfirstTcConnect;
+ regApiPtr->apiConnectstate = CS_COMPLETING;
+ regApiPtr->transid[0] = Ttransid1;
+ regApiPtr->transid[1] = Ttransid2;
+ regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec;
+ regApiPtr->commitAckMarker = TcommitAckMarker;
+
+ gcpPtr.i = TgcpPointer;
+ ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord);
+ unlinkApiConnect(signal);
+ linkApiToGcp(signal);
+ setApiConTimer(tmpApiConnectptr.i, 0, __LINE__);
+ regTmpApiPtr->apiConnectstate = CS_CONNECTED;
+ regTmpApiPtr->commitAckMarker = RNIL;
+ regTmpApiPtr->firstTcConnect = RNIL;
+ regTmpApiPtr->lastTcConnect = RNIL;
+}//Dbtc::copyApi()
+
+void Dbtc::unlinkApiConnect(Signal* signal)
+{
+ ApiConnectRecordPtr localApiConnectptr;
+ ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ UintR TprevGcpConnect = regTmpApiPtr->prevGcpConnect;
+ UintR TnextGcpConnect = regTmpApiPtr->nextGcpConnect;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+ if (TprevGcpConnect == RNIL) {
+ gcpPtr.p->firstApiConnect = TnextGcpConnect;
+ jam();
+ } else {
+ localApiConnectptr.i = TprevGcpConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->nextGcpConnect = TnextGcpConnect;
+ }//if
+ if (TnextGcpConnect == RNIL) {
+ gcpPtr.p->lastApiConnect = TprevGcpConnect;
+ jam();
+ } else {
+ localApiConnectptr.i = TnextGcpConnect;
+ jam();
+ ptrCheckGuard(localApiConnectptr,
+ TapiConnectFilesize, localApiConnectRecord);
+ localApiConnectptr.p->prevGcpConnect = TprevGcpConnect;
+ }//if
+}//Dbtc::unlinkApiConnect()
+
+void Dbtc::complete010Lab(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.p = tcConnectptr.p;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR Tcount = 0;
+ do {
+ localTcConnectptr.p->apiConnect = TapiConnectptrIndex;
+ localTcConnectptr.p->tcConnectstate = OS_COMPLETING;
+
+ /* ************ */
+ /* COMPLETE < */
+ /* ************ */
+ const Uint32 nextTcConnect = localTcConnectptr.p->nextTcConnect;
+ sendCompleteLqh(signal, localTcConnectptr.p);
+ localTcConnectptr.i = nextTcConnect;
+ if (localTcConnectptr.i != RNIL) {
+ Tcount++;
+ if (Tcount < 16) {
+ ptrCheckGuard(localTcConnectptr,
+ TtcConnectFilesize, localTcConnectRecord);
+ jam();
+ continue;
+ } else {
+ jam();
+ if (ERROR_INSERTED(8013)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ signal->theData[0] = TcContinueB::ZSEND_COMPLETE_LOOP;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = localTcConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ } else {
+ jam();
+ regApiPtr->apiConnectstate = CS_COMPLETE_SENT;
+ return;
+ }//if
+ } while (1);
+}//Dbtc::complete010Lab()
+
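+/* ------------------------------------------------------------------------- */
+/* Pack a COMPLETE request (ZCOMPLETE in the top nibble of the first word    */
+/* plus the two transid words) into the packedWordsLqh buffer of the host    */
+/* running the last LQH of the operation. The buffer is flushed first if it  */
+/* already holds more than 22 words.                                         */
+/* ------------------------------------------------------------------------- */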
+void Dbtc::sendCompleteLqh(Signal* signal,
+ TcConnectRecord * const regTcPtr)
+{
+ HostRecordPtr Thostptr;
+ UintR ThostFilesize = chostFilesize;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ Thostptr.i = regTcPtr->lastLqhNodeId;
+ ptrCheckGuard(Thostptr, ThostFilesize, hostRecord);
+ if (Thostptr.p->noOfPackedWordsLqh > 22) {
+ jam();
+ sendPackedSignalLqh(signal, Thostptr.p);
+ } else {
+ jam();
+ updatePackedList(signal, Thostptr.p, Thostptr.i);
+ }//if
+
+ UintR Tindex = Thostptr.p->noOfPackedWordsLqh;
+ UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex];
+ UintR Tdata1 = regTcPtr->lastLqhCon | (ZCOMPLETE << 28);
+ UintR Tdata2 = regApiPtr->transid[0];
+ UintR Tdata3 = regApiPtr->transid[1];
+
+ TDataPtr[0] = Tdata1;
+ TDataPtr[1] = Tdata2;
+ TDataPtr[2] = Tdata3;
+ Thostptr.p->noOfPackedWordsLqh = Tindex + 3;
+}//Dbtc::sendCompleteLqh()
+
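+/* ------------------------------------------------------------------------- */
+/* TC_COMMIT_ACK: the API acknowledges a committed transaction. Release the  */
+/* commit ack marker from the hash and request all LQHs that stored the      */
+/* marker to remove it (packed ZREMOVE_MARKER requests).                     */
+/* ------------------------------------------------------------------------- */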
+void
+Dbtc::execTC_COMMIT_ACK(Signal* signal){
+ jamEntry();
+
+ CommitAckMarker key;
+ key.transid1 = signal->theData[0];
+ key.transid2 = signal->theData[1];
+
+ CommitAckMarkerPtr removedMarker;
+ m_commitAckMarkerHash.release(removedMarker, key);
+ if (removedMarker.i == RNIL) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ sendRemoveMarkers(signal, removedMarker.p);
+}
+
+void
+Dbtc::sendRemoveMarkers(Signal* signal, const CommitAckMarker * marker){
+ jam();
+ const Uint32 noOfLqhs = marker->noOfLqhs;
+ const Uint32 transId1 = marker->transid1;
+ const Uint32 transId2 = marker->transid2;
+
+ for(Uint32 i = 0; i<noOfLqhs; i++){
+ jam();
+ const NodeId nodeId = marker->lqhNodeId[i];
+ sendRemoveMarker(signal, nodeId, transId1, transId2);
+ }
+}
+
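+/* ------------------------------------------------------------------------- */
+/* Pack a ZREMOVE_MARKER request (marker opcode plus the two transid words)  */
+/* into the packedWordsLqh buffer of the given node, flushing the buffer     */
+/* first when there is no room for another three words.                      */
+/* ------------------------------------------------------------------------- */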
+void
+Dbtc::sendRemoveMarker(Signal* signal,
+ NodeId nodeId,
+ Uint32 transid1,
+ Uint32 transid2){
+ /**
+ * Seize host ptr
+ */
+ HostRecordPtr hostPtr;
+ const UintR ThostFilesize = chostFilesize;
+ hostPtr.i = nodeId;
+ ptrCheckGuard(hostPtr, ThostFilesize, hostRecord);
+
+ if (hostPtr.p->noOfPackedWordsLqh > (25 - 3)){
+ jam();
+ sendPackedSignalLqh(signal, hostPtr.p);
+ } else {
+ jam();
+ updatePackedList(signal, hostPtr.p, hostPtr.i);
+ }//if
+
+ UintR numWord = hostPtr.p->noOfPackedWordsLqh;
+ UintR* dataPtr = &hostPtr.p->packedWordsLqh[numWord];
+
+ dataPtr[0] = (ZREMOVE_MARKER << 28);
+ dataPtr[1] = transid1;
+ dataPtr[2] = transid2;
+ hostPtr.p->noOfPackedWordsLqh = numWord + 3;
+}
+
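+/* ------------------------------------------------------------------------- */
+/* COMPLETED: an LQH reports that an operation has completed. Verify the     */
+/* operation state and transaction id, decrement the outstanding counter and */
+/* release all transaction resources when the last COMPLETED has arrived.    */
+/* ------------------------------------------------------------------------- */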
+void Dbtc::execCOMPLETED(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ ApiConnectRecordPtr localApiConnectptr;
+
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ UintR TapiConnectFilesize = capiConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+
+#ifdef ERROR_INSERT
+ if (ERROR_INSERTED(8031)) {
+ systemErrorLab(signal);
+ }//if
+ if (ERROR_INSERTED(8019)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }//if
+ if (ERROR_INSERTED(8027)) {
+ SET_ERROR_INSERT_VALUE(8028);
+ return;
+ }//if
+ if (ERROR_INSERTED(8043)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
+ return;
+ }//if
+ if (ERROR_INSERTED(8044)) {
+ SET_ERROR_INSERT_VALUE(8047);
+ sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3);
+ return;
+ }//if
+#endif
+ localTcConnectptr.i = signal->theData[0];
+ jamEntry();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ bool Tcond1 = (localTcConnectptr.p->tcConnectstate != OS_COMPLETING);
+ localApiConnectptr.i = localTcConnectptr.p->apiConnect;
+ if (Tcond1) {
+ warningReport(signal, 6);
+ return;
+ }//if
+ ptrCheckGuard(localApiConnectptr, TapiConnectFilesize,
+ localApiConnectRecord);
+ UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1];
+ UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2];
+ UintR Tcounter = localApiConnectptr.p->counter - 1;
+ ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate;
+ Tdata1 = Tdata1 | Tdata2;
+ bool TcheckCondition =
+ (TapiConnectstate != CS_COMPLETE_SENT) || (Tcounter != 0);
+ if (Tdata1 != 0) {
+ warningReport(signal, 7);
+ return;
+ }//if
+ setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__);
+ localApiConnectptr.p->counter = Tcounter;
+ localTcConnectptr.p->tcConnectstate = OS_COMPLETED;
+ localTcConnectptr.p->noOfNodes = 0; // == releaseNodes(signal)
+ if (TcheckCondition) {
+ jam();
+ /*-------------------------------------------------------*/
+ // We have not sent all COMPLETE requests yet. We could be
+ // in the state that all sent are COMPLETED but we are
+ // still waiting for a CONTINUEB to send the rest of the
+ // COMPLETE requests.
+ /*-------------------------------------------------------*/
+ return;
+ }//if
+ if (ERROR_INSERTED(8021)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ apiConnectptr = localApiConnectptr;
+ releaseTransResources(signal);
+}//Dbtc::execCOMPLETED()
+
+/*---------------------------------------------------------------------------*/
+/* RELEASE_TRANS_RESOURCES */
+/* RELEASE ALL RESOURCES THAT ARE CONNECTED TO THIS TRANSACTION. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::releaseTransResources(Signal* signal)
+{
+ TcConnectRecordPtr localTcConnectptr;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ localTcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ jam();
+ ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord);
+ UintR rtrTcConnectptrIndex = localTcConnectptr.p->nextTcConnect;
+ tcConnectptr.i = localTcConnectptr.i;
+ tcConnectptr.p = localTcConnectptr.p;
+ localTcConnectptr.i = rtrTcConnectptrIndex;
+ releaseTcCon();
+ } while (localTcConnectptr.i != RNIL);
+ handleGcp(signal);
+ releaseFiredTriggerData(&apiConnectptr.p->theFiredTriggers);
+ releaseAllSeizedIndexOperations(apiConnectptr.p);
+ releaseApiConCopy(signal);
+}//Dbtc::releaseTransResources()
+
+/* *********************************************************************>> */
+/* MODULE: HANDLE_GCP */
+/* DESCRIPTION: PERFORMS GLOBAL CHECKPOINT HANDLING AT THE COMPLETION */
+/* OF THE COMMIT PHASE AND THE ABORT PHASE. WE MUST ENSURE THAT TC */
+/* SENDS GCP_TCFINISHED WHEN ALL TRANSACTIONS BELONGING TO A CERTAIN */
+/* GLOBAL CHECKPOINT HAVE COMPLETED. */
+/* *********************************************************************>> */
+void Dbtc::handleGcp(Signal* signal)
+{
+ GcpRecord *localGcpRecord = gcpRecord;
+ GcpRecordPtr localGcpPtr;
+ UintR TapiConnectptrIndex = apiConnectptr.i;
+ UintR TgcpFilesize = cgcpFilesize;
+ localGcpPtr.i = apiConnectptr.p->gcpPointer;
+ tmpApiConnectptr.i = TapiConnectptrIndex;
+ tmpApiConnectptr.p = apiConnectptr.p;
+ ptrCheckGuard(localGcpPtr, TgcpFilesize, localGcpRecord);
+ gcpPtr.i = localGcpPtr.i;
+ gcpPtr.p = localGcpPtr.p;
+ unlinkApiConnect(signal);
+ if (localGcpPtr.p->firstApiConnect == RNIL) {
+ if (localGcpPtr.p->gcpNomoretransRec == ZTRUE) {
+ jam();
+ tcheckGcpId = localGcpPtr.p->gcpId;
+ gcpTcfinished(signal);
+ unlinkGcp(signal);
+ }//if
+ }//if
+}//Dbtc::handleGcp()
+
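+/* ------------------------------------------------------------------------- */
+/* Return the api connect copy record to the free list, stop its timer and   */
+/* set its state to CS_RESTART.                                              */
+/* ------------------------------------------------------------------------- */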
+void Dbtc::releaseApiConCopy(Signal* signal)
+{
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ UintR TfirstfreeApiConnectCopyOld = cfirstfreeApiConnectCopy;
+ cfirstfreeApiConnectCopy = apiConnectptr.i;
+ regApiPtr->nextApiConnect = TfirstfreeApiConnectCopyOld;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ regApiPtr->apiConnectstate = CS_RESTART;
+}//Dbtc::releaseApiConCopy()
+
+/* ========================================================================= */
+/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY WRITE OPERATION ------- */
+/* ========================================================================= */
+void Dbtc::releaseDirtyWrite(Signal* signal)
+{
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if (regApiPtr->firstTcConnect == RNIL) {
+ jam();
+ regApiPtr->apiConnectstate = CS_CONNECTED;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ sendtckeyconf(signal, 1);
+ }//if
+ }//if
+}//Dbtc::releaseDirtyWrite()
+
+/*****************************************************************************
+ * L Q H K E Y R E F
+ * WHEN LQHKEYREF IS RECEIVED DBTC WILL CHECK IF COMMIT FLAG WAS SENT FROM THE
+ * APPLICATION. IF SO, THE WHOLE TRANSACTION WILL BE ROLLED BACK AND SIGNAL
+ * TCROLLBACKREP WILL BE SENT TO THE API.
+ *
+ * OTHERWISE TC WILL CHECK THE ERRORCODE. IF THE ERRORCODE INDICATES
+ * "ROW NOT FOUND" FOR UPDATE/READ/DELETE OPERATIONS OR "ROW ALREADY
+ * EXISTS" FOR INSERT OPERATIONS, DBTC WILL RELEASE THE OPERATION AND THEN
+ * SEND THE RETURN SIGNAL TCKEYREF TO THE USER. THE USER THEN HAS TO SEND
+ * SIGNAL TC_COMMITREQ OR TC_ROLLBACKREQ TO CONCLUDE THE TRANSACTION.
+ * IF ANY TCKEYREQ WITH COMMIT IS RECEIVED AND API_CONNECTSTATE EQUALS
+ * "REC_LQHREFUSE",
+ * THE OPERATION WILL BE TREATED AS AN OPERATION WITHOUT COMMIT. WHEN ANY
+ * OTHER FAULT CODE IS RECEIVED THE WHOLE TRANSACTION MUST BE ROLLED BACK.
+ *****************************************************************************/
+void Dbtc::execLQHKEYREF(Signal* signal)
+{
+ const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
+ jamEntry();
+
+ UintR compare_transid1, compare_transid2;
+ UintR TtcConnectFilesize = ctcConnectFilesize;
+ /*-------------------------------------------------------------------------
+ *
+ * RELEASE NODE BUFFER(S) TO INDICATE THAT THIS OPERATION HAS NO
+ * TRANSACTION PARTS ACTIVE ANYMORE.
+ * LQHKEYREF HAS CLEARED ALL PARTS ON ITS PATH BACK TO TC.
+ *-------------------------------------------------------------------------*/
+ if (lqhKeyRef->connectPtr < TtcConnectFilesize) {
+ /*-----------------------------------------------------------------------
+ * WE HAVE TO CHECK THAT THE TRANSACTION IS STILL VALID. FIRST WE CHECK
+ * THAT THE LQH IS STILL CONNECTED TO A TC, IF THIS HOLDS TRUE THEN THE
+ * TC MUST BE CONNECTED TO AN API CONNECT RECORD.
+ * WE MUST ENSURE THAT THE TRANSACTION ID OF THIS API CONNECT
+ * RECORD IS STILL THE SAME AS THE ONE LQHKEYREF REFERS TO.
+ * IF NOT SIMPLY EXIT AND FORGET THE SIGNAL SINCE THE TRANSACTION IS
+ * ALREADY COMPLETED (ABORTED).
+ *-----------------------------------------------------------------------*/
+ tcConnectptr.i = lqhKeyRef->connectPtr;
+ Uint32 errCode = terrorCode = lqhKeyRef->errorCode;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ TcConnectRecord * const regTcPtr = tcConnectptr.p;
+ if (regTcPtr->tcConnectstate == OS_OPERATING) {
+ apiConnectptr.i = regTcPtr->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1;
+ compare_transid2 = regApiPtr->transid[1] ^ lqhKeyRef->transId2;
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 25);
+ return;
+ }//if
+
+ const ConnectionState state = regApiPtr->apiConnectstate;
+ const Uint32 triggeringOp = regTcPtr->triggeringOperation;
+ if (triggeringOp != RNIL) {
+ jam();
+ // This operation was created by a trigger executing operation
+ TcConnectRecordPtr opPtr;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+
+ const Uint32 currentIndexId = regTcPtr->currentIndexId;
+ ndbassert(currentIndexId != 0); // Only index triggers so far
+
+ opPtr.i = triggeringOp;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+
+ // The operation executed an index trigger
+ const Uint32 opType = regTcPtr->operation;
+ if (errCode == ZALREADYEXIST)
+ errCode = terrorCode = ZNOTUNIQUE;
+ else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) {
+ jam();
+ /**
+ * "Normal path"
+ */
+ // fall-through
+ } else {
+ jam();
+ /** ZDELETE && NOT_FOUND */
+ TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
+ if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
+ jam();
+ /**
+ * Ignore error
+ */
+ regApiPtr->lqhkeyconfrec++;
+
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+
+ opPtr.p->triggerExecutionCount--;
+ if (opPtr.p->triggerExecutionCount == 0) {
+ /**
+ * We have completed current trigger execution
+ * Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opPtr.p);
+ }
+ return;
+ }
+ }
+ }
+
+ Uint32 marker = regTcPtr->commitAckMarker;
+ markOperationAborted(regApiPtr, regTcPtr);
+
+ if(regApiPtr->apiConnectstate == CS_ABORTING){
+ /**
+ * We're already aborting, so don't send an "extra" TCKEYREF
+ */
+ jam();
+ return;
+ }
+
+ const Uint32 abort = regTcPtr->m_execAbortOption;
+ if (abort == TcKeyReq::AbortOnError || triggeringOp != RNIL) {
+ /**
+ * No error is allowed on this operation
+ */
+ TCKEY_abort(signal, 49);
+ return;
+ }//if
+
+ if (marker != RNIL){
+ /**
+ * This was an insert/update/delete/write which failed
+ * and which contained the commit ack marker.
+ * Placing a new marker is currently unsupported.
+ */
+ TCKEY_abort(signal, 49);
+ return;
+ }
+
+ /* *************** */
+ /* TCKEYREF < */
+ /* *************** */
+ TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
+ tcKeyRef->transId[0] = regApiPtr->transid[0];
+ tcKeyRef->transId[1] = regApiPtr->transid[1];
+ tcKeyRef->errorCode = terrorCode;
+ bool isIndexOp = regTcPtr->isIndexOp;
+ Uint32 indexOp = tcConnectptr.p->indexOp;
+ Uint32 clientData = regTcPtr->clientData;
+ unlinkReadyTcCon(signal); /* UNLINK THE TC CONNECT RECORD */
+ releaseTcCon(); /* RELEASE THE TC CONNECT RECORD */
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ if (isIndexOp) {
+ jam();
+ regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
+ tcKeyRef->connectPtr = indexOp;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
+ apiConnectptr.i = regTcPtr->apiConnect;
+ apiConnectptr.p = regApiPtr;
+ } else {
+ jam();
+ tcKeyRef->connectPtr = clientData;
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
+ }//if
+
+ /*---------------------------------------------------------------------
+ * SINCE WE ARE NOT ABORTING WE NEED TO UPDATE THE COUNT OF HOW MANY
+ * LQHKEYREQ'S HAVE RETURNED.
+ * IF THERE ARE NO MORE OUTSTANDING LQHKEYREQ'S THEN WE NEED TO SEND
+ * TCKEYCONF (IF THERE IS ANYTHING TO SEND).
+ *---------------------------------------------------------------------*/
+ regApiPtr->lqhkeyreqrec--;
+ if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if(regApiPtr->lqhkeyconfrec) {
+ jam();
+ diverify010Lab(signal);
+ } else {
+ jam();
+ sendtckeyconf(signal, 1);
+ regApiPtr->apiConnectstate = CS_CONNECTED;
+ }
+ return;
+ } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
+ jam();
+ sendtckeyconf(signal, 2);
+ return;
+ }
+ }//if
+ return;
+
+ } else {
+ warningReport(signal, 26);
+ }//if
+ } else {
+ errorReport(signal, 6);
+ }//if
+ return;
+}//Dbtc::execLQHKEYREF()
+
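+/* ------------------------------------------------------------------------- */
+/* Release the commit ack marker held by this operation, if any, and clear   */
+/* the marker references in both the tc connect and api connect records.     */
+/* ------------------------------------------------------------------------- */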
+void Dbtc::clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr)
+{
+ const Uint32 commitAckMarker = regTcPtr->commitAckMarker;
+ if (regApiPtr->commitAckMarker == RNIL)
+ ndbassert(commitAckMarker == RNIL);
+ if (commitAckMarker != RNIL)
+ ndbassert(regApiPtr->commitAckMarker != RNIL);
+ if(commitAckMarker != RNIL){
+ jam();
+ m_commitAckMarkerHash.release(commitAckMarker);
+ regTcPtr->commitAckMarker = RNIL;
+ regApiPtr->commitAckMarker = RNIL;
+ }
+}
+
+void Dbtc::markOperationAborted(ApiConnectRecord * const regApiPtr,
+ TcConnectRecord * const regTcPtr)
+{
+ /*------------------------------------------------------------------------
+ * RELEASE NODES TO INDICATE THAT THE OPERATION IS ALREADY ABORTED IN THE
+ * LQH'S. ALSO SET THE STATE TO ABORTING TO INDICATE THAT THE ABORT IS
+ * ALREADY COMPLETED.
+ *------------------------------------------------------------------------*/
+ regTcPtr->noOfNodes = 0; // == releaseNodes(signal)
+ regTcPtr->tcConnectstate = OS_ABORTING;
+ clearCommitAckMarker(regApiPtr, regTcPtr);
+}
+
+/*--------------------------------------*/
+/* EXIT AND WAIT FOR SIGNAL TC_COMMITREQ */
+/* OR TCROLLBACKREQ FROM THE USER TO */
+/* CONTINUE THE TRANSACTION */
+/*--------------------------------------*/
+void Dbtc::execTC_COMMITREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ if (apiConnectptr.i < capiConnectFilesize) {
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ return;
+ }//if
+
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+
+ const Uint32 apiConnectPtr = regApiPtr->ndbapiConnect;
+ const Uint32 apiBlockRef = regApiPtr->ndbapiBlockref;
+ const Uint32 transId1 = regApiPtr->transid[0];
+ const Uint32 transId2 = regApiPtr->transid[1];
+ Uint32 errorCode = 0;
+
+ regApiPtr->m_exec_flag = 1;
+ switch (regApiPtr->apiConnectstate) {
+ case CS_STARTED:
+ tcConnectptr.i = regApiPtr->firstTcConnect;
+ if (tcConnectptr.i != RNIL) {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
+ jam();
+ /*******************************************************************/
+ // The proper case where the application is waiting for commit or
+ // abort order.
+ // Start the commit order.
+ /*******************************************************************/
+ regApiPtr->returnsignal = RS_TC_COMMITCONF;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ diverify010Lab(signal);
+ return;
+ } else {
+ jam();
+ /*******************************************************************/
+ // The transaction is started but not all operations are completed.
+ // It is not possible to commit the transaction in this state.
+ // We will abort it instead.
+ /*******************************************************************/
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ errorCode = ZTRANS_STATUS_ERROR;
+ abort010Lab(signal);
+ }//if
+ } else {
+ jam();
+ /**
+ * No operations, accept commit
+ */
+ TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0];
+ commitConf->apiConnectPtr = apiConnectPtr;
+ commitConf->transId1 = transId1;
+ commitConf->transId2 = transId2;
+
+ sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal, 3, JBB);
+
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ releaseAbortResources(signal);
+ return;
+ }//if
+ break;
+ case CS_RECEIVING:
+ jam();
+ /***********************************************************************/
+ // A transaction is still receiving data. We cannot commit an unfinished
+ // transaction. We will abort it instead.
+ /***********************************************************************/
+ regApiPtr->returnsignal = RS_NO_RETURN;
+ errorCode = ZPREPAREINPROGRESS;
+ abort010Lab(signal);
+ break;
+
+ case CS_START_COMMITTING:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ case CS_COMPLETING:
+ case CS_COMPLETE_SENT:
+ case CS_REC_COMMITTING:
+ case CS_PREPARE_TO_COMMIT:
+ jam();
+ /***********************************************************************/
+ // The transaction is already performing a commit but it is not concluded
+ // yet.
+ /***********************************************************************/
+ errorCode = ZCOMMITINPROGRESS;
+ break;
+ case CS_ABORTING:
+ jam();
+ errorCode = ZABORTINPROGRESS;
+ break;
+ case CS_START_SCAN:
+ jam();
+ /***********************************************************************/
+ // The transaction is a scan. Scans cannot commit
+ /***********************************************************************/
+ errorCode = ZSCANINPROGRESS;
+ break;
+ case CS_PREPARED:
+ jam();
+ return;
+ case CS_START_PREPARING:
+ jam();
+ return;
+ case CS_REC_PREPARING:
+ jam();
+ return;
+ break;
+ default:
+ warningHandlerLab(signal);
+ return;
+ }//switch
+ TcCommitRef * const commitRef = (TcCommitRef*)&signal->theData[0];
+ commitRef->apiConnectPtr = apiConnectPtr;
+ commitRef->transId1 = transId1;
+ commitRef->transId2 = transId2;
+ commitRef->errorCode = errorCode;
+ sendSignal(apiBlockRef, GSN_TC_COMMITREF, signal,
+ TcCommitRef::SignalLength, JBB);
+ return;
+ } else /** apiConnectptr.i >= capiConnectFilesize */ {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }
+}//Dbtc::execTC_COMMITREQ()
+
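+/* ------------------------------------------------------------------------- */
+/* TCROLLBACKREQ: the API requests a rollback. Start the abort while the     */
+/* transaction is still open, confirm immediately when there is nothing to   */
+/* roll back and answer with TCROLLBACKREF when the transaction state does   */
+/* not allow a rollback.                                                     */
+/* ------------------------------------------------------------------------- */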
+void Dbtc::execTCROLLBACKREQ(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ apiConnectptr.i = signal->theData[0];
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ goto TC_ROLL_warning;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ jam();
+ return;
+ }//if
+
+ apiConnectptr.p->m_exec_flag = 1;
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_STARTED:
+ case CS_RECEIVING:
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ abort010Lab(signal);
+ return;
+ case CS_CONNECTED:
+ jam();
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
+ signal, 3, JBB);
+ break;
+ case CS_START_SCAN:
+ case CS_PREPARE_TO_COMMIT:
+ case CS_COMMITTING:
+ case CS_COMMIT_SENT:
+ case CS_COMPLETING:
+ case CS_COMPLETE_SENT:
+ case CS_WAIT_COMMIT_CONF:
+ case CS_WAIT_COMPLETE_CONF:
+ case CS_RESTART:
+ case CS_DISCONNECTED:
+ case CS_START_COMMITTING:
+ case CS_REC_COMMITTING:
+ jam();
+ /* ***************< */
+ /* TC_ROLLBACKREF < */
+ /* ***************< */
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = ZROLLBACKNOTALLOWED;
+ signal->theData[4] = apiConnectptr.p->apiConnectstate;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF,
+ signal, 5, JBB);
+ break;
+ /* SEND A REFUSAL SIGNAL*/
+ case CS_ABORTING:
+ jam();
+ if (apiConnectptr.p->abortState == AS_IDLE) {
+ jam();
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF,
+ signal, 3, JBB);
+ } else {
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ }//if
+ break;
+ case CS_WAIT_ABORT_CONF:
+ jam();
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF;
+ break;
+ case CS_START_PREPARING:
+ jam();
+ case CS_PREPARED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ default:
+ goto TC_ROLL_system_error;
+ break;
+ }//switch
+ return;
+
+TC_ROLL_warning:
+ jam();
+ warningHandlerLab(signal);
+ return;
+
+TC_ROLL_system_error:
+ jam();
+ systemErrorLab(signal);
+ return;
+}//Dbtc::execTCROLLBACKREQ()
+
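+/* ------------------------------------------------------------------------- */
+/* TC_HBREP: heartbeat from the API for a long-running transaction. If the   */
+/* transaction id matches and the timer is running, restart the timer so     */
+/* that the transaction is not aborted by the time-out handling.             */
+/* ------------------------------------------------------------------------- */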
+void Dbtc::execTC_HBREP(Signal* signal)
+{
+ const TcHbRep * const tcHbRep =
+ (TcHbRep *)signal->getDataPtr();
+
+ jamEntry();
+ apiConnectptr.i = tcHbRep->apiConnectPtr;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ if (apiConnectptr.p->transid[0] == tcHbRep->transId1 &&
+ apiConnectptr.p->transid[1] == tcHbRep->transId2){
+
+ if (getApiConTimer(apiConnectptr.i) != 0){
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ } else {
+ DEBUG("TCHBREP received when timer was off apiConnectptr.i="
+ << apiConnectptr.i);
+ }
+ }
+}//Dbtc::execTC_HBREP()
+
+/*
+4.3.15 ABORT
+-----------
+*/
+/*****************************************************************************/
+/* A B O R T */
+/* */
+/*****************************************************************************/
+void Dbtc::warningReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to not active TC record" << endl;
+#endif
+ break;
+ case 1:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to TC record active with new transaction" << endl;
+#endif
+ break;
+ case 2:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to active TC record not expecting ABORTED" << endl;
+#endif
+ break;
+ case 3:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "ABORTED to TC rec active with trans but wrong node" << endl;
+ ndbout << "This is ok when aborting in node failure situations" << endl;
+#endif
+ break;
+ case 4:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITTED in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 5:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITTED with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 6:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETED in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 7:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETED with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 8:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 9:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with api-rec in wrong state in Dbtc" <<endl;
+#endif
+ break;
+ case 10:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 11:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMMITCONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 12:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF, tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 13:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF, api-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 14:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 15:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received COMPLETECONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 16:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF, tc-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 17:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF, api-rec in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 18:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF with wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 19:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received ABORTCONF from wrong nodeid in Dbtc" << endl;
+#endif
+ break;
+ case 20:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for ABORTCONF in Dbtc" << endl;
+#endif
+ break;
+ case 21:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for COMMITCONF in Dbtc" << endl;
+#endif
+ break;
+ case 22:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Time-out waiting for COMPLETECONF in Dbtc" << endl;
+#endif
+ break;
+ case 23:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF in wrong tc-state in Dbtc" << endl;
+#endif
+ break;
+ case 24:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYREF to wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 25:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYREF in wrong state in Dbtc" << endl;
+#endif
+ break;
+ case 26:
+ jam();
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF to wrong transid in Dbtc" << endl;
+#endif
+ break;
+ case 27:
+ jam();
+ // printState(signal, 27);
+#ifdef ABORT_TRACE
+ ndbout << "Received LQHKEYCONF in wrong api-state in Dbtc" << endl;
+#endif
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ return;
+}//Dbtc::warningReport()
+
+void Dbtc::errorReport(Signal* signal, int place)
+{
+ switch (place) {
+ case 0:
+ jam();
+ break;
+ case 1:
+ jam();
+ break;
+ case 2:
+ jam();
+ break;
+ case 3:
+ jam();
+ break;
+ case 4:
+ jam();
+ break;
+ case 5:
+ jam();
+ break;
+ case 6:
+ jam();
+ break;
+ default:
+ jam();
+ break;
+ }//switch
+ systemErrorLab(signal);
+ return;
+}//Dbtc::errorReport()
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER ABORTED ------- */
+/* */
+/*-------------------------------------------------------------------------- */
+void Dbtc::execABORTED(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ UintR Tnodeid = signal->theData[3];
+ UintR TlastLqhInd = signal->theData[4];
+
+ if (ERROR_INSERTED(8040)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTED, signal, 2000, 5);
+ return;
+ }//if
+ /*------------------------------------------------------------------------
+ * ONE PARTICIPANT IN THE TRANSACTION HAS REPORTED THAT IT IS ABORTED.
+ *------------------------------------------------------------------------*/
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 0);
+ return;
+ }//if
+ /*-------------------------------------------------------------------------
+ * WE HAVE TO CHECK THAT THIS IS NOT AN OLD SIGNAL BELONGING TO A
+ * TRANSACTION ALREADY ABORTED. THIS CAN HAPPEN WHEN TIME-OUT OCCURS
+ * IN TC WAITING FOR ABORTED.
+ *-------------------------------------------------------------------------*/
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_ABORT_SENT) {
+ warningReport(signal, 2);
+ return;
+ /*-----------------------------------------------------------------------*/
+ // ABORTED reported on an operation not expecting ABORT.
+ /*-----------------------------------------------------------------------*/
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ warningReport(signal, 0);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 1);
+ return;
+ }//if
+ if (ERROR_INSERTED(8024)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ /**
+ * Release marker
+ */
+ clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
+
+ Uint32 i;
+ Uint32 Tfound = 0;
+ for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
+ jam();
+ if (tcConnectptr.p->tcNodedata[i] == Tnodeid) {
+ /*---------------------------------------------------------------------
+ * We have received ABORTED from one of the participants in this
+ * operation in this aborted transaction.
+ * Record all nodes that have completed abort.
+ * If the last indicator is set it means that no more replicas have
+ * heard of the operation and they are thus also considered aborted.
+ *---------------------------------------------------------------------*/
+ jam();
+ Tfound = 1;
+ clearTcNodeData(signal, TlastLqhInd, i);
+ }//if
+ }//for
+ if (Tfound == 0) {
+ warningReport(signal, 3);
+ return;
+ }
+ for (i = 0; i < tcConnectptr.p->noOfNodes; i++) {
+ if (tcConnectptr.p->tcNodedata[i] != 0) {
+ /*--------------------------------------------------------------------
+ * There are still outstanding ABORTED's to wait for.
+ *--------------------------------------------------------------------*/
+ jam();
+ return;
+ }//if
+ }//for
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->counter--;
+ if (apiConnectptr.p->counter > 0) {
+ jam();
+ /*----------------------------------------------------------------------
+ * WE ARE STILL WAITING FOR MORE PARTICIPANTS TO SEND ABORTED.
+ *----------------------------------------------------------------------*/
+ return;
+ }//if
+ /*------------------------------------------------------------------------*/
+ /* */
+ /* WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED */
+ /* FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL */
+ /* RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE */
+ /*------------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+}//Dbtc::execABORTED()
+
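+/* ------------------------------------------------------------------------- */
+/* Clear the node data entry of the replica that reported ABORTED. If the    */
+/* last-LQH indicator is set, all replicas from that position onwards are    */
+/* cleared as well since none of them have heard of the operation.           */
+/* ------------------------------------------------------------------------- */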
+void Dbtc::clearTcNodeData(Signal* signal,
+ UintR TLastLqhIndicator,
+ UintR Tstart)
+{
+ UintR Ti;
+ if (TLastLqhIndicator == ZTRUE) {
+ for (Ti = Tstart ; Ti < tcConnectptr.p->noOfNodes; Ti++) {
+ jam();
+ tcConnectptr.p->tcNodedata[Ti] = 0;
+ }//for
+ } else {
+ jam();
+ tcConnectptr.p->tcNodedata[Tstart] = 0;
+ }//if
+}//Dbtc::clearTcNodeData()
+
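+/* ------------------------------------------------------------------------- */
+/* Start aborting the transaction due to an error. The error in terrorCode   */
+/* is reported to the API through TCROLLBACKREP unless a return code has     */
+/* already been set or an abort is already in progress.                      */
+/* ------------------------------------------------------------------------- */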
+void Dbtc::abortErrorLab(Signal* signal)
+{
+ ptrGuard(apiConnectptr);
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
+ jam();
+ return;
+ }
+ transP->returnsignal = RS_TCROLLBACKREP;
+ if(transP->returncode == 0){
+ jam();
+ transP->returncode = terrorCode;
+ }
+ abort010Lab(signal);
+}//Dbtc::abortErrorLab()
+
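+/* ------------------------------------------------------------------------- */
+/* Mark the transaction as CS_ABORTING and start aborting all participants.  */
+/* With no participants the resources are released immediately, otherwise    */
+/* the abort continues operation by operation in abort015Lab.                */
+/* ------------------------------------------------------------------------- */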
+void Dbtc::abort010Lab(Signal* signal)
+{
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){
+ jam();
+ return;
+ }
+ transP->apiConnectstate = CS_ABORTING;
+ /*------------------------------------------------------------------------*/
+ /* AN ABORT DECISION HAS BEEN TAKEN FOR SOME REASON. WE NEED TO ABORT */
+ /* ALL PARTICIPANTS IN THE TRANSACTION. */
+ /*------------------------------------------------------------------------*/
+ transP->abortState = AS_ACTIVE;
+ transP->counter = 0;
+
+ if (transP->firstTcConnect == RNIL) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ /* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */
+ /*-----------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+ return;
+ }//if
+ tcConnectptr.i = transP->firstTcConnect;
+ abort015Lab(signal);
+}//Dbtc::abort010Lab()
+
+/*--------------------------------------------------------------------------*/
+/* */
+/* WE WILL ABORT ONE NODE PER OPERATION AT A TIME. THIS IS TO KEEP */
+/* ERROR HANDLING OF THIS PROCESS FAIRLY SIMPLE AND TRACTABLE. */
+/* EVEN IF NO NODE OF THIS PARTICULAR NODE NUMBER NEEDS ABORTION WE */
+/* MUST ENSURE THAT ALL NODES ARE CHECKED. THUS A FAULTY NODE DOES */
+/* NOT MEAN THAT ALL NODES IN AN OPERATION ARE ABORTED. FOR THIS REASON */
+/* WE SET THE TCONTINUE_ABORT TO TRUE WHEN A FAULTY NODE IS DETECTED. */
+/*--------------------------------------------------------------------------*/
+void Dbtc::abort015Lab(Signal* signal)
+{
+ Uint32 TloopCount = 0;
+ABORT020:
+ jam();
+ TloopCount++;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_WAIT_DIH:
+ case OS_WAIT_KEYINFO:
+ case OS_WAIT_ATTR:
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* WE ARE STILL WAITING FOR MORE KEYINFO/ATTRINFO. WE HAVE NOT CONTACTED*/
+ /* ANY LQH YET AND SO WE CAN SIMPLY SET STATE TO ABORTING. */
+ /*----------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ break;
+ case OS_CONNECTED:
+ jam();
+ /*-----------------------------------------------------------------------
+ * WE ARE STILL IN THE INITIAL PHASE OF THIS OPERATION.
+ * NEED NOT BOTHER ABOUT ANY LQH ABORTS.
+ *-----------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal)
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ break;
+ case OS_PREPARED:
+ jam();
+ case OS_OPERATING:
+ jam();
+ /*----------------------------------------------------------------------
+ * WE HAVE SENT LQHKEYREQ AND ARE IN SOME STATE OF EITHER STILL
+ * SENDING THE OPERATION, WAITING FOR REPLIES, WAITING FOR MORE
+ * ATTRINFO OR OPERATION IS PREPARED. WE NEED TO ABORT ALL LQH'S.
+ *----------------------------------------------------------------------*/
+ releaseAndAbort(signal);
+ tcConnectptr.p->tcConnectstate = OS_ABORT_SENT;
+ TloopCount += 127;
+ break;
+ case OS_ABORTING:
+ jam();
+ break;
+ case OS_ABORT_SENT:
+ jam();
+ DEBUG("ABORT_SENT state in abort015Lab(), not expected");
+ systemErrorLab(signal);
+ return;
+ default:
+ jam();
+ DEBUG("tcConnectstate = " << tcConnectptr.p->tcConnectstate);
+ systemErrorLab(signal);
+ return;
+ }//switch
+
+ if (tcConnectptr.p->nextTcConnect != RNIL) {
+ jam();
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (TloopCount < 1024) {
+ goto ABORT020;
+ } else {
+ jam();
+ /*---------------------------------------------------------------------
+ * Reset timer to avoid time-out in real-time break.
+ * Increase counter to ensure that we don't think that all ABORTED have
+ * been received before all have been sent.
+ *---------------------------------------------------------------------*/
+ apiConnectptr.p->counter++;
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ signal->theData[0] = TcContinueB::ZABORT_BREAK;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ }//if
+ if (apiConnectptr.p->counter > 0) {
+ jam();
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ return;
+ }//if
+ /*-----------------------------------------------------------------------
+ * WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED
+ * FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL
+ * RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE
+ *------------------------------------------------------------------------*/
+ releaseAbortResources(signal);
+}//Dbtc::abort015Lab()
+
+/*--------------------------------------------------------------------------*/
+/* RELEASE KEY AND ATTRINFO OBJECTS AND SEND ABORT TO THE LQH BLOCK. */
+/*--------------------------------------------------------------------------*/
+int Dbtc::releaseAndAbort(Signal* signal)
+{
+ HostRecordPtr localHostptr;
+ UintR TnoLoops = tcConnectptr.p->noOfNodes;
+
+ apiConnectptr.p->counter++;
+ bool prevAlive = false;
+ for (Uint32 Ti = 0; Ti < TnoLoops ; Ti++) {
+ localHostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(localHostptr, chostFilesize, hostRecord);
+ if (localHostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ if (prevAlive) {
+ // if previous is alive, its LQH forwards abort to this node
+ jam();
+ continue;
+ }
+ /* ************< */
+ /* ABORT < */
+ /* ************< */
+ tblockref = calcLqhBlockRef(localHostptr.i);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ sendSignal(tblockref, GSN_ABORT, signal, 4, JBB);
+ prevAlive = true;
+ } else {
+ jam();
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = localHostptr.i;
+ signal->theData[4] = ZFALSE;
+ sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
+ prevAlive = false;
+ }//if
+ }//for
+ return 1;
+}//Dbtc::releaseAndAbort()
+
+/* ------------------------------------------------------------------------- */
+/* ------- ENTER TIME_SIGNAL ------- */
+/* */
+/* ------------------------------------------------------------------------- */
+void Dbtc::execTIME_SIGNAL(Signal* signal)
+{
+
+ jamEntry();
+ ctcTimer++;
+ if (csystemStart != SSS_TRUE) {
+ jam();
+ return;
+ }//if
+ checkStartTimeout(signal);
+ checkStartFragTimeout(signal);
+}//Dbtc::execTIME_SIGNAL()
+
+/*------------------------------------------------*/
+/* Start timeout handling if not already going on */
+/*------------------------------------------------*/
+void Dbtc::checkStartTimeout(Signal* signal)
+{
+ ctimeOutCheckCounter++;
+ if (ctimeOutCheckActive == TOCS_TRUE) {
+ jam();
+ // Check heartbeat of timeout loop
+ if(ctimeOutCheckHeartbeat > ctimeOutCheckLastHeartbeat){
+ jam();
+ ctimeOutMissedHeartbeats = 0;
+ }else{
+ jam();
+ ctimeOutMissedHeartbeats++;
+ if (ctimeOutMissedHeartbeats > 100){
+ jam();
+ systemErrorLab(signal);
+ }
+ }
+ ctimeOutCheckLastHeartbeat = ctimeOutCheckHeartbeat;
+ return;
+ }//if
+ if (ctimeOutCheckCounter < ctimeOutCheckDelay) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* */
+ /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
+ /*------------------------------------------------------------------*/
+ return;
+ }//if
+ ctimeOutCheckActive = TOCS_TRUE;
+ ctimeOutCheckCounter = 0;
+ timeOutLoopStartLab(signal, 0); // 0 is first api connect record
+ return;
+}//Dbtc::checkStartTimeout()
+
+/*----------------------------------------------------------------*/
+/* Start fragment (scan) timeout handling if not already going on */
+/*----------------------------------------------------------------*/
+void Dbtc::checkStartFragTimeout(Signal* signal)
+{
+ ctimeOutCheckFragCounter++;
+ if (ctimeOutCheckFragActive == TOCS_TRUE) {
+ jam();
+ return;
+ }//if
+ if (ctimeOutCheckFragCounter < ctimeOutCheckDelay) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */
+ /*------------------------------------------------------------------*/
+ return;
+ }//if
+
+ // Go through the fragment records and look for timeout in a scan.
+ ctimeOutCheckFragActive = TOCS_TRUE;
+ ctimeOutCheckFragCounter = 0;
+ timeOutLoopStartFragLab(signal, 0); // 0 means first scan record
+}//checkStartFragTimeout()
+
+/*------------------------------------------------------------------*/
+/* IT IS NOW TIME TO CHECK WHETHER ANY TRANSACTIONS HAVE */
+/* BEEN DELAYED FOR SO LONG THAT WE ARE FORCED TO PERFORM */
+/* SOME ACTION, EITHER ABORT OR RESEND OR REMOVE A NODE FROM */
+/* THE WAITING PART OF A PROTOCOL. */
+/*
+The algorithm used here is to check 1024 transactions at a time before
+doing a real-time break.
+To avoid aborting both transactions in a deadlock detected by time-out
+we insert a random extra time-out of up to 630 ms by using the lowest
+six bits of the api connect reference.
+We spread it out from 0 to 630 ms if base time-out is larger than 3 sec,
+we spread it out from 0 to 70 ms if base time-out is smaller than 300 msec,
+and otherwise we spread it out 310 ms.
+*/
+/*------------------------------------------------------------------*/
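+/*
+Example (assuming the 10 ms timer tick implied by the figures above): with a
+base time-out of 120 ticks (1.2 seconds) mask_value becomes 31, so the api
+connect record with index 37 gets an extra 37 & 31 = 5 ticks, roughly 50 ms,
+added to its time-out value.
+*/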
+void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
+{
+ Uint32 end_ptr, time_passed, time_out_value, mask_value;
+ const Uint32 api_con_sz= capiConnectFilesize;
+ const Uint32 tc_timer= ctcTimer;
+ const Uint32 time_out_param= ctimeOutValue;
+
+ ctimeOutCheckHeartbeat = tc_timer;
+
+ if (api_con_ptr + 1024 < api_con_sz) {
+ jam();
+ end_ptr= api_con_ptr + 1024;
+ } else {
+ jam();
+ end_ptr= api_con_sz;
+ }
+ if (time_out_param > 300) {
+ jam();
+ mask_value= 63;
+ } else if (time_out_param < 30) {
+ jam();
+ mask_value= 7;
+ } else {
+ jam();
+ mask_value= 31;
+ }
+ for ( ; api_con_ptr < end_ptr; api_con_ptr++) {
+ Uint32 api_timer= getApiConTimer(api_con_ptr);
+ jam();
+ if (api_timer != 0) {
+ time_out_value= time_out_param + (api_con_ptr & mask_value);
+ time_passed= tc_timer - api_timer;
+ if (time_passed > time_out_value) {
+ jam();
+ timeOutFoundLab(signal, api_con_ptr);
+ return;
+ }
+ }
+ }
+ if (api_con_ptr == api_con_sz) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* */
+ /* WE HAVE NOW CHECKED ALL TRANSACTIONS FOR TIME-OUT AND ALSO */
+ /* STARTED TIME-OUT HANDLING OF THOSE WE FOUND. WE ARE NOW */
+ /* READY AND CAN WAIT FOR THE NEXT TIME-OUT CHECK. */
+ /*------------------------------------------------------------------*/
+ ctimeOutCheckActive = TOCS_FALSE;
+ } else {
+ jam();
+ sendContinueTimeOutControl(signal, api_con_ptr);
+ }
+ return;
+}//Dbtc::timeOutLoopStartLab()
+
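+/* ------------------------------------------------------------------------- */
+/* A transaction has timed out. Decide what to do based on the state of the  */
+/* api connect record: abort it while still in the prepare phase, resend     */
+/* COMMIT/COMPLETE/ABORT in the wait states, keep waiting where nothing      */
+/* better can be done and crash on states that should be impossible here.    */
+/* ------------------------------------------------------------------------- */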
+void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
+{
+ sendContinueTimeOutControl(signal, TapiConPtr + 1);
+
+ apiConnectptr.i = TapiConPtr;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ /*------------------------------------------------------------------*/
+ /* */
+ /* THIS TRANSACTION HAS EXPERIENCED A TIME-OUT AND WE NEED TO */
+ /* FIND OUT WHAT WE NEED TO DO BASED ON THE STATE INFORMATION.*/
+ /*------------------------------------------------------------------*/
+ DEBUG("[ H'" << hex << apiConnectptr.p->transid[0]
+ << " H'" << apiConnectptr.p->transid[1] << "] " << dec
+ << "Time-out in state = " << apiConnectptr.p->apiConnectstate
+ << " apiConnectptr.i = " << apiConnectptr.i
+ << " - exec: " << apiConnectptr.p->m_exec_flag
+ << " - place: " << c_apiConTimer_line[apiConnectptr.i]);
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_STARTED:
+ ndbrequire(c_apiConTimer_line[apiConnectptr.i] != 3615);
+ if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
+ jam();
+ /*
+ We are waiting for the application to continue the transaction. In this
+ particular state we will use the application timeout parameter rather
+ than the shorter Deadlock detection timeout.
+ */
+ if ((ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) {
+ jam();
+ return;
+ }//if
+ }
+ apiConnectptr.p->returnsignal = RS_TCROLLBACKREP;
+ apiConnectptr.p->returncode = ZTIME_OUT_ERROR;
+ abort010Lab(signal);
+ return;
+ case CS_RECEIVING:
+ case CS_REC_COMMITTING:
+ case CS_START_COMMITTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE ARE STILL IN THE PREPARE PHASE AND THE TRANSACTION HAS */
+ /* NOT YET REACHED ITS COMMIT POINT. THUS IT IS NOW OK TO */
+ /* START ABORTING THE TRANSACTION. ALSO START CHECKING THE */
+ /* REMAINING TRANSACTIONS. */
+ /*------------------------------------------------------------------*/
+ terrorCode = ZTIME_OUT_ERROR;
+ abortErrorLab(signal);
+ return;
+ case CS_COMMITTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ // We are simply waiting for a signal in the job buffer. Only extreme
+ // conditions should get us here. We ignore it.
+ /*------------------------------------------------------------------*/
+ case CS_COMPLETING:
+ jam();
+ /*------------------------------------------------------------------*/
+ // We are simply waiting for a signal in the job buffer. Only extreme
+ // conditions should get us here. We ignore it.
+ /*------------------------------------------------------------------*/
+ case CS_PREPARE_TO_COMMIT:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/
+ /* KEEP WAITING SINCE THERE IS NO BETTER IDEA ON WHAT TO DO. */
+ /* IF IT IS BLOCKED THEN NO TRANSACTION WILL PASS THIS GATE. */
+ // To ensure against strange bugs we crash the system if we have passed
+ // time-out period by a factor of 10 and it is also at least 5 seconds.
+ /*------------------------------------------------------------------*/
+ if (((ctcTimer - getApiConTimer(apiConnectptr.i)) > (10 * ctimeOutValue)) &&
+ ((ctcTimer - getApiConTimer(apiConnectptr.i)) > 500)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ break;
+ case CS_COMMIT_SENT:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* WE HAVE SENT COMMIT TO A NUMBER OF NODES. WE ARE CURRENTLY */
+ /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
+ /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMMIT SIGNAL */
+ /* TO THOSE NODES THAT HAVE MISSED THE COMMIT SIGNAL DUE TO */
+ /* A NODE FAILURE. */
+ /*------------------------------------------------------------------*/
+ tabortInd = ZCOMMIT_SETUP;
+ setupFailData(signal);
+ toCommitHandlingLab(signal);
+ return;
+ case CS_COMPLETE_SENT:
+ jam();
+ /*--------------------------------------------------------------------*/
+ /* WE HAVE SENT COMPLETE TO A NUMBER OF NODES. WE ARE CURRENTLY */
+ /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */
+ /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMPLETE SIGNAL */
+ /* TO THOSE NODES THAT HAVE MISSED THE COMPLETE SIGNAL DUE TO */
+ /* A NODE FAILURE. */
+ /*--------------------------------------------------------------------*/
+ tabortInd = ZCOMMIT_SETUP;
+ setupFailData(signal);
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_ABORTING:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* TIME-OUT DURING ABORT. WE NEED TO SEND ABORTED FOR ALL */
+ /* NODES THAT HAVE FAILED BEFORE SENDING ABORTED. */
+ /*------------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ sendAbortedAfterTimeout(signal, 0);
+ break;
+ case CS_START_SCAN:{
+ jam();
+ ScanRecordPtr scanPtr;
+ scanPtr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord);
+ scanError(signal, scanPtr, ZSCANTIME_OUT_ERROR);
+ break;
+ }
+ case CS_WAIT_ABORT_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for ABORTCONF. We will resend the ABORTREQ just in
+ // case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 20);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 3) {
+ /*------------------------------------------------------------------*/
+ // More than 3 time-outs are not acceptable. We will shoot down the node
+ // not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toAbortHandlingLab(signal);
+ return;
+ case CS_WAIT_COMMIT_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for COMMITCONF. We will resend the COMMITREQ just in
+ // case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 21);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 3) {
+ /*------------------------------------------------------------------*/
+ // More than 3 time-outs are not acceptable. We will shoot down the node
+ // not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCommitHandlingLab(signal);
+ return;
+ case CS_WAIT_COMPLETE_CONF:
+ jam();
+ tcConnectptr.i = apiConnectptr.p->currentTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ /*------------------------------------------------------------------*/
+ // Time-out waiting for COMPLETECONF. We will resend the COMPLETEREQ
+ // just in case.
+ /*------------------------------------------------------------------*/
+ warningReport(signal, 22);
+ apiConnectptr.p->timeOutCounter++;
+ if (apiConnectptr.p->timeOutCounter > 100) {
+ /*------------------------------------------------------------------*/
+ // 100 time-outs are not acceptable. We will shoot down the node
+ // not responding.
+ /*------------------------------------------------------------------*/
+ reportNodeFailed(signal, hostptr.i);
+ }//if
+ apiConnectptr.p->currentReplicaNo++;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_FAIL_PREPARED:
+ jam();
+ case CS_FAIL_COMMITTING:
+ jam();
+ case CS_FAIL_COMMITTED:
+ jam();
+ case CS_REC_PREPARING:
+ jam();
+ case CS_START_PREPARING:
+ jam();
+ case CS_PREPARED:
+ jam();
+ case CS_RESTART:
+ jam();
+ case CS_FAIL_ABORTED:
+ jam();
+ case CS_DISCONNECTED:
+ jam();
+ default:
+ jam();
+ /*------------------------------------------------------------------*/
+ /* AN IMPOSSIBLE STATE IS SET. CRASH THE SYSTEM. */
+ /*------------------------------------------------------------------*/
+ DEBUG("State = " << apiConnectptr.p->apiConnectstate);
+ systemErrorLab(signal);
+ return;
+ }//switch
+ return;
+}//Dbtc::timeOutFoundLab()
+
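+/* ------------------------------------------------------------------------- */
+/* Time-out while waiting for ABORTED. Walk the operations still in          */
+/* OS_ABORT_SENT, resend ABORT to nodes that are alive and send ABORTED to   */
+/* ourselves on behalf of nodes that have failed. A CONTINUEB real-time      */
+/* break is taken after 1024 records.                                        */
+/* ------------------------------------------------------------------------- */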
+void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
+{
+ ApiConnectRecord * transP = apiConnectptr.p;
+ if(transP->abortState == AS_IDLE){
+ jam();
+ warningEvent("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
+ __LINE__,
+ apiConnectptr.i,
+ transP->apiConnectstate,
+ c_apiConTimer_line[apiConnectptr.i],
+ transP->firstTcConnect,
+ c_apiConTimer[apiConnectptr.i]
+ );
+ ndbout_c("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d",
+ __LINE__,
+ apiConnectptr.i,
+ transP->apiConnectstate,
+ c_apiConTimer_line[apiConnectptr.i],
+ transP->firstTcConnect,
+ c_apiConTimer[apiConnectptr.i]
+ );
+ ndbrequire(false);
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ return;
+ }
+
+ OperationState tmp[16];
+
+ Uint32 TloopCount = 0;
+ do {
+ jam();
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ if (Tcheck == 0) {
+ jam();
+ /*------------------------------------------------------------------
+ * All nodes had already reported ABORTED for all tcConnect records.
+ * Crash since it is an error situation that we then received a
+ * time-out.
+ *------------------------------------------------------------------*/
+ char buf[96]; buf[0] = 0;
+ char buf2[96];
+ BaseString::snprintf(buf, sizeof(buf), "TC %d: %d ops:",
+ __LINE__, apiConnectptr.i);
+ for(Uint32 i = 0; i<TloopCount; i++){
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
+ BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
+ }
+ warningEvent(buf);
+ ndbout_c(buf);
+ ndbrequire(false);
+ }
+ releaseAbortResources(signal);
+ return;
+ }//if
+ TloopCount++;
+ if (TloopCount >= 1024) {
+ jam();
+ /*------------------------------------------------------------------*/
+ // Insert a real-time break for large transactions to avoid blowing
+ // away the job buffer.
+ /*------------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->counter++;
+ signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK;
+ signal->theData[1] = tcConnectptr.i;
+ signal->theData[2] = apiConnectptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//if
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if(TloopCount < 16){
+ jam();
+ tmp[TloopCount-1] = tcConnectptr.p->tcConnectstate;
+ }
+
+ if (tcConnectptr.p->tcConnectstate == OS_ABORT_SENT) {
+ jam();
+ /*------------------------------------------------------------------*/
+ // We have sent an ABORT signal to this node but not yet received any
+ // reply. We have to send an ABORTED signal on our own in some cases.
+ // If the node is declared as up and running and still do not respond
+ // in time to the ABORT signal we will declare it as dead.
+ /*------------------------------------------------------------------*/
+ UintR Ti = 0;
+ arrGuard(tcConnectptr.p->noOfNodes, 4);
+ for (Ti = 0; Ti < tcConnectptr.p->noOfNodes; Ti++) {
+ jam();
+ if (tcConnectptr.p->tcNodedata[Ti] != 0) {
+ TloopCount += 31;
+ Tcheck = 1;
+ hostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ /*---------------------------------------------------------------
+ * A backup replica has not sent ABORTED.
+ * Could be that a node before it has crashed.
+ * Send an ABORT signal specifically to this node.
+ * We will not send to any more nodes after this
+ * to avoid race problems.
+ * To also ensure that we use this message also as a heartbeat
+ * we will move this node to the primary replica seat.
+ * The primary replica and any failed node after it will
+ * be removed from the node list. Update also number of nodes.
+ * Finally break the loop to ensure we don't mess
+ * things up by executing another loop.
+ * We also update the timer to ensure we don't get time-out
+ * too early.
+ *--------------------------------------------------------------*/
+ BlockReference TBRef = calcLqhBlockRef(hostptr.i);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ sendSignal(TBRef, GSN_ABORT, signal, 4, JBB);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ break;
+ } else {
+ jam();
+ /*--------------------------------------------------------------
+ * The node we are waiting for is dead. We will send ABORTED to
+ * ourselves on behalf of the failed node.
+ *--------------------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = hostptr.i;
+ signal->theData[4] = ZFALSE;
+ sendSignal(cownref, GSN_ABORTED, signal, 5, JBB);
+ }//if
+ }//if
+ }//for
+ }//if
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ } while (1);
+}//Dbtc::sendAbortedAfterTimeout()
+
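+/*------------------------------------------------------------------------*/
+/* Ask QMGR to disconnect a node that no longer responds by sending */
+/* DISCONNECT_REP with error code TcReportNodeFailed. */
+/*------------------------------------------------------------------------*/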
+void Dbtc::reportNodeFailed(Signal* signal, Uint32 nodeId)
+{
+ DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ rep->nodeId = nodeId;
+ rep->err = DisconnectRep::TcReportNodeFailed;
+ sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
+ DisconnectRep::SignalLength, JBB);
+}//Dbtc::reportNodeFailed()
+
+/*-------------------------------------------------*/
+/* Timeout-loop for scanned fragments. */
+/*-------------------------------------------------*/
+void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr)
+{
+ ScanFragRecPtr timeOutPtr[8];
+ UintR tfragTimer[8];
+ UintR texpiredTime[8];
+ UintR TloopCount = 0;
+ Uint32 TtcTimer = ctcTimer;
+
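+ /*----------------------------------------------------------------*/
+ /* Check the scan fragment records eight at a time. A fragment */
+ /* has timed out when the time since its last heartbeat, */
+ /* TtcTimer - scanFragTimer, exceeds ctimeOutValue. */
+ /*----------------------------------------------------------------*/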
+ while ((TscanConPtr + 8) < cscanFragrecFileSize) {
+ jam();
+ timeOutPtr[0].i = TscanConPtr + 0;
+ timeOutPtr[1].i = TscanConPtr + 1;
+ timeOutPtr[2].i = TscanConPtr + 2;
+ timeOutPtr[3].i = TscanConPtr + 3;
+ timeOutPtr[4].i = TscanConPtr + 4;
+ timeOutPtr[5].i = TscanConPtr + 5;
+ timeOutPtr[6].i = TscanConPtr + 6;
+ timeOutPtr[7].i = TscanConPtr + 7;
+
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[1]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[2]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[3]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[4]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[5]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[6]);
+ c_scan_frag_pool.getPtrForce(timeOutPtr[7]);
+
+ tfragTimer[0] = timeOutPtr[0].p->scanFragTimer;
+ tfragTimer[1] = timeOutPtr[1].p->scanFragTimer;
+ tfragTimer[2] = timeOutPtr[2].p->scanFragTimer;
+ tfragTimer[3] = timeOutPtr[3].p->scanFragTimer;
+ tfragTimer[4] = timeOutPtr[4].p->scanFragTimer;
+ tfragTimer[5] = timeOutPtr[5].p->scanFragTimer;
+ tfragTimer[6] = timeOutPtr[6].p->scanFragTimer;
+ tfragTimer[7] = timeOutPtr[7].p->scanFragTimer;
+
+ texpiredTime[0] = TtcTimer - tfragTimer[0];
+ texpiredTime[1] = TtcTimer - tfragTimer[1];
+ texpiredTime[2] = TtcTimer - tfragTimer[2];
+ texpiredTime[3] = TtcTimer - tfragTimer[3];
+ texpiredTime[4] = TtcTimer - tfragTimer[4];
+ texpiredTime[5] = TtcTimer - tfragTimer[5];
+ texpiredTime[6] = TtcTimer - tfragTimer[6];
+ texpiredTime[7] = TtcTimer - tfragTimer[7];
+
+ for (Uint32 Ti = 0; Ti < 8; Ti++) {
+ jam();
+ if (tfragTimer[Ti] != 0) {
+
+ if (texpiredTime[Ti] > ctimeOutValue) {
+ jam();
+ DEBUG("Fragment timeout found:"<<
+ " ctimeOutValue=" <<ctimeOutValue
+ <<", texpiredTime="<<texpiredTime[Ti]<<endl
+ <<" tfragTimer="<<tfragTimer[Ti]
+ <<", ctcTimer="<<ctcTimer);
+ timeOutFoundFragLab(signal, TscanConPtr + Ti);
+ return;
+ }//if
+ }//if
+ }//for
+ TscanConPtr += 8;
+ /*----------------------------------------------------------------*/
+ /* We split up the processing, checking 1024 fragment records at a */
+ /* time to maintain real-time behaviour. */
+ /*----------------------------------------------------------------*/
+ if (TloopCount++ > 128 ) {
+ jam();
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
+ signal->theData[1] = TscanConPtr;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ }//while
+ for ( ; TscanConPtr < cscanFragrecFileSize; TscanConPtr++){
+ jam();
+ timeOutPtr[0].i = TscanConPtr;
+ c_scan_frag_pool.getPtrForce(timeOutPtr[0]);
+ if (timeOutPtr[0].p->scanFragTimer != 0) {
+ texpiredTime[0] = ctcTimer - timeOutPtr[0].p->scanFragTimer;
+ if (texpiredTime[0] > ctimeOutValue) {
+ jam();
+ DEBUG("Fragment timeout found:"<<
+ " ctimeOutValue=" <<ctimeOutValue
+ <<", texpiredTime="<<texpiredTime[0]<<endl
+ <<" tfragTimer="<<tfragTimer[0]
+ <<", ctcTimer="<<ctcTimer);
+ timeOutFoundFragLab(signal, TscanConPtr);
+ return;
+ }//if
+ }//if
+ }//for
+ ctimeOutCheckFragActive = TOCS_FALSE;
+
+ return;
+}//timeOutLoopStartFragLab()
+
+/*--------------------------------------------------------------------------*/
+/* Handle the heartbeat signal from LQH in a scan process. */
+// (Set timer on fragrec.)
+/*--------------------------------------------------------------------------*/
+void Dbtc::execSCAN_HBREP(Signal* signal)
+{
+ jamEntry();
+
+ scanFragptr.i = signal->theData[0];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ switch (scanFragptr.p->scanFragState){
+ case ScanFragRec::LQH_ACTIVE:
+ break;
+ default:
+ DEBUG("execSCAN_HBREP: scanFragState="<<scanFragptr.p->scanFragState);
+ systemErrorLab(signal);
+ break;
+ }
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ if (!(apiConnectptr.p->transid[0] == signal->theData[1] &&
+ apiConnectptr.p->transid[1] == signal->theData[2])){
+ jam();
+ /**
+ * Send signal back to sender so that the crash occurs there
+ */
+ // Save original transid
+ signal->theData[3] = signal->theData[0];
+ signal->theData[4] = signal->theData[1];
+ // Set transid to illegal values
+ signal->theData[1] = RNIL;
+ signal->theData[2] = RNIL;
+
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_HBREP, signal, 5, JBA);
+ DEBUG("SCAN_HBREP with wrong transid("
+ <<signal->theData[3]<<", "<<signal->theData[4]<<")");
+ return;
+ }//if
+
+ // Update timer on ScanFragRec
+ if (scanFragptr.p->scanFragTimer != 0){
+ updateBuddyTimer(apiConnectptr);
+ scanFragptr.p->startFragTimer(ctcTimer);
+ } else {
+ ndbassert(false);
+ DEBUG("SCAN_HBREP when scanFragTimer was turned off");
+ }
+}//execSCAN_HBREP()
+
+/*--------------------------------------------------------------------------*/
+/* Timeout has occurred on a fragment, which means a scan has timed out. */
+/* If this is true we have an error in LQH/ACC. */
+/*--------------------------------------------------------------------------*/
+void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
+{
+ ScanFragRecPtr ptr;
+ c_scan_frag_pool.getPtr(ptr, TscanConPtr);
+ DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState);
+
+ /*-------------------------------------------------------------------------*/
+ // The scan fragment has expired its timeout. Check its state to decide
+ // what to do.
+ /*-------------------------------------------------------------------------*/
+ switch (ptr.p->scanFragState) {
+ case ScanFragRec::WAIT_GET_PRIMCONF:
+ jam();
+ ndbrequire(false);
+ break;
+ case ScanFragRec::LQH_ACTIVE:{
+ jam();
+
+ /**
+ * The LQH expired its timeout, try to close it
+ */
+ Uint32 nodeId = refToNode(ptr.p->lqhBlockref);
+ Uint32 connectCount = getNodeInfo(nodeId).m_connectCount;
+ ScanRecordPtr scanptr;
+ scanptr.i = ptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ if(connectCount != ptr.p->m_connectCount){
+ jam();
+ /**
+ * The node has died
+ */
+ ptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(ptr);
+ ptr.p->stopFragTimer();
+ }
+
+ scanError(signal, scanptr, ZSCAN_FRAG_LQH_ERROR);
+ break;
+ }
+ case ScanFragRec::DELIVERED:
+ jam();
+ case ScanFragRec::IDLE:
+ jam();
+ case ScanFragRec::QUEUED_FOR_DELIVERY:
+ jam();
+ /*-----------------------------------------------------------------------
+ * Should never occur. In a debug version we crash here; in a release
+ * version we simply set the fragment timer to zero and continue.
+ *-----------------------------------------------------------------------*/
+#ifdef VM_TRACE
+ systemErrorLab(signal);
+#endif
+ ptr.p->stopFragTimer();
+ break;
+ default:
+ jam();
+ /*-----------------------------------------------------------------------
+ * Non-existent state. Crash.
+ *-----------------------------------------------------------------------*/
+ systemErrorLab(signal);
+ break;
+ }//switch
+
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
+ signal->theData[1] = TscanConPtr + 1;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+}//timeOutFoundFragLab()
+
+
+/*
+ 4.3.16 GCP_NOMORETRANS
+ ----------------------
+*/
+/*****************************************************************************
+ * G C P _ N O M O R E T R A N S
+ *
+ * WHEN DBTC RECEIVES SIGNAL GCP_NOMORETRANS A CHECK IS DONE TO FIND OUT IF
+ * THERE ARE ANY GLOBAL CHECKPOINTS GOING ON - CFIRSTGCP /= RNIL. DBTC THEN
+ * SEARCHES THE GCP_RECORD FILE TO FIND OUT IF THERE ARE ANY TRANSACTIONS NOT
+ * CONCLUDED WITH THIS SPECIFIC CHECKPOINT - GCP_PTR:GCP_ID = TCHECK_GCP_ID.
+ * FOR EACH TRANSACTION WHERE API_CONNECTSTATE EQUALS PREPARED, COMMITTING,
+ * COMMITTED OR COMPLETING SIGNAL CONTINUEB IS SENT WITH A DELAY OF 100 MS,
+ * THE COUNTER GCP_PTR:OUTSTANDINGAPI IS INCREASED. WHEN CONTINUEB IS RECEIVED
+ * THE COUNTER IS DECREASED AND A CHECK IS DONE TO FIND OUT IF ALL
+ * TRANSACTIONS ARE CONCLUDED. IF SO, SIGNAL GCP_TCFINISHED IS SENT.
+ *****************************************************************************/
+void Dbtc::execGCP_NOMORETRANS(Signal* signal)
+{
+ jamEntry();
+ tcheckGcpId = signal->theData[1];
+ if (cfirstgcp != RNIL) {
+ jam();
+ /* A GLOBAL CHECKPOINT IS GOING ON */
+ gcpPtr.i = cfirstgcp; /* SET POINTER TO FIRST GCP IN QUEUE*/
+ ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
+ if (gcpPtr.p->gcpId == tcheckGcpId) {
+ jam();
+ if (gcpPtr.p->firstApiConnect != RNIL) {
+ jam();
+ gcpPtr.p->gcpNomoretransRec = ZTRUE;
+ } else {
+ jam();
+ gcpTcfinished(signal);
+ unlinkGcp(signal);
+ }//if
+ } else {
+ jam();
+ /*------------------------------------------------------------*/
+ /* IF IT IS NOT THE FIRST THEN THERE SHOULD BE NO */
+ /* RECORD FOR THIS GLOBAL CHECKPOINT. WE ALWAYS REMOVE */
+ /* THE GLOBAL CHECKPOINTS IN ORDER. */
+ /*------------------------------------------------------------*/
+ gcpTcfinished(signal);
+ }//if
+ } else {
+ jam();
+ gcpTcfinished(signal);
+ }//if
+ return;
+}//Dbtc::execGCP_NOMORETRANS()
+
+/*****************************************************************************/
+/* */
+/* TAKE OVER MODULE */
+/* */
+/*****************************************************************************/
+/* */
+/* THIS PART OF TC TAKES OVER THE COMMIT/ABORT OF TRANSACTIONS WHERE THE */
+/* NODE ACTING AS TC HAS FAILED. IT STARTS BY QUERYING ALL NODES ABOUT */
+/* ANY OPERATIONS PARTICIPATING IN A TRANSACTION WHERE THE TC NODE HAS */
+/* FAILED. */
+/* */
+/* AFTER RECEIVING INFORMATION FROM ALL NODES ABOUT OPERATION STATUS THIS */
+/* CODE WILL ENSURE THAT ALL AFFECTED TRANSACTIONS ARE PROPERLY ABORTED OR*/
+/* COMMITTED. THE ORIGINATING APPLICATION NODE WILL ALSO BE CONTACTED. */
+/* IF THE ORIGINATING APPLICATION ALSO FAILED THEN THERE IS CURRENTLY NO */
+/* WAY TO FIND OUT WHETHER A TRANSACTION WAS PERFORMED OR NOT. */
+/*****************************************************************************/
+void Dbtc::execNODE_FAILREP(Signal* signal)
+{
+ HostRecordPtr tmpHostptr;
+ jamEntry();
+
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ cfailure_nr = nodeFail->failNo;
+ const Uint32 tnoOfNodes = nodeFail->noOfNodes;
+ const Uint32 tnewMasterId = nodeFail->masterNodeId;
+
+ arrGuard(tnoOfNodes, MAX_NDB_NODES);
+ int index = 0;
+ for (unsigned i = 1; i< MAX_NDB_NODES; i++) {
+ if(NodeBitmask::get(nodeFail->theNodes, i)){
+ cdata[index] = i;
+ index++;
+ }//if
+ }//for
+
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ Uint32 tindex;
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
+ jam();
+ hostptr.i = cdata[tindex];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ /*------------------------------------------------------------*/
+ /* SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS */
+ /* FAILED. */
+ /*------------------------------------------------------------*/
+ hostptr.p->hostStatus = HS_DEAD;
+
+ if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* A VERY UNUSUAL SITUATION. THE TAKE OVER WAS COMPLETED*/
+ /* EVEN BEFORE WE HEARD ABOUT THE NODE FAILURE REPORT. */
+ /* HOWEVER UNUSUAL, THIS SITUATION IS POSSIBLE. */
+ /*------------------------------------------------------------*/
+ /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
+ /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
+ /* USED THEM IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ {
+ NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
+ nfRep->blockNo = DBTC;
+ nfRep->nodeId = cownNodeid;
+ nfRep->failedNodeId = hostptr.i;
+ }
+ sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ } else {
+ ndbrequire(hostptr.p->takeOverStatus == TOS_IDLE);
+ hostptr.p->takeOverStatus = TOS_NODE_FAILED;
+ }//if
+
+ if (tcNodeFailptr.p->failStatus == FS_LISTENING) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE */
+ /* FAILURE. */
+ /*------------------------------------------------------------*/
+ if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WERE WAITING FOR THE FAILED NODE IN THE TAKE OVER */
+ /* PROTOCOL FOR TC. */
+ /*------------------------------------------------------------*/
+ signal->theData[0] = TcContinueB::ZNODE_TAKE_OVER_COMPLETED;
+ signal->theData[1] = hostptr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ }//if
+
+ }//for
+
+ const bool masterFailed = (cmasterNodeId != tnewMasterId);
+ cmasterNodeId = tnewMasterId;
+
+ if(getOwnNodeId() == cmasterNodeId && masterFailed){
+ /**
+ * Master has failed and I'm the new master
+ */
+ jam();
+
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ if (hostptr.p->takeOverStatus == TOS_COMPLETED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* SEND TAKE OVER CONFIRMATION TO ALL ALIVE NODES IF */
+ /* TAKE OVER IS COMPLETED. THIS IS PERFORMED TO ENSURE */
+ /* THAT ALL NODES AGREE ON THE IDLE STATE OF THE TAKE */
+ /* OVER. THIS MIGHT BE MISSED IN AN ERROR SITUATION IF */
+ /* THE MASTER FAILS AFTER SENDING CONFIRMATION TO THE */
+ /* NEW MASTER BUT BEFORE SENDING IT TO ANOTHER NODE */
+ /* WHICH WAS NOT MASTER. IF THAT NODE LATER BECOMES */
+ /* MASTER IT MIGHT START A NEW TAKE OVER EVEN AFTER THE */
+ /* CRASHED NODE HAS ALREADY RECOVERED. */
+ /*------------------------------------------------------------*/
+ for(tmpHostptr.i = 1; tmpHostptr.i < MAX_NDB_NODES;tmpHostptr.i++) {
+ jam();
+ ptrAss(tmpHostptr, hostRecord);
+ if (tmpHostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcTcBlockRef(tmpHostptr.i);
+ signal->theData[0] = hostptr.i;
+ sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
+ }//if
+ }//for
+ }//if
+ }//if
+ }//for
+ }
+
+ if(getOwnNodeId() == cmasterNodeId){
+ jam();
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ if (hostptr.p->takeOverStatus == TOS_NODE_FAILED) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* CONCLUDE ALL ACTIVITIES THE FAILED TC DID CONTROL */
+ /* SINCE WE ARE THE MASTER. THIS COULD HAVE BEEN STARTED*/
+ /* BY A PREVIOUS MASTER BUT NOT YET BEEN CONCLUDED. */
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_ACTIVE;
+ signal->theData[0] = hostptr.i;
+ sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB);
+ }//if
+ }//if
+ }//for
+ }//if
+ for (tindex = 0; tindex < tnoOfNodes; tindex++) {
+ jam();
+ hostptr.i = cdata[tindex];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ /*------------------------------------------------------------*/
+ /* LOOP THROUGH AND ABORT ALL SCANS THAT WERE */
+ /* CONTROLLED BY THIS TC AND ACTIVE IN THE FAILED */
+ /* NODE'S LQH */
+ /*------------------------------------------------------------*/
+ checkScanActiveInFailedLqh(signal, 0, hostptr.i);
+ checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid
+ }//for
+
+}//Dbtc::execNODE_FAILREP()
+
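+/*------------------------------------------------------------------------*/
+/* Check one scan record per invocation: release every scan fragment that */
+/* was LQH_ACTIVE in the failed node's LQH, report a scan error for that */
+/* scan and send CONTINUEB to check the next scan record. */
+/*------------------------------------------------------------------------*/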
+void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
+ Uint32 scanPtrI,
+ Uint32 failedNodeId){
+
+ ScanRecordPtr scanptr;
+ for (scanptr.i = scanPtrI; scanptr.i < cscanrecFileSize; scanptr.i++) {
+ jam();
+ ptrAss(scanptr, scanRecord);
+ bool found = false;
+ if (scanptr.p->scanState != ScanRecord::IDLE){
+ jam();
+ ScanFragRecPtr ptr;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ for(run.first(ptr); !ptr.isNull(); ){
+ jam();
+ ScanFragRecPtr curr = ptr;
+ run.next(ptr);
+ if (curr.p->scanFragState == ScanFragRec::LQH_ACTIVE &&
+ refToNode(curr.p->lqhBlockref) == failedNodeId){
+ jam();
+
+ run.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ found = true;
+ }
+ }
+ }
+ if(found){
+ jam();
+ scanError(signal, scanptr, ZSCAN_LQH_ERROR);
+ }
+
+ // Send CONTINUEB to continue later
+ signal->theData[0] = TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH;
+ signal->theData[1] = scanptr.i + 1; // Check next scanptr
+ signal->theData[2] = failedNodeId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }//for
+}
+
+void
+Dbtc::checkScanFragList(Signal* signal,
+ Uint32 failedNodeId,
+ ScanRecord * scanP,
+ ScanFragList::Head & head){
+
+ DEBUG("checkScanActiveInFailedLqh: scanFragError");
+}
+
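+/*------------------------------------------------------------*/
+/* THE MASTER REPORTS THAT TAKE OVER OF A FAILED TC NODE IS */
+/* COMPLETED. UPDATE THE TAKE OVER STATUS OF THAT NODE AND, */
+/* IF IT WAS MARKED AS FAILED OR UNDER TAKE OVER, REPORT THE */
+/* NODE FAILURE HANDLING AS COMPLETED TO DIH. */
+/*------------------------------------------------------------*/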
+void Dbtc::execTAKE_OVERTCCONF(Signal* signal)
+{
+ jamEntry();
+ tfailedNodeId = signal->theData[0];
+ hostptr.i = tfailedNodeId;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ switch (hostptr.p->takeOverStatus) {
+ case TOS_IDLE:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THIS MESSAGE ARRIVED EVEN BEFORE THE NODE_FAILREP */
+ /* MESSAGE. THIS IS POSSIBLE IN EXTREME SITUATIONS. */
+ /* WE SET THE STATE TO TAKE_OVER_COMPLETED AND WAIT */
+ /* FOR THE NODE_FAILREP MESSAGE. */
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_COMPLETED;
+ break;
+ case TOS_NODE_FAILED:
+ case TOS_ACTIVE:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THE TAKE OVER IS ACTIVE, WHETHER OR NOT WE ARE THE */
+ /* MASTER. IN BOTH CASES WE SET THE STATE TO */
+ /* TAKE_OVER_COMPLETED. */
+ /*------------------------------------------------------------*/
+ /* RELEASE THE CURRENTLY UNUSED LQH CONNECTIONS. THE */
+ /* REMAINING WILL BE RELEASED WHEN THE TRANSACTION THAT */
+ /* USED THEM IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ hostptr.p->takeOverStatus = TOS_COMPLETED;
+ {
+ NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0];
+ nfRep->blockNo = DBTC;
+ nfRep->nodeId = cownNodeid;
+ nfRep->failedNodeId = hostptr.i;
+ }
+ sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBB);
+ break;
+ case TOS_COMPLETED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE HAVE ALREADY RECEIVED THE CONF SIGNAL. IT IS MOST */
+ /* LIKELY SENT FROM A NEW MASTER WHICH WASN'T SURE IF */
+ /* THIS NODE HEARD THE CONF SIGNAL FROM THE OLD MASTER. */
+ /* WE SIMPLY IGNORE THE MESSAGE. */
+ /*------------------------------------------------------------*/
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}//Dbtc::execTAKE_OVERTCCONF()
+
+void Dbtc::execTAKE_OVERTCREQ(Signal* signal)
+{
+ jamEntry();
+ tfailedNodeId = signal->theData[0];
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ if (tcNodeFailptr.p->failStatus != FS_IDLE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE CAN CURRENTLY ONLY HANDLE ONE TAKE OVER AT A TIME */
+ /*------------------------------------------------------------*/
+ /* IF MORE THAN ONE TAKE OVER IS REQUESTED WE WILL */
+ /* QUEUE THE TAKE OVER AND START IT AS SOON AS THE */
+ /* PREVIOUS ONES ARE COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(tcNodeFailptr.p->queueIndex, MAX_NDB_NODES);
+ tcNodeFailptr.p->queueList[tcNodeFailptr.p->queueIndex] = tfailedNodeId;
+ tcNodeFailptr.p->queueIndex = tcNodeFailptr.p->queueIndex + 1;
+ return;
+ }//if
+ startTakeOverLab(signal);
+}//Dbtc::execTAKE_OVERTCREQ()
+
+/*------------------------------------------------------------*/
+/* INITIALISE THE HASH TABLES FOR STORING TRANSACTIONS */
+/* AND OPERATIONS DURING TC TAKE OVER. */
+/*------------------------------------------------------------*/
+void Dbtc::startTakeOverLab(Signal* signal)
+{
+ for (tindex = 0; tindex <= 511; tindex++) {
+ ctransidFailHash[tindex] = RNIL;
+ }//for
+ for (tindex = 0; tindex <= 1023; tindex++) {
+ ctcConnectFailHash[tindex] = RNIL;
+ }//for
+ tcNodeFailptr.p->failStatus = FS_LISTENING;
+ tcNodeFailptr.p->takeOverNode = tfailedNodeId;
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ hostptr.p->lqhTransStatus = LTS_ACTIVE;
+ signal->theData[0] = tcNodeFailptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = tfailedNodeId;
+ sendSignal(tblockref, GSN_LQH_TRANSREQ, signal, 3, JBB);
+ }//if
+ }//for
+}//Dbtc::startTakeOverLab()
+
+/*------------------------------------------------------------*/
+/* A REPORT OF AN OPERATION WHERE TC FAILED HAS ARRIVED.*/
+/*------------------------------------------------------------*/
+void Dbtc::execLQH_TRANSCONF(Signal* signal)
+{
+ jamEntry();
+ LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0];
+
+ tcNodeFailptr.i = lqhTransConf->tcRef;
+ ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord);
+ tnodeid = lqhTransConf->lqhNodeId;
+ ttransStatus = (LqhTransConf::OperationStatus)lqhTransConf->operationStatus;
+ ttransid1 = lqhTransConf->transId1;
+ ttransid2 = lqhTransConf->transId2;
+ ttcOprec = lqhTransConf->oldTcOpRec;
+ treqinfo = lqhTransConf->requestInfo;
+ tgci = lqhTransConf->gci;
+ cnodes[0] = lqhTransConf->nextNodeId1;
+ cnodes[1] = lqhTransConf->nextNodeId2;
+ cnodes[2] = lqhTransConf->nextNodeId3;
+ const Uint32 ref = tapplRef = lqhTransConf->apiRef;
+ tapplOprec = lqhTransConf->apiOpRec;
+ const Uint32 tableId = lqhTransConf->tableId;
+
+ if (ttransStatus == LqhTransConf::LastTransConf){
+ jam();
+ /*------------------------------------------------------------*/
+ /* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
+ /*------------------------------------------------------------*/
+ nodeTakeOverCompletedLab(signal);
+ return;
+ }//if
+ if (ttransStatus == LqhTransConf::Marker){
+ jam();
+ treqinfo = 0;
+ LqhTransConf::setMarkerFlag(treqinfo, 1);
+ } else {
+ TableRecordPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+ switch((DictTabInfo::TableType)tabPtr.p->tableType){
+ case DictTabInfo::SystemTable:
+ case DictTabInfo::UserTable:
+ break;
+ default:
+ tapplRef = 0;
+ tapplOprec = 0;
+ }
+ }
+
+ findApiConnectFail(signal);
+
+ if(apiConnectptr.p->ndbapiBlockref == 0 && tapplRef != 0){
+ apiConnectptr.p->ndbapiBlockref = ref;
+ apiConnectptr.p->ndbapiConnect = tapplOprec;
+ }
+
+ if (ttransStatus != LqhTransConf::Marker){
+ jam();
+ findTcConnectFail(signal);
+ }
+}//Dbtc::execLQH_TRANSCONF()
+
+/*------------------------------------------------------------*/
+/* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/
+/*------------------------------------------------------------*/
+void Dbtc::nodeTakeOverCompletedLab(Signal* signal)
+{
+ Uint32 guard0;
+
+ hostptr.i = tnodeid;
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ hostptr.p->lqhTransStatus = LTS_IDLE;
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ if (hostptr.p->lqhTransStatus == LTS_ACTIVE) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* NOT ALL NODES ARE COMPLETED WITH REPORTING IN THE */
+ /* TAKE OVER. */
+ /*------------------------------------------------------------*/
+ return;
+ }//if
+ }//if
+ }//for
+ /*------------------------------------------------------------*/
+ /* ALL NODES HAVE REPORTED ON THE STATUS OF THE VARIOUS */
+ /* OPERATIONS THAT WERE CONTROLLED BY THE FAILED TC. WE */
+ /* ARE NOW IN A POSITION TO COMPLETE ALL OF THOSE */
+ /* TRANSACTIONS EITHER IN A SUCCESSFUL WAY OR IN AN */
+ /* UNSUCCESSFUL WAY. WE WILL ALSO REPORT THIS CONCLUSION*/
+ /* TO THE APPLICATION IF THAT IS STILL ALIVE. */
+ /*------------------------------------------------------------*/
+ tcNodeFailptr.p->currentHashIndexTakeOver = 0;
+ tcNodeFailptr.p->completedTakeOver = 0;
+ tcNodeFailptr.p->failStatus = FS_COMPLETING;
+ guard0 = cnoParallelTakeOver - 1;
+ /*------------------------------------------------------------*/
+ /* WE WILL COMPLETE THE TRANSACTIONS BY STARTING A */
+ /* NUMBER OF PARALLEL ACTIVITIES. EACH ACTIVITY WILL */
+ /* COMPLETE ONE TRANSACTION AT A TIME AND IN THAT */
+ /* TRANSACTION IT WILL COMPLETE ONE OPERATION AT A TIME.*/
+ /* WHEN ALL ACTIVITIES ARE COMPLETED THEN THE TAKE OVER */
+ /* IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (tindex = 0; tindex <= guard0; tindex++) {
+ jam();
+ tcNodeFailptr.p->takeOverProcState[tindex] = ZTAKE_OVER_ACTIVE;
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = tcNodeFailptr.i;
+ signal->theData[2] = tindex;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ }//for
+}//Dbtc::nodeTakeOverCompletedLab()
+
+/*------------------------------------------------------------*/
+/* COMPLETE A NEW TRANSACTION FROM THE HASH TABLE OF */
+/* TRANSACTIONS TO COMPLETE. */
+/*------------------------------------------------------------*/
+void Dbtc::completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd)
+{
+ jam();
+ while (tcNodeFailptr.p->currentHashIndexTakeOver < 512){
+ jam();
+ apiConnectptr.i =
+ ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver];
+ if (apiConnectptr.i != RNIL) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE HAVE FOUND A TRANSACTION THAT NEEDS TO BE */
+ /* COMPLETED. REMOVE IT FROM THE HASH TABLE SO THAT NO */
+ /* OTHER ACTIVITY ALSO TRIES TO COMPLETE THIS */
+ /* TRANSACTION. */
+ /*------------------------------------------------------------*/
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver] =
+ apiConnectptr.p->nextApiConnect;
+
+ completeTransAtTakeOverDoOne(signal, TtakeOverInd);
+ // One transaction taken care of, return from this function
+ // and wait for the next CONTINUEB to continue processing
+ break;
+
+ } else {
+ if (tcNodeFailptr.p->currentHashIndexTakeOver < 511){
+ jam();
+ tcNodeFailptr.p->currentHashIndexTakeOver++;
+ } else {
+ jam();
+ completeTransAtTakeOverDoLast(signal, TtakeOverInd);
+ tcNodeFailptr.p->currentHashIndexTakeOver++;
+ }//if
+ }//if
+ }//while
+}//Dbtc::completeTransAtTakeOverLab()
+
+
+
+
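+/*------------------------------------------------------------*/
+/* THIS TAKE OVER ACTIVITY HAS SCANNED THE WHOLE HASH TABLE. */
+/* MARK IT AS IDLE. IF IT WAS THE LAST ACTIVE ONE, BROADCAST */
+/* TAKE_OVERTCCONF TO ALL ALIVE NODES AND START ANY QUEUED */
+/* TAKE OVER. */
+/*------------------------------------------------------------*/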
+void Dbtc::completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd)
+{
+ Uint32 guard0;
+ /*------------------------------------------------------------*/
+ /* THERE ARE NO MORE TRANSACTIONS TO COMPLETE. THIS */
+ /* ACTIVITY IS COMPLETED. */
+ /*------------------------------------------------------------*/
+ arrGuard(TtakeOverInd, MAX_NDB_NODES);
+ if (tcNodeFailptr.p->takeOverProcState[TtakeOverInd] != ZTAKE_OVER_ACTIVE) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ tcNodeFailptr.p->takeOverProcState[TtakeOverInd] = ZTAKE_OVER_IDLE;
+ tcNodeFailptr.p->completedTakeOver++;
+
+ if (tcNodeFailptr.p->completedTakeOver == cnoParallelTakeOver) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WERE THE LAST ACTIVITY THAT WAS COMPLETED. WE NEED*/
+ /* TO REPORT THE COMPLETION OF THE TAKE OVER TO ALL */
+ /* NODES THAT ARE ALIVE. */
+ /*------------------------------------------------------------*/
+ for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcTcBlockRef(hostptr.i);
+ signal->theData[0] = tcNodeFailptr.p->takeOverNode;
+ sendSignal(tblockref, GSN_TAKE_OVERTCCONF, signal, 1, JBB);
+ }//if
+ }//for
+ if (tcNodeFailptr.p->queueIndex > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE ARE MORE NODES TO TAKE OVER. WE NEED TO START */
+ /* THE TAKE OVER. */
+ /*------------------------------------------------------------*/
+ tfailedNodeId = tcNodeFailptr.p->queueList[0];
+ guard0 = tcNodeFailptr.p->queueIndex - 1;
+ arrGuard(guard0 + 1, MAX_NDB_NODES);
+ for (tindex = 0; tindex <= guard0; tindex++) {
+ jam();
+ tcNodeFailptr.p->queueList[tindex] =
+ tcNodeFailptr.p->queueList[tindex + 1];
+ }//for
+ tcNodeFailptr.p->queueIndex--;
+ startTakeOverLab(signal);
+ return;
+ } else {
+ jam();
+ tcNodeFailptr.p->failStatus = FS_IDLE;
+ }//if
+ }//if
+ return;
+}//Dbtc::completeTransAtTakeOverDoLast()
+
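+/*------------------------------------------------------------*/
+/* COMPLETE ONE TRANSACTION FOUND IN THE HASH TABLE ACCORDING */
+/* TO THE STATE REPORTED DURING TAKE OVER: CONTINUE WITH THE */
+/* COMMIT, COMPLETE OR ABORT PHASE, OR REPORT THE OUTCOME */
+/* DIRECTLY TO THE APPLICATION. */
+/*------------------------------------------------------------*/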
+void Dbtc::completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd)
+{
+ apiConnectptr.p->takeOverRec = (Uint8)tcNodeFailptr.i;
+ apiConnectptr.p->takeOverInd = TtakeOverInd;
+
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* ALL PARTS OF THE TRANSACTION REPORTED COMMITTED. WE */
+ /* HAVE THUS COMPLETED THE COMMIT PHASE. WE CAN REPORT */
+ /* COMMITTED TO THE APPLICATION AND CONTINUE WITH THE */
+ /* COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toCompleteHandlingLab(signal);
+ return;
+ case CS_FAIL_COMMITTING:
+ jam();
+ /*------------------------------------------------------------*/
+ /* AT LEAST ONE PART WAS ONLY PREPARED AND AT LEAST ONE */
+ /* PART WAS COMMITTED. COMPLETE THE COMMIT PHASE FIRST. */
+ /* THEN CONTINUE AS AFTER COMMITTED. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toCommitHandlingLab(signal);
+ return;
+ case CS_FAIL_ABORTING:
+ case CS_FAIL_PREPARED:
+ jam();
+ /*------------------------------------------------------------*/
+ /* WE WILL ABORT THE TRANSACTION IF IT IS IN A PREPARED */
+ /* STATE IN THIS VERSION. IN LATER VERSIONS WE WILL */
+ /* HAVE TO ADD CODE FOR HANDLING OF PREPARED-TO-COMMIT */
+ /* TRANSACTIONS. THESE ARE NOT ALLOWED TO ABORT UNTIL WE*/
+ /* HAVE HEARD FROM THE TRANSACTION COORDINATOR. */
+ /* */
+ /* IT IS ACTUALLY POSSIBLE TO COMMIT TRANSACTIONS THAT */
+ /* ARE PREPARED. WE WILL LEAVE THIS PROBLEM UNTIL */
+ /* LATER VERSIONS. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ toAbortHandlingLab(signal);
+ return;
+ case CS_FAIL_ABORTED:
+ jam();
+ sendTCKEY_FAILREF(signal, apiConnectptr.p);
+
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ break;
+ case CS_FAIL_COMPLETED:
+ jam();
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseApiConnectFail(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+}//Dbtc::completeTransAtTakeOverDoOne()
+
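+/*------------------------------------------------------------*/
+/* REPORT TO THE APPLICATION THAT ITS TRANSACTION WAS ABORTED */
+/* DURING TAKE OVER, IF THE API BLOCK REFERENCE IS KNOWN. */
+/*------------------------------------------------------------*/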
+void
+Dbtc::sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord * regApiPtr){
+ jam();
+
+ const Uint32 ref = regApiPtr->ndbapiBlockref;
+ if(ref != 0){
+ signal->theData[0] = regApiPtr->ndbapiConnect;
+ signal->theData[1] = regApiPtr->transid[0];
+ signal->theData[2] = regApiPtr->transid[1];
+
+ sendSignal(ref, GSN_TCKEY_FAILREF, signal, 3, JBB);
+ }
+}
+
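+/*------------------------------------------------------------*/
+/* REPORT TO THE APPLICATION THAT ITS TRANSACTION WAS */
+/* COMMITTED DURING TAKE OVER. THE LOWEST BIT OF THE RETURNED */
+/* API CONNECT POINTER IS SET WHEN A COMMIT ACK MARKER EXISTS */
+/* FOR THE TRANSACTION. */
+/*------------------------------------------------------------*/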
+void
+Dbtc::sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord * regApiPtr){
+ jam();
+ TcKeyFailConf * const failConf = (TcKeyFailConf *)&signal->theData[0];
+
+ const Uint32 ref = regApiPtr->ndbapiBlockref;
+ const Uint32 marker = regApiPtr->commitAckMarker;
+ if(ref != 0){
+ failConf->apiConnectPtr = regApiPtr->ndbapiConnect | (marker != RNIL);
+ failConf->transId1 = regApiPtr->transid[0];
+ failConf->transId2 = regApiPtr->transid[1];
+
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB);
+ }
+ regApiPtr->commitAckMarker = RNIL;
+}
+
+/*------------------------------------------------------------*/
+/* THIS PART HANDLES THE ABORT PHASE IN THE CASE OF A */
+/* NODE FAILURE BEFORE THE COMMIT DECISION. */
+/*------------------------------------------------------------*/
+/* ABORT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
+/*------------------------------------------------------------*/
+void Dbtc::execABORTCONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[2];
+ if (ERROR_INSERTED(8045)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_ABORTCONF, signal, 2000, 5);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 5);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_ABORT_CONF) {
+ warningReport(signal, 16);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_ABORT_CONF) {
+ warningReport(signal, 17);
+ return;
+ }//if
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[3];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[4];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 18);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 19);
+ return;
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ toAbortHandlingLab(signal);
+}//Dbtc::execABORTCONF()
+
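+/*------------------------------------------------------------*/
+/* WALK THE OPERATIONS OF THE TRANSACTION, REPLICA BY REPLICA,*/
+/* AND SEND ABORTREQ TO EVERY LIVE NODE THAT REPORTED */
+/* PREPARED. WHEN ALL OPERATIONS ARE HANDLED, REPORT THE */
+/* ABORT TO THE APPLICATION OR RELEASE THE ABORT RESOURCES. */
+/*------------------------------------------------------------*/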
+void Dbtc::toAbortHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ const LqhTransConf::OperationStatus stat =
+ (LqhTransConf::OperationStatus)
+ tcConnectptr.p->failData[tcurrentReplicaNo];
+ switch(stat){
+ case LqhTransConf::InvalidStatus:
+ case LqhTransConf::Aborted:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ tcConnectptr.p->tcConnectstate = OS_WAIT_ABORT_CONF;
+ apiConnectptr.p->apiConnectstate = CS_WAIT_ABORT_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ signal->theData[4] = apiConnectptr.p->tcBlockref;
+ signal->theData[5] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_ABORTREQ, signal, 6, JBB);
+ return;
+ }//if
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
+ /* ABORTED. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ /*------------------------------------------------------------*/
+ /* THE LAST REPLICA IN THIS OPERATION HAS BEEN HANDLED. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THE ABORT PHASE. WE CAN NOW REPORT */
+ /* THE ABORT STATUS TO THE APPLICATION AND CONTINUE */
+ /* WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ sendTCKEY_FAILREF(signal, apiConnectptr.p);
+ const Uint32 marker = apiConnectptr.p->commitAckMarker;
+ if(marker != RNIL){
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ tmp.i = marker;
+ tmp.p = m_commitAckMarkerHash.getPtr(tmp.i);
+
+ m_commitAckMarkerHash.release(tmp);
+ apiConnectptr.p->commitAckMarker = RNIL;
+ }
+
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
+ /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ } else {
+ jam();
+ releaseAbortResources(signal);
+ }//if
+ return;
+ }//if
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toAbortHandlingLab()
+
+/*------------------------------------------------------------*/
+/* THIS PART HANDLES THE COMMIT PHASE IN THE CASE OF A */
+/* NODE FAILURE IN THE MIDDLE OF THE COMMIT PHASE. */
+/*------------------------------------------------------------*/
+/* COMMIT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */
+/*------------------------------------------------------------*/
+void Dbtc::execCOMMITCONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[1];
+ if (ERROR_INSERTED(8046)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMMITCONF, signal, 2000, 4);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 4);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMMIT_CONF) {
+ warningReport(signal, 8);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMMIT_CONF) {
+ warningReport(signal, 9);
+ return;
+ }//if
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 10);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 11);
+ return;
+ }//if
+ if (ERROR_INSERTED(8026)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ tcConnectptr.p->tcConnectstate = OS_COMMITTED;
+ toCommitHandlingLab(signal);
+}//Dbtc::execCOMMITCONF()
+
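+/*------------------------------------------------------------*/
+/* WALK THE OPERATIONS OF THE TRANSACTION, REPLICA BY REPLICA,*/
+/* AND SEND COMMITREQ TO EVERY LIVE NODE THAT IS STILL IN THE */
+/* PREPARED STATE. WHEN ALL OPERATIONS HAVE COMMITTED, REPORT */
+/* THE COMMIT AND CONTINUE WITH THE COMPLETE PHASE. */
+/*------------------------------------------------------------*/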
+void Dbtc::toCommitHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
+ case LqhTransConf::InvalidStatus:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Committed:
+ jam();
+ /*empty*/;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THE NODE WAS PREPARED AND IS WAITING FOR ABORT OR */
+ /* COMMIT REQUEST FROM TC. */
+ /*------------------------------------------------------------*/
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ apiConnectptr.p->apiConnectstate = CS_WAIT_COMMIT_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_COMMIT_CONF;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->globalcheckpointid;
+ signal->theData[3] = apiConnectptr.p->transid[0];
+ signal->theData[4] = apiConnectptr.p->transid[1];
+ signal->theData[5] = apiConnectptr.p->tcBlockref;
+ signal->theData[6] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_COMMITREQ, signal, 7, JBB);
+ return;
+ }//if
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo > 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */
+ /* COMMITTED. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ /*------------------------------------------------------------*/
+ /* THE LAST REPLICA IN THIS OPERATION HAS COMMITTED. */
+ /*------------------------------------------------------------*/
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THE COMMIT PHASE. WE CAN NOW REPORT*/
+ /* THE COMMIT STATUS TO THE APPLICATION AND CONTINUE */
+ /* WITH THE COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ sendTCKEY_FAILCONF(signal, apiConnectptr.p);
+ } else {
+ jam();
+ sendApiCommit(signal);
+ }//if
+ apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
+ toCompleteHandlingLab(signal);
+ return;
+ }//if
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toCommitHandlingLab()
+
+/*------------------------------------------------------------*/
+/* COMMON PART TO HANDLE COMPLETE PHASE WHEN ANY NODE */
+/* HAS FAILED. */
+/*------------------------------------------------------------*/
+/* THE NODE WITH TNODEID HAS COMPLETED THE OPERATION */
+/*------------------------------------------------------------*/
+void Dbtc::execCOMPLETECONF(Signal* signal)
+{
+ UintR compare_transid1, compare_transid2;
+
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ tnodeid = signal->theData[1];
+ if (ERROR_INSERTED(8047)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ sendSignalWithDelay(cownref, GSN_COMPLETECONF, signal, 2000, 4);
+ return;
+ }//if
+ if (tcConnectptr.i >= ctcConnectFilesize) {
+ errorReport(signal, 3);
+ return;
+ }//if
+ ptrAss(tcConnectptr, tcConnectRecord);
+ if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMPLETE_CONF) {
+ warningReport(signal, 12);
+ return;
+ }//if
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMPLETE_CONF) {
+ warningReport(signal, 13);
+ return;
+ }//if
+ compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2];
+ compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3];
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ warningReport(signal, 14);
+ return;
+ }//if
+ arrGuard(apiConnectptr.p->currentReplicaNo, 4);
+ if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] !=
+ tnodeid) {
+ warningReport(signal, 15);
+ return;
+ }//if
+ if (ERROR_INSERTED(8028)) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ tcConnectptr.p->tcConnectstate = OS_COMPLETED;
+ tcurrentReplicaNo = (Uint8)Z8NIL;
+ toCompleteHandlingLab(signal);
+}//Dbtc::execCOMPLETECONF()
+
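+/*------------------------------------------------------------*/
+/* WALK THE OPERATIONS OF THE TRANSACTION, REPLICA BY REPLICA,*/
+/* AND SEND COMPLETEREQ TO EVERY LIVE NODE THAT REPORTED A */
+/* STATE FOR THE OPERATION. WHEN ALL OPERATIONS ARE COMPLETED,*/
+/* CONTINUE THE TAKE OVER OR RELEASE THE TRANSACTION. */
+/*------------------------------------------------------------*/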
+void Dbtc::toCompleteHandlingLab(Signal* signal)
+{
+ do {
+ if (tcurrentReplicaNo != (Uint8)Z8NIL) {
+ jam();
+ arrGuard(tcurrentReplicaNo, 4);
+ switch (tcConnectptr.p->failData[tcurrentReplicaNo]) {
+ case LqhTransConf::InvalidStatus:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ /*------------------------------------------------------------*/
+ /* THIS NODE DID NOT REPORT ANYTHING FOR THIS OPERATION.*/
+ /* IT MUST HAVE FAILED. */
+ /*------------------------------------------------------------*/
+ /*------------------------------------------------------------*/
+ /* SEND COMPLETEREQ TO THE NEXT REPLICA. */
+ /*------------------------------------------------------------*/
+ hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus == HS_ALIVE) {
+ jam();
+ tblockref = calcLqhBlockRef(hostptr.i);
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ tcConnectptr.p->tcConnectstate = OS_WAIT_COMPLETE_CONF;
+ apiConnectptr.p->apiConnectstate = CS_WAIT_COMPLETE_CONF;
+ apiConnectptr.p->timeOutCounter = 0;
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = cownref;
+ signal->theData[2] = apiConnectptr.p->transid[0];
+ signal->theData[3] = apiConnectptr.p->transid[1];
+ signal->theData[4] = apiConnectptr.p->tcBlockref;
+ signal->theData[5] = tcConnectptr.p->tcOprec;
+ sendSignal(tblockref, GSN_COMPLETEREQ, signal, 6, JBB);
+ return;
+ }//if
+ break;
+ }//switch
+ }//if
+ if (apiConnectptr.p->currentReplicaNo != 0) {
+ jam();
+ /*------------------------------------------------------------*/
+ /* THERE ARE STILL MORE REPLICAS IN THIS OPERATION. WE */
+ /* NEED TO CONTINUE WITH THOSE REPLICAS. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentReplicaNo--;
+ tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo;
+ } else {
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ if (tcConnectptr.i == RNIL) {
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */
+ /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */
+ /*------------------------------------------------------------*/
+ if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) {
+ jam();
+ signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER;
+ signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec;
+ signal->theData[2] = apiConnectptr.p->takeOverInd;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ releaseTakeOver(signal);
+ } else {
+ jam();
+ releaseTransResources(signal);
+ }//if
+ return;
+ }//if
+ /*------------------------------------------------------------*/
+ /* WE HAVE COMPLETED AN OPERATION AND THERE ARE MORE TO */
+ /* COMPLETE. TAKE THE NEXT OPERATION AND START WITH THE */
+ /* FIRST REPLICA SINCE IT IS THE COMPLETE PHASE. */
+ /*------------------------------------------------------------*/
+ apiConnectptr.p->currentTcConnect = tcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo;
+ }//if
+ } while (1);
+}//Dbtc::toCompleteHandlingLab()
+
+/*------------------------------------------------------------*/
+/* */
+/* FIND THE API CONNECT RECORD FOR THIS TRANSACTION */
+/* DURING TAKE OVER FROM A FAILED TC. IF NONE EXISTS */
+/* YET THEN SEIZE A NEW API CONNECT RECORD AND LINK IT */
+/* INTO THE HASH TABLE. */
+/*------------------------------------------------------------*/
+void Dbtc::findApiConnectFail(Signal* signal)
+{
+ ApiConnectRecordPtr fafPrevApiConnectptr;
+ ApiConnectRecordPtr fafNextApiConnectptr;
+ UintR tfafHashNumber;
+
+ tfafHashNumber = ttransid1 & 511;
+ fafPrevApiConnectptr.i = RNIL;
+ ptrNull(fafPrevApiConnectptr);
+ arrGuard(tfafHashNumber, 512);
+ fafNextApiConnectptr.i = ctransidFailHash[tfafHashNumber];
+ ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
+FAF_LOOP:
+ jam();
+ if (fafNextApiConnectptr.i == RNIL) {
+ jam();
+ if (cfirstfreeApiConnectFail == RNIL) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ seizeApiConnectFail(signal);
+ if (fafPrevApiConnectptr.i == RNIL) {
+ jam();
+ ctransidFailHash[tfafHashNumber] = apiConnectptr.i;
+ } else {
+ jam();
+ ptrGuard(fafPrevApiConnectptr);
+ fafPrevApiConnectptr.p->nextApiConnect = apiConnectptr.i;
+ }//if
+ apiConnectptr.p->nextApiConnect = RNIL;
+ initApiConnectFail(signal);
+ } else {
+ jam();
+ fafPrevApiConnectptr.i = fafNextApiConnectptr.i;
+ fafPrevApiConnectptr.p = fafNextApiConnectptr.p;
+ apiConnectptr.i = fafNextApiConnectptr.i;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ fafNextApiConnectptr.i = apiConnectptr.p->nextApiConnect;
+ ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if ((apiConnectptr.p->transid[1] != ttransid2) ||
+ (apiConnectptr.p->transid[0] != ttransid1)) {
+ goto FAF_LOOP;
+ }//if
+ updateApiStateFail(signal);
+ }//if
+}//Dbtc::findApiConnectFail()
+
+/*----------------------------------------------------------*/
+/* FIND THE TC CONNECT AND IF NOT FOUND ALLOCATE A NEW */
+/*----------------------------------------------------------*/
+void Dbtc::findTcConnectFail(Signal* signal)
+{
+ UintR tftfHashNumber;
+
+ tftfHashNumber = (ttransid1 ^ ttcOprec) & 1023;
+ tcConnectptr.i = ctcConnectFailHash[tftfHashNumber];
+ do {
+ if (tcConnectptr.i == RNIL) {
+ jam();
+ if (cfirstfreeTcConnectFail == RNIL) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ seizeTcConnectFail(signal);
+ linkTcInConnectionlist(signal);
+ tcConnectptr.p->nextTcFailHash = ctcConnectFailHash[tftfHashNumber];
+ ctcConnectFailHash[tftfHashNumber] = tcConnectptr.i;
+ initTcConnectFail(signal);
+ return;
+ } else {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ if (tcConnectptr.p->tcOprec != ttcOprec) {
+ jam(); /* FRAGMENTID = TC_OPREC HERE, LOOP ANOTHER TURN */
+ tcConnectptr.i = tcConnectptr.p->nextTcFailHash;
+ } else {
+ updateTcStateFail(signal);
+ return;
+ }//if
+ }//if
+ } while (1);
+}//Dbtc::findTcConnectFail()
+
+/*----------------------------------------------------------*/
+/* INITIALISE AN API CONNECT FAIL RECORD */
+/*----------------------------------------------------------*/
+void Dbtc::initApiConnectFail(Signal* signal)
+{
+ apiConnectptr.p->transid[0] = ttransid1;
+ apiConnectptr.p->transid[1] = ttransid2;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ tblockref = calcTcBlockRef(tcNodeFailptr.p->takeOverNode);
+
+ apiConnectptr.p->tcBlockref = tblockref;
+ apiConnectptr.p->ndbapiBlockref = 0;
+ apiConnectptr.p->ndbapiConnect = 0;
+ apiConnectptr.p->buddyPtr = RNIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ switch(ttransStatus){
+ case LqhTransConf::Committed:
+ jam();
+ apiConnectptr.p->globalcheckpointid = tgci;
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_PREPARED;
+ break;
+ case LqhTransConf::Aborted:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTED;
+ break;
+ case LqhTransConf::Marker:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMPLETED;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ }//switch
+ apiConnectptr.p->commitAckMarker = RNIL;
+ if(LqhTransConf::getMarkerFlag(treqinfo)){
+ jam();
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.seize(tmp);
+
+ ndbrequire(tmp.i != RNIL);
+
+ apiConnectptr.p->commitAckMarker = tmp.i;
+ tmp.p->transid1 = ttransid1;
+ tmp.p->transid2 = ttransid2;
+ tmp.p->apiNodeId = refToNode(tapplRef);
+ tmp.p->noOfLqhs = 1;
+ tmp.p->lqhNodeId[0] = tnodeid;
+ tmp.p->apiConnectPtr = apiConnectptr.i;
+ m_commitAckMarkerHash.add(tmp);
+ }
+}//Dbtc::initApiConnectFail()
+
+/*------------------------------------------------------------*/
+/* INITIALISE AT TC CONNECT AT TAKE OVER WHEN ALLOCATING*/
+/* THE TC CONNECT RECORD. */
+/*------------------------------------------------------------*/
+void Dbtc::initTcConnectFail(Signal* signal)
+{
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ tcConnectptr.p->tcOprec = ttcOprec;
+ Uint32 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
+ for (Uint32 i = 0; i < MAX_REPLICAS; i++) {
+ tcConnectptr.p->failData[i] = LqhTransConf::InvalidStatus;
+ }//for
+ tcConnectptr.p->tcNodedata[treplicaNo] = tnodeid;
+ tcConnectptr.p->failData[treplicaNo] = ttransStatus;
+ tcConnectptr.p->lastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
+ tcConnectptr.p->dirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
+
+}//Dbtc::initTcConnectFail()
+
+/*----------------------------------------------------------*/
+/* INITIALISE TC NODE FAIL RECORD. */
+/*----------------------------------------------------------*/
+void Dbtc::initTcFail(Signal* signal)
+{
+ tcNodeFailptr.i = 0;
+ ptrAss(tcNodeFailptr, tcFailRecord);
+ tcNodeFailptr.p->queueIndex = 0;
+ tcNodeFailptr.p->failStatus = FS_IDLE;
+}//Dbtc::initTcFail()
+
+/*----------------------------------------------------------*/
+/* RELEASE_TAKE_OVER */
+/*----------------------------------------------------------*/
+void Dbtc::releaseTakeOver(Signal* signal)
+{
+ TcConnectRecordPtr rtoNextTcConnectptr;
+
+ rtoNextTcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ jam();
+ tcConnectptr.i = rtoNextTcConnectptr.i;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ rtoNextTcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ releaseTcConnectFail(signal);
+ } while (rtoNextTcConnectptr.i != RNIL);
+ releaseApiConnectFail(signal);
+}//Dbtc::releaseTakeOver()
+
+/*---------------------------------------------------------------------------*/
+/* SETUP_FAIL_DATA */
+/* SETUP DATA TO REUSE TAKE OVER CODE FOR HANDLING ABORT/COMMIT IN NODE */
+/* FAILURE SITUATIONS. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::setupFailData(Signal* signal)
+{
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ do {
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ switch (tcConnectptr.p->tcConnectstate) {
+ case OS_PREPARED:
+ case OS_COMMITTING:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE PREPARED AND WAITING FOR
+ * COMMIT/ABORT DECISION.
+ *------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::Prepared;
+ }//for
+ break;
+ case OS_COMMITTED:
+ case OS_COMPLETING:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE COMMITTED AND WAITING FOR
+ * COMPLETE MESSAGE.
+ *------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::Committed;
+ }//for
+ break;
+ case OS_COMPLETED:
+ jam();
+ for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) {
+ jam();
+ /*-------------------------------------------------------------------
+ * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH.
+ * IN THIS CASE ALL LQH'S ARE COMPLETED.
+ *-------------------------------------------------------------------*/
+ arrGuard(tindex, 4);
+ tcConnectptr.p->failData[tindex] = LqhTransConf::InvalidStatus;
+ }//for
+ break;
+ default:
+ jam();
+ sendSystemError(signal);
+ break;
+ }//switch
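+ // Unless we are setting up for commit, check the nodes involved in this operation;
+ // a failed node forces an abort decision.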
+ if (tabortInd != ZCOMMIT_SETUP) {
+ jam();
+ for (UintR Ti = 0; Ti <= tcConnectptr.p->lastReplicaNo; Ti++) {
+ hostptr.i = tcConnectptr.p->tcNodedata[Ti];
+ ptrCheckGuard(hostptr, chostFilesize, hostRecord);
+ if (hostptr.p->hostStatus != HS_ALIVE) {
+ jam();
+ /*-----------------------------------------------------------------
+ * FAILURE OF ANY INVOLVED NODE ALWAYS INVOKES AN ABORT DECISION.
+ *-----------------------------------------------------------------*/
+ tabortInd = ZTRUE;
+ }//if
+ }//for
+ }//if
+ tcConnectptr.p->tcConnectstate = OS_TAKE_OVER;
+ tcConnectptr.p->tcOprec = tcConnectptr.i;
+ tcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ } while (tcConnectptr.i != RNIL);
+ apiConnectptr.p->tcBlockref = cownref;
+ apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect;
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo;
+ tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo;
+}//Dbtc::setupFailData()
+
+/*----------------------------------------------------------*/
+/* UPDATE THE STATE OF THE API CONNECT FOR THIS PART. */
+/*----------------------------------------------------------*/
+void Dbtc::updateApiStateFail(Signal* signal)
+{
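+ // Commit ack marker handling: create a marker on the first LQH report carrying the
+ // marker flag, otherwise add this LQH node to the existing marker.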
+ if(LqhTransConf::getMarkerFlag(treqinfo)){
+ jam();
+ const Uint32 marker = apiConnectptr.p->commitAckMarker;
+ if(marker == RNIL){
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ m_commitAckMarkerHash.seize(tmp);
+ ndbrequire(tmp.i != RNIL);
+
+ apiConnectptr.p->commitAckMarker = tmp.i;
+ tmp.p->transid1 = ttransid1;
+ tmp.p->transid2 = ttransid2;
+ tmp.p->apiNodeId = refToNode(tapplRef);
+ tmp.p->noOfLqhs = 1;
+ tmp.p->lqhNodeId[0] = tnodeid;
+ tmp.p->apiConnectPtr = apiConnectptr.i;
+ m_commitAckMarkerHash.add(tmp);
+ } else {
+ jam();
+
+ CommitAckMarkerPtr tmp;
+ tmp.i = marker;
+ tmp.p = m_commitAckMarkerHash.getPtr(marker);
+
+ const Uint32 noOfLqhs = tmp.p->noOfLqhs;
+ ndbrequire(noOfLqhs < MAX_REPLICAS);
+ tmp.p->lqhNodeId[noOfLqhs] = tnodeid;
+ tmp.p->noOfLqhs = (noOfLqhs + 1);
+ }
+ }
+
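+ // Combine the state recorded so far with the status reported by this LQH.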
+ switch (ttransStatus) {
+ case LqhTransConf::Committed:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_COMMITTED:
+ jam();
+ ndbrequire(tgci == apiConnectptr.p->globalcheckpointid);
+ break;
+ case CS_FAIL_PREPARED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
+ apiConnectptr.p->globalcheckpointid = tgci;
+ break;
+ case CS_FAIL_COMPLETED:
+ jam();
+ apiConnectptr.p->globalcheckpointid = tgci;
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Prepared:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING;
+ break;
+ case CS_FAIL_ABORTED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
+ break;
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_PREPARED:
+ case CS_FAIL_ABORTING:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Aborted:
+ jam();
+ switch (apiConnectptr.p->apiConnectstate) {
+ case CS_FAIL_COMMITTING:
+ case CS_FAIL_COMMITTED:
+ jam();
+ systemErrorLab(signal);
+ break;
+ case CS_FAIL_PREPARED:
+ jam();
+ apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING;
+ break;
+ case CS_FAIL_ABORTING:
+ case CS_FAIL_ABORTED:
+ jam();
+ /*empty*/;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+ break;
+ case LqhTransConf::Marker:
+ jam();
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+}//Dbtc::updateApiStateFail()
+
+/*------------------------------------------------------------*/
+/* UPDATE_TC_STATE_FAIL */
+/* */
+/* WE NEED TO UPDATE THE STATUS OF TC_CONNECT RECORD AND*/
+/* WE ALSO NEED TO CHECK THAT THERE IS CONSISTENCY */
+/* BETWEEN THE DIFFERENT REPLICAS. */
+/*------------------------------------------------------------*/
+void Dbtc::updateTcStateFail(Signal* signal)
+{
+ const Uint8 treplicaNo = LqhTransConf::getReplicaNo(treqinfo);
+ const Uint8 tlastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo);
+ const Uint8 tdirtyOp = LqhTransConf::getDirtyFlag(treqinfo);
+
+ TcConnectRecord * regTcPtr = tcConnectptr.p;
+
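+ // Check that this LQH report is consistent with what the other replicas have reported.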
+ ndbrequire(regTcPtr->apiConnect == apiConnectptr.i);
+ ndbrequire(regTcPtr->failData[treplicaNo] == LqhTransConf::InvalidStatus);
+ ndbrequire(regTcPtr->lastReplicaNo == tlastReplicaNo);
+ ndbrequire(regTcPtr->dirtyOp == tdirtyOp);
+
+ regTcPtr->tcNodedata[treplicaNo] = tnodeid;
+ regTcPtr->failData[treplicaNo] = ttransStatus;
+}//Dbtc::updateTcStateFail()
+
+void Dbtc::execTCGETOPSIZEREQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(8000);
+
+ UintR Tuserpointer = signal->theData[0]; /* DBDIH POINTER */
+ BlockReference Tusersblkref = signal->theData[1];/* DBDIH BLOCK REFERENCE */
+ signal->theData[0] = Tuserpointer;
+ signal->theData[1] = coperationsize;
+ sendSignal(Tusersblkref, GSN_TCGETOPSIZECONF, signal, 2, JBB);
+}//Dbtc::execTCGETOPSIZEREQ()
+
+void Dbtc::execTC_CLOPSIZEREQ(Signal* signal)
+{
+ jamEntry();
+ CRASH_INSERTION(8001);
+
+ tuserpointer = signal->theData[0];
+ tusersblkref = signal->theData[1];
+ /* DBDIH BLOCK REFERENCE */
+ coperationsize = 0;
+ signal->theData[0] = tuserpointer;
+ sendSignal(tusersblkref, GSN_TC_CLOPSIZECONF, signal, 1, JBB);
+}//Dbtc::execTC_CLOPSIZEREQ()
+
+/* ######################################################################### */
+/* ####### ERROR MODULE ####### */
+/* ######################################################################### */
+void Dbtc::tabStateErrorLab(Signal* signal)
+{
+ terrorCode = ZSTATE_ERROR;
+ releaseAtErrorLab(signal);
+}//Dbtc::tabStateErrorLab()
+
+void Dbtc::wrongSchemaVersionErrorLab(Signal* signal)
+{
+ const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+
+ TableRecordPtr tabPtr;
+ tabPtr.i = tcKeyReq->tableId;
+ const Uint32 schemVer = tcKeyReq->tableSchemaVersion;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ terrorCode = tabPtr.p->getErrorCode(schemVer);
+
+ abortErrorLab(signal);
+}//Dbtc::wrongSchemaVersionErrorLab()
+
+void Dbtc::noFreeConnectionErrorLab(Signal* signal)
+{
+ terrorCode = ZNO_FREE_TC_CONNECTION;
+ abortErrorLab(signal); /* RECORD. OTHERWISE GOTO ERRORHANDLING */
+}//Dbtc::noFreeConnectionErrorLab()
+
+void Dbtc::aiErrorLab(Signal* signal)
+{
+ terrorCode = ZLENGTH_ERROR;
+ abortErrorLab(signal);
+}//Dbtc::aiErrorLab()
+
+void Dbtc::seizeAttrbuferrorLab(Signal* signal)
+{
+ terrorCode = ZGET_ATTRBUF_ERROR;
+ abortErrorLab(signal);
+}//Dbtc::seizeAttrbuferrorLab()
+
+void Dbtc::seizeDatabuferrorLab(Signal* signal)
+{
+ terrorCode = ZGET_DATAREC_ERROR;
+ releaseAtErrorLab(signal);
+}//Dbtc::seizeDatabuferrorLab()
+
+void Dbtc::releaseAtErrorLab(Signal* signal)
+{
+ ptrGuard(tcConnectptr);
+ tcConnectptr.p->tcConnectstate = OS_ABORTING;
+ /*-------------------------------------------------------------------------*
+ * A FAILURE OF THIS OPERATION HAS OCCURRED. THIS FAILURE WAS EITHER A
+ * FAULTY PARAMETER OR A RESOURCE THAT WAS NOT AVAILABLE.
+ * WE WILL ABORT THE ENTIRE TRANSACTION SINCE THIS IS THE SAFEST PATH
+ * TO HANDLE THIS PROBLEM.
+ * SINCE WE HAVE NOT YET CONTACTED ANY LQH WE SET NUMBER OF NODES TO ZERO
+ * WE ALSO SET THE STATE TO ABORTING TO INDICATE THAT WE ARE NOT EXPECTING
+ * ANY SIGNALS.
+ *-------------------------------------------------------------------------*/
+ tcConnectptr.p->noOfNodes = 0;
+ abortErrorLab(signal);
+}//Dbtc::releaseAtErrorLab()
+
+void Dbtc::warningHandlerLab(Signal* signal)
+{
+ ndbassert(false);
+}//Dbtc::warningHandlerLab()
+
+void Dbtc::systemErrorLab(Signal* signal)
+{
+ progError(0, 0);
+}//Dbtc::systemErrorLab()
+
+
+/* ######################################################################### *
+ * ####### SCAN MODULE ####### *
+ * ######################################################################### *
+
+ The application orders a scan of a table. We divide the scan into a scan on
+ each fragment. The scan uses the primary replicas since the scan might be
+ used for an update in a separate transaction.
+
+ Scans are always done as a separate transaction. Locks from the scan
+ can be overtaken by another transaction. Scans can never lock the entire
+ table. Locks are released immediately after the read has been verified
+ by the application. There is not even an option to leave the locks.
+ The reason is that this would hurt real-time behaviour too much.
+
+ -# The first step in handling a scan of a table is to receive all signals
+ defining the scan. If failures occur during this step we release all
+ resources and reply with SCAN_TABREF providing the error code.
+ If system load is too high, the request will not be allowed.
+
+ -# The second step retrieves the number of fragments that exist in the
+ table. It also ensures that the table actually exists. After this,
+ the scan is ready to be parallelised. The idea is that the receiving
+ process (hereafter called delivery process) will start up a number
+ of scan processes. Each of these scan processes will
+ independently scan one fragment at a time. The delivery
+ process object is the scan record and the scan process object is
+ the scan fragment record plus the scan operation record.
+
+ -# The third step is thus performed in parallel. In the third step each
+ scan process retrieves the primary replica of the fragment it will
+ scan. Then it starts the scan as soon as the load on that node permits.
+
+ The LQH returns either when it has retrieved the maximum number of tuples
+ or when it has retrieved at least one tuple and is hindered by a lock from
+ retrieving the next tuple. This is to ensure that a scan process can never
+ be involved in a deadlock situation.
+
+ When the scan process receives a number of tuples to report to the
+ application it checks the state of the delivery process. Only one delivery
+ at a time is handled by the application. Thus, if the delivery process
+ has already sent a number of tuples to the application, this set of
+ tuples is queued.
+
+ When the application requests the next set of tuples it is immediately
+ delivered if any are queued, otherwise it waits for the next scan
+ process that is ready to deliver.
+
+
+ ERROR HANDLING
+
+ As already mentioned it is rather easy to handle errors before the scan
+ processes have started. In this case it is enough to release the resources
+ and send SCAN_TAB_REF.
+
+ If an error occurs in any of the scan processes then we have to stop all
+ scan processes. We do however only stop the delivery process and ask
+ the api to order us to close the scan. The reason is that we can easily
+ enter into difficult timing problems since the application and this
+ block are out of sync. We will thus always start by reporting the error
+ to the application and wait for a close request. This error report uses the
+ SCAN_TABREF signal with a special error code that the api must check for.
+
+
+ CLOSING AN ACTIVE SCAN
+
+ The application can close a scan for several reasons before it is completed.
+ One reason was mentioned above where an error in a scan process led to a
+ request to close the scan. Another reason could simply be that the
+ application found what it looked for and is thus not interested in the
+ rest of the scan.
+
+ IT COULD ALSO BE DEPENDENT ON INTERNAL ERRORS IN THE API.
+
+ When a close scan request is received, all scan processes are stopped and all
+ resources belonging to those scan processes are released. Stopping the scan
+ processes most often includes communication with an LQH where the local scan
+ is controlled. Finally all resources belonging to the scan are released and
+ the SCAN_TABCONF is sent with an indication that the scan is closed.
+
+
+ CLOSING A COMPLETED SCAN
+
+ When all scan processes are completed then a report is sent to the
+ application which indicates that no more tuples can be fetched.
+ The application will send a close scan request and the same actions as
+ when closing an active scan are performed.
+ In this case it will of course not find any active scan processes.
+ It will instead find that all scan processes have already been released.
+
+ The reason for requiring the api to close the scan is the same as above.
+ It is to avoid any timing problems caused by the api and this block
+ being out of sync.
+
+ * ######################################################################## */
+void Dbtc::execSCAN_TABREQ(Signal* signal)
+{
+ const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
+ const Uint32 ri = scanTabReq->requestInfo;
+ const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF);
+ const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16;
+ const Uint32 schemaVersion = scanTabReq->tableSchemaVersion;
+ const Uint32 transid1 = scanTabReq->transId1;
+ const Uint32 transid2 = scanTabReq->transId2;
+ const Uint32 tmpXX = scanTabReq->buddyConPtr;
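+ // A buddyConPtr of 0xFFFFFFFF from the API means that there is no buddy transaction.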
+ const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
+ Uint32 currSavePointId = 0;
+
+ Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
+ Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
+ Uint32 scanParallel = scanConcurrency;
+ Uint32 errCode;
+ ScanRecordPtr scanptr;
+
+ jamEntry();
+
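+ // The first signal section carries the API operation pointers, one per parallel scan
+ // fragment; copy them into cdata for use in initScanrec().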
+ SegmentedSectionPtr api_op_ptr;
+ signal->getSection(api_op_ptr, 0);
+ copy(&cdata[0], api_op_ptr);
+ releaseSections(signal);
+
+ apiConnectptr.i = scanTabReq->apiConnectPtr;
+ tabptr.i = scanTabReq->tableId;
+
+ if (apiConnectptr.i >= capiConnectFilesize)
+ {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+
+ ptrAss(apiConnectptr, apiConnectRecord);
+ ApiConnectRecord * transP = apiConnectptr.p;
+
+ if (transP->apiConnectstate != CS_CONNECTED) {
+ jam();
+ // could be left over from TCKEYREQ rollback
+ if (transP->apiConnectstate == CS_ABORTING &&
+ transP->abortState == AS_IDLE) {
+ jam();
+ } else if(transP->apiConnectstate == CS_STARTED &&
+ transP->firstTcConnect == RNIL){
+ jam();
+ // left over from simple/dirty read
+ } else {
+ jam();
+ errCode = ZSTATE_ERROR;
+ goto SCAN_TAB_error_no_state_change;
+ }
+ }
+
+ if(tabptr.i >= ctabrecFilesize)
+ {
+ errCode = ZUNKNOWN_TABLE_ERROR;
+ goto SCAN_TAB_error;
+ }
+
+ ptrAss(tabptr, tableRecord);
+ if ((aiLength == 0) ||
+ (!tabptr.p->checkTable(schemaVersion)) ||
+ (scanConcurrency == 0) ||
+ (cfirstfreeTcConnect == RNIL) ||
+ (cfirstfreeScanrec == RNIL)) {
+ goto SCAN_error_check;
+ }
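+ // If a buddy transaction is given and its transaction id matches, run the scan
+ // under the buddy's current savepoint.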
+ if (buddyPtr != RNIL) {
+ jam();
+ ApiConnectRecordPtr buddyApiPtr;
+ buddyApiPtr.i = buddyPtr;
+ ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
+ if ((transid1 == buddyApiPtr.p->transid[0]) &&
+ (transid2 == buddyApiPtr.p->transid[1])) {
+ jam();
+
+ if (buddyApiPtr.p->apiConnectstate == CS_ABORTING) {
+ // transaction has been aborted
+ jam();
+ errCode = buddyApiPtr.p->returncode;
+ goto SCAN_TAB_error;
+ }//if
+ currSavePointId = buddyApiPtr.p->currSavePointId;
+ buddyApiPtr.p->currSavePointId++;
+ }
+ }
+
+ seizeTcConnect(signal);
+ tcConnectptr.p->apiConnect = apiConnectptr.i;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
+ apiConnectptr.p->lastTcConnect = tcConnectptr.i;
+
+ seizeCacheRecord(signal);
+ cachePtr.p->keylen = keyLen;
+ cachePtr.p->save1 = 0;
+ cachePtr.p->distributionKey = scanTabReq->distributionKey;
+ cachePtr.p->distributionKeyIndicator= ScanTabReq::getDistributionKeyFlag(ri);
+ scanptr = seizeScanrec(signal);
+
+ ndbrequire(transP->apiScanRec == RNIL);
+ ndbrequire(scanptr.p->scanApiRec == RNIL);
+
+ initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag);
+
+ transP->apiScanRec = scanptr.i;
+ transP->returncode = 0;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+ transP->buddyPtr = buddyPtr;
+
+ // The scan is started
+ transP->apiConnectstate = CS_START_SCAN;
+ transP->currSavePointId = currSavePointId;
+
+ /**********************************************************
+ * We start the timer on scanRec to be able to discover a
+ * timeout in the API; the API is now in charge!
+ ***********************************************************/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ updateBuddyTimer(apiConnectptr);
+
+ /***********************************************************
+ * WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN
+ * THE API. WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO
+ * IF THERE IS ANY TO RECEIVE.
+ **********************************************************/
+ scanptr.p->scanState = ScanRecord::WAIT_AI;
+ return;
+
+ SCAN_error_check:
+ if (aiLength == 0) {
+ jam();
+ errCode = ZSCAN_AI_LEN_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (!tabptr.p->checkTable(schemaVersion)){
+ jam();
+ errCode = tabptr.p->getErrorCode(schemaVersion);
+ goto SCAN_TAB_error;
+ }//if
+ if (scanConcurrency == 0) {
+ jam();
+ errCode = ZNO_CONCURRENCY_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (cfirstfreeTcConnect == RNIL) {
+ jam();
+ errCode = ZNO_FREE_TC_CONNECTION;
+ goto SCAN_TAB_error;
+ }//if
+ ndbrequire(cfirstfreeScanrec == RNIL);
+ jam();
+ errCode = ZNO_SCANREC_ERROR;
+ goto SCAN_TAB_error;
+
+SCAN_TAB_error:
+ jam();
+ /**
+ * Prepare for upcoming ATTRINFO/KEYINFO
+ */
+ transP->apiConnectstate = CS_ABORTING;
+ transP->abortState = AS_IDLE;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+
+SCAN_TAB_error_no_state_change:
+
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = transP->ndbapiConnect;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = errCode;
+ ref->closeNeeded = 0;
+ sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+ return;
+}//Dbtc::execSCAN_TABREQ()
+
+void Dbtc::initScanrec(ScanRecordPtr scanptr,
+ const ScanTabReq * scanTabReq,
+ UintR scanParallel,
+ UintR noOprecPerFrag)
+{
+ const UintR ri = scanTabReq->requestInfo;
+ scanptr.p->scanTcrec = tcConnectptr.i;
+ scanptr.p->scanApiRec = apiConnectptr.i;
+ scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF;
+ scanptr.p->scanKeyLen = scanTabReq->attrLenKeyLen >> 16;
+ scanptr.p->scanTableref = tabptr.i;
+ scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
+ scanptr.p->scanParallel = scanParallel;
+ scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size;
+ scanptr.p->batch_byte_size = scanTabReq->batch_byte_size;
+ scanptr.p->batch_size_rows = noOprecPerFrag;
+
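+ // Translate the ScanTabReq flags into the request info word that will later be sent in SCAN_FRAGREQ.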
+ Uint32 tmp = 0;
+ ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri));
+ ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri));
+ ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri));
+ ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri));
+ ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri));
+ ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri));
+ ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
+
+ scanptr.p->scanRequestInfo = tmp;
+ scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+ scanptr.p->m_queued_count = 0;
+
+ ScanFragList list(c_scan_frag_pool,
+ scanptr.p->m_running_scan_frags);
+ for (Uint32 i = 0; i < scanParallel; i++) {
+ jam();
+ ScanFragRecPtr ptr;
+ ndbrequire(list.seize(ptr));
+ ptr.p->scanRec = scanptr.i;
+ ptr.p->scanFragId = 0;
+ ptr.p->m_apiPtr = cdata[i];
+ }//for
+
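+ // Count the scan in the statistics: range scans and full table scans are counted separately.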
+ (* (ScanTabReq::getRangeScanFlag(ri) ?
+ &c_counters.c_range_scan_count :
+ &c_counters.c_scan_count))++;
+}//Dbtc::initScanrec()
+
+void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
+{
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = apiConnectptr.p->transid[0];
+ ref->transId2 = apiConnectptr.p->transid[1];
+ ref->errorCode = errCode;
+ ref->closeNeeded = 0;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+}//Dbtc::scanTabRefLab()
+
+/*---------------------------------------------------------------------------*/
+/* */
+/* RECEPTION OF ATTRINFO FOR SCAN TABLE REQUEST. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen)
+{
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ CacheRecord * const regCachePtr = cachePtr.p;
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_AI);
+
+ regCachePtr->currReclenAi = regCachePtr->currReclenAi + Tlen;
+ if (regCachePtr->currReclenAi < scanptr.p->scanAiLength) {
+ if (cfirstfreeAttrbuf == RNIL) {
+ goto scanAttrinfo_attrbuf_error;
+ }//if
+ saveAttrbuf(signal);
+ } else {
+ if (regCachePtr->currReclenAi > scanptr.p->scanAiLength) {
+ goto scanAttrinfo_len_error;
+ } else {
+ /* CURR_RECLEN_AI = SCAN_AI_LENGTH */
+ if (cfirstfreeAttrbuf == RNIL) {
+ goto scanAttrinfo_attrbuf2_error;
+ }//if
+ saveAttrbuf(signal);
+ /**************************************************
+ * WE HAVE NOW RECEIVED ALL INFORMATION CONCERNING
+ * THIS SCAN. WE ARE READY TO START THE ACTUAL
+ * EXECUTION OF THE SCAN QUERY
+ **************************************************/
+ diFcountReqLab(signal, scanptr);
+ return;
+ }//if
+ }//if
+ return;
+
+scanAttrinfo_attrbuf_error:
+ jam();
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
+ return;
+
+scanAttrinfo_attrbuf2_error:
+ jam();
+ abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR);
+ return;
+
+scanAttrinfo_len_error:
+ jam();
+ abortScanLab(signal, scanptr, ZLENGTH_ERROR);
+ return;
+}//Dbtc::scanAttrinfoLab()
+
+void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr)
+{
+ /**
+ * Check so that the table is not being dropped
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ tabPtr.p = &tableRecord[tabPtr.i];
+ if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
+ ;
+ } else {
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ return;
+ }
+
+ scanptr.p->scanNextFragId = 0;
+ scanptr.p->m_booked_fragments_count= 0;
+ scanptr.p->scanState = ScanRecord::WAIT_FRAGMENT_COUNT;
+
+ if(!cachePtr.p->distributionKeyIndicator)
+ {
+ jam();
+ /*************************************************
+ * THE FIRST STEP, RECEIVING THE SCAN REQUEST, IS SUCCESSFULLY COMPLETED.
+ * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE.
+ ***************************************************/
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanptr.p->scanTableref;
+ sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB);
+ }
+ else
+ {
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = cachePtr.p->distributionKey;
+ EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
+ UintR TerrorIndicator = signal->theData[0];
+ jamEntry();
+ if (TerrorIndicator != 0) {
+ signal->theData[0] = tcConnectptr.i;
+ //signal->theData[1] Contains error
+ execDI_FCOUNTREF(signal);
+ return;
+ }
+
+ UintR Tdata1 = signal->theData[1];
+ scanptr.p->scanNextFragId = Tdata1;
+
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = 1; // Frag count
+ execDI_FCOUNTCONF(signal);
+ }
+ return;
+}//Dbtc::diFcountReqLab()
+
+/********************************************************************
+ * execDI_FCOUNTCONF
+ *
+ * WE HAVE ASKED DIH ABOUT THE NUMBER OF FRAGMENTS IN THIS TABLE.
+ * WE WILL NOW START A NUMBER OF PARALLEL SCAN PROCESSES. EACH OF
+ * THESE WILL SCAN ONE FRAGMENT AT A TIME. THEY WILL CONTINUE THIS
+ * UNTIL THERE ARE NO MORE FRAGMENTS TO SCAN OR UNTIL THE APPLICATION
+ * CLOSES THE SCAN.
+ ********************************************************************/
+void Dbtc::execDI_FCOUNTCONF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ Uint32 tfragCount = signal->theData[1];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ releaseScanResources(scanptr);
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+ if (tfragCount == 0) {
+ jam();
+ abortScanLab(signal, scanptr, ZNO_FRAGMENT_ERROR);
+ return;
+ }//if
+
+ /**
+ * Check so that the table is not being dropped
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ tabPtr.p = &tableRecord[tabPtr.i];
+ if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){
+ ;
+ } else {
+ abortScanLab(signal, scanptr,
+ tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion));
+ return;
+ }
+
+ scanptr.p->scanParallel = tfragCount;
+ scanptr.p->scanNoFrag = tfragCount;
+ scanptr.p->scanState = ScanRecord::RUNNING;
+
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ updateBuddyTimer(apiConnectptr);
+
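+ // Start one scan process per fragment up to the requested parallelism:
+ // ask DIH for the primary replica of each fragment.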
+ ScanFragRecPtr ptr;
+ ScanFragList list(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ for (list.first(ptr); !ptr.isNull() && tfragCount;
+ list.next(ptr), tfragCount--){
+ jam();
+
+ ptr.p->lqhBlockref = 0;
+ ptr.p->startFragTimer(ctcTimer);
+ ptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ ptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ ptr.p->startFragTimer(ctcTimer);
+
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = ptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }//for
+
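+ // Any scan fragment records left over (parallelism larger than the fragment count)
+ // are marked as complete and queued for delivery with zero operations.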
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+ for (; !ptr.isNull();)
+ {
+ ptr.p->m_ops = 0;
+ ptr.p->m_totalLen = 0;
+ ptr.p->m_scan_frag_conf_status = 1;
+ ptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
+ ptr.p->stopFragTimer();
+
+ ScanFragRecPtr tmp = ptr;
+ list.next(ptr);
+ list.remove(tmp);
+ queued.add(tmp);
+ scanptr.p->m_queued_count++;
+ }
+}//Dbtc::execDI_FCOUNTCONF()
+
+/******************************************************
+ * execDI_FCOUNTREF
+ ******************************************************/
+void Dbtc::execDI_FCOUNTREF(Signal* signal)
+{
+ jamEntry();
+ tcConnectptr.i = signal->theData[0];
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ const Uint32 errCode = signal->theData[1];
+ apiConnectptr.i = tcConnectptr.p->apiConnect;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ releaseScanResources(scanptr);
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+ abortScanLab(signal, scanptr, errCode);
+}//Dbtc::execDI_FCOUNTREF()
+
+void Dbtc::abortScanLab(Signal* signal, ScanRecordPtr scanptr, Uint32 errCode)
+{
+ scanTabRefLab(signal, errCode);
+ releaseScanResources(scanptr);
+}//Dbtc::abortScanLab()
+
+void Dbtc::releaseScanResources(ScanRecordPtr scanPtr)
+{
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ releaseKeys();
+ releaseAttrinfo();
+ }//if
+ tcConnectptr.i = scanPtr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ releaseTcCon();
+
+ ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+
+ ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i);
+ ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i);
+
+ // link into free list
+ scanPtr.p->nextScan = cfirstfreeScanrec;
+ scanPtr.p->scanState = ScanRecord::IDLE;
+ scanPtr.p->scanTcrec = RNIL;
+ scanPtr.p->scanApiRec = RNIL;
+ cfirstfreeScanrec = scanPtr.i;
+
+ apiConnectptr.p->apiScanRec = RNIL;
+ apiConnectptr.p->apiConnectstate = CS_CONNECTED;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+}//Dbtc::releaseScanResources()
+
+
+/****************************************************************
+ * execDIGETPRIMCONF
+ *
+ * WE HAVE RECEIVED THE PRIMARY NODE OF THIS FRAGMENT.
+ * WE ARE NOW READY TO ASK FOR PERMISSION TO LOAD THIS
+ * SPECIFIC NODE WITH A SCAN OPERATION.
+ ****************************************************************/
+void Dbtc::execDIGETPRIMCONF(Signal* signal)
+{
+ jamEntry();
+ // tcConnectptr.i in theData[0] is not used
+ scanFragptr.i = signal->theData[1];
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ tnodeid = signal->theData[2];
+ arrGuard(tnodeid, MAX_NDB_NODES);
+
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
+ scanFragptr.p->stopFragTimer();
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ /**
+ * This must be false, since select count(*) could otherwise
+ * "pass" committing operations on backup fragments and
+ * get an incorrect row count
+ */
+ if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo))
+ {
+ jam();
+ Uint32 max = 3+signal->theData[6];
+ Uint32 nodeid = getOwnNodeId();
+ for(Uint32 i = 3; i<max; i++)
+ if(signal->theData[i] == nodeid)
+ {
+ jam();
+ tnodeid = nodeid;
+ break;
+ }
+ }
+
+ {
+ /**
+ * Check table
+ */
+ TableRecordPtr tabPtr;
+ tabPtr.i = scanptr.p->scanTableref;
+ ptrAss(tabPtr, tableRecord);
+ Uint32 schemaVersion = scanptr.p->scanSchemaVersion;
+ if(tabPtr.p->checkTable(schemaVersion) == false){
+ jam();
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion));
+ return;
+ }
+ }
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ switch (scanptr.p->scanState) {
+ case ScanRecord::CLOSING_SCAN:
+ jam();
+ updateBuddyTimer(apiConnectptr);
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ }
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ default:
+ jam();
+ /*empty*/;
+ break;
+ }//switch
+ Uint32 ref = calcLqhBlockRef(tnodeid);
+ scanFragptr.p->lqhBlockref = ref;
+ scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
+ sendScanFragReq(signal, scanptr.p, scanFragptr.p);
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
+ attrbufptr.i = cachePtr.p->firstAttrbuf;
+ while (attrbufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(attrbufptr, cattrbufFilesize, attrbufRecord);
+ sendAttrinfo(signal,
+ scanFragptr.i,
+ attrbufptr.p,
+ ref);
+ attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
+ }//while
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ scanFragptr.p->startFragTimer(ctcTimer);
+ updateBuddyTimer(apiConnectptr);
+ /*********************************************
+ * WE HAVE NOW STARTED A FRAGMENT SCAN. NOW
+ * WAIT FOR THE FIRST SCANNED RECORDS
+ *********************************************/
+}//Dbtc::execDIGETPRIMCONF()
+
+/***************************************************
+ * execDIGETPRIMREF
+ *
+ * WE ARE NOW FORCED TO STOP THE SCAN. THIS ERROR
+ * IS NOT RECOVERABLE SINCE THERE IS A PROBLEM WITH
+ * FINDING A PRIMARY REPLICA OF A CERTAIN FRAGMENT.
+ ***************************************************/
+void Dbtc::execDIGETPRIMREF(Signal* signal)
+{
+ jamEntry();
+ // tcConnectptr.i in theData[0] is not used.
+ scanFragptr.i = signal->theData[1];
+ const Uint32 errCode = signal->theData[2];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+
+ scanError(signal, scanptr, errCode);
+}//Dbtc::execDIGETPRIMREF()
+
+/**
+ * Dbtc::execSCAN_FRAGREF
+ * Our attempt to scan a fragment was refused;
+ * set the error code and close all other fragment
+ * scans belonging to this scan
+ */
+void Dbtc::execSCAN_FRAGREF(Signal* signal)
+{
+ const ScanFragRef * const ref = (ScanFragRef *)&signal->theData[0];
+
+ jamEntry();
+ const Uint32 errCode = ref->errorCode;
+
+ scanFragptr.i = ref->senderData;
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ Uint32 transid1 = apiConnectptr.p->transid[0] ^ ref->transId1;
+ Uint32 transid2 = apiConnectptr.p->transid[1] ^ ref->transId2;
+ transid1 = transid1 | transid2;
+ if (transid1 != 0) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ /**
+ * Set the error code, close the connection to this lqh fragment,
+ * stop the fragment timer and call scanError to start
+ * the close of the other fragment scans
+ */
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+ {
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ }
+ scanError(signal, scanptr, errCode);
+}//Dbtc::execSCAN_FRAGREF()
+
+/**
+ * Dbtc::scanError
+ *
+ * Called when an error occurs during a scan
+ */
+void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode)
+{
+ jam();
+ ScanRecord* scanP = scanptr.p;
+
+ DEBUG("scanError, errorCode = "<< errorCode <<
+ ", scanState = " << scanptr.p->scanState);
+
+ apiConnectptr.i = scanP->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ ndbrequire(apiConnectptr.p->apiScanRec == scanptr.i);
+
+ if(scanP->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ }
+
+ ndbrequire(scanP->scanState == ScanRecord::RUNNING);
+
+ /**
+ * Close scan wo/ having received an order to do so
+ */
+ close_scan_req(signal, scanptr, false);
+
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+ if(apiFail){
+ jam();
+ return;
+ }
+
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = apiConnectptr.p->transid[0];
+ ref->transId2 = apiConnectptr.p->transid[1];
+ ref->errorCode = errorCode;
+ ref->closeNeeded = 1;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+}//Dbtc::scanError()
+
+/************************************************************
+ * execSCAN_FRAGCONF
+ *
+ * A NUMBER OF OPERATIONS HAVE BEEN COMPLETED IN THIS
+ * FRAGMENT. TAKE CARE OF AND ISSUE FURTHER ACTIONS.
+ ************************************************************/
+void Dbtc::execSCAN_FRAGCONF(Signal* signal)
+{
+ Uint32 transid1, transid2, total_len;
+ jamEntry();
+
+ const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
+ const Uint32 noCompletedOps = conf->completedOps;
+ const Uint32 status = conf->fragmentCompleted;
+
+ scanFragptr.i = conf->senderData;
+ c_scan_frag_pool.getPtr(scanFragptr);
+
+ ScanRecordPtr scanptr;
+ scanptr.i = scanFragptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+
+ apiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ transid1 = apiConnectptr.p->transid[0] ^ conf->transId1;
+ transid2 = apiConnectptr.p->transid[1] ^ conf->transId2;
+ total_len= conf->total_len;
+ transid1 = transid1 | transid2;
+ if (transid1 != 0) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ if(status == 0){
+ /**
+ * We have started closing = we sent a close -> ignore this
+ */
+ return;
+ } else {
+ jam();
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+
+ run.release(scanFragptr);
+ scanFragptr.p->stopFragTimer();
+ scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
+ }
+ close_scan_req_send_conf(signal, scanptr);
+ return;
+ }
+
+ if(noCompletedOps == 0 && status != 0 &&
+ scanptr.p->scanNextFragId+scanptr.p->m_booked_fragments_count < scanptr.p->scanNoFrag){
+ /**
+ * Start on next fragment
+ */
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+ scanFragptr.p->startFragTimer(ctcTimer);
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ return;
+ }
+ /*
+ Uint32 totalLen = 0;
+ for(Uint32 i = 0; i<noCompletedOps; i++){
+ Uint32 tmp = conf->opReturnDataLen[i];
+ totalLen += tmp;
+ }
+ */
+ {
+ ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+
+ run.remove(scanFragptr);
+ queued.add(scanFragptr);
+ scanptr.p->m_queued_count++;
+ }
+
+ scanFragptr.p->m_scan_frag_conf_status = status;
+ scanFragptr.p->m_ops = noCompletedOps;
+ scanFragptr.p->m_totalLen = total_len;
+ scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
+ scanFragptr.p->stopFragTimer();
+
+ if(scanptr.p->m_queued_count > /** Min */ 0){
+ jam();
+ sendScanTabConf(signal, scanptr);
+ }
+}//Dbtc::execSCAN_FRAGCONF()
+
+/****************************************************************************
+ * execSCAN_NEXTREQ
+ *
+ * THE APPLICATION HAS PROCESSED THE TUPLES TRANSFERRED AND IS NOW READY FOR
+ * MORE. THIS SIGNAL IS ALSO USED TO CLOSE THE SCAN.
+ ****************************************************************************/
+void Dbtc::execSCAN_NEXTREQ(Signal* signal)
+{
+ const ScanNextReq * const req = (ScanNextReq *)&signal->theData[0];
+ const UintR transid1 = req->transId1;
+ const UintR transid2 = req->transId2;
+ const UintR stopScan = req->stopScan;
+
+ jamEntry();
+
+ apiConnectptr.i = req->apiConnectPtr;
+ if (apiConnectptr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(apiConnectptr, apiConnectRecord);
+
+ /**
+ * Check transid
+ */
+ const UintR ctransid1 = apiConnectptr.p->transid[0] ^ transid1;
+ const UintR ctransid2 = apiConnectptr.p->transid[1] ^ transid2;
+ if ((ctransid1 | ctransid2) != 0){
+ ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
+ ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ ref->transId1 = transid1;
+ ref->transId2 = transid2;
+ ref->errorCode = ZSTATE_ERROR;
+ ref->closeNeeded = 0;
+ sendSignal(signal->senderBlockRef(), GSN_SCAN_TABREF,
+ signal, ScanTabRef::SignalLength, JBB);
+ DEBUG("Wrong transid");
+ return;
+ }
+
+ /**
+ * Check state of API connection
+ */
+ if (apiConnectptr.p->apiConnectstate != CS_START_SCAN) {
+ jam();
+ if (apiConnectptr.p->apiConnectstate == CS_CONNECTED) {
+ jam();
+ /*********************************************************************
+ * The application sends a SCAN_NEXTREQ after experiencing a time-out.
+ * We will send a SCAN_TABREF to indicate a time-out occurred.
+ *********************************************************************/
+ DEBUG("scanTabRefLab: ZSCANTIME_OUT_ERROR2");
+ ndbout_c("apiConnectptr(%d) -> abort", apiConnectptr.i);
+ ndbrequire(false); //B2 indication of strange things going on
+ scanTabRefLab(signal, ZSCANTIME_OUT_ERROR2);
+ return;
+ }
+ DEBUG("scanTabRefLab: ZSTATE_ERROR");
+ DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate);
+ ndbrequire(false); //B2 indication of strange things going on
+ scanTabRefLab(signal, ZSTATE_ERROR);
+ return;
+ }//if
+
+ /*******************************************************
+ * START THE ACTUAL LOGIC OF SCAN_NEXTREQ.
+ ********************************************************/
+ // Stop the timer that is used to check for timeout in the API
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ ScanRecordPtr scanptr;
+ scanptr.i = apiConnectptr.p->apiScanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ScanRecord* scanP = scanptr.p;
+
+ const Uint32 len = signal->getLength() - 4;
+
+ if (stopScan == ZTRUE) {
+ jam();
+ /*********************************************************************
+ * APPLICATION IS CLOSING THE SCAN.
+ **********************************************************************/
+ close_scan_req(signal, scanptr, true);
+ return;
+ }//if
+
+ if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
+ jam();
+ /**
+ * The scan is closing (typically due to error)
+ * but the API hasn't understood it yet
+ *
+ * Wait for API close request
+ */
+ return;
+ }
+
+ // Copy op ptrs so I don't overwrite them when sending...
+ memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
+
+ ScanFragNextReq tmp;
+ tmp.closeFlag = ZFALSE;
+ tmp.transId1 = apiConnectptr.p->transid[0];
+ tmp.transId2 = apiConnectptr.p->transid[1];
+ tmp.batch_size_rows = scanP->batch_size_rows;
+ tmp.batch_size_bytes = scanP->batch_byte_size;
+
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ for(Uint32 i = 0 ; i<len; i++){
+ jam();
+ scanFragptr.i = signal->theData[i+25];
+ c_scan_frag_pool.getPtr(scanFragptr);
+ ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
+
+ scanFragptr.p->startFragTimer(ctcTimer);
+ scanFragptr.p->m_ops = 0;
+
+ if(scanFragptr.p->m_scan_frag_conf_status)
+ {
+ /**
+ * last scan was complete
+ */
+ jam();
+ ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
+ jam();
+ ndbassert(scanptr.p->m_booked_fragments_count);
+ scanptr.p->m_booked_fragments_count--;
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }
+ else
+ {
+ jam();
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend();
+ * req = tmp;
+ req->senderData = scanFragptr.i;
+ sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
+ delivered.remove(scanFragptr);
+ running.add(scanFragptr);
+ }//for
+
+}//Dbtc::execSCAN_NEXTREQ()
+
+void
+Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
+
+ ScanRecord* scanP = scanPtr.p;
+ ndbrequire(scanPtr.p->scanState != ScanRecord::IDLE);
+ scanPtr.p->scanState = ScanRecord::CLOSING_SCAN;
+ scanPtr.p->m_close_scan_req = req_received;
+
+ /**
+ * Queue : Action
+ * ============= : =================
+ * completed : -
+ * running : close -> LQH
+ * delivered w/ : close -> LQH
+ * delivered wo/ : move to completed
+ * queued w/ : close -> LQH
+ * queued wo/ : move to completed
+ */
+
+ ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
+ nextReq->closeFlag = ZTRUE;
+ nextReq->transId1 = apiConnectptr.p->transid[0];
+ nextReq->transId2 = apiConnectptr.p->transid[1];
+
+ {
+ ScanFragRecPtr ptr;
+ ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
+ ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
+
+ // Close running
+ for(running.first(ptr); !ptr.isNull(); ){
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ running.next(ptr);
+
+ if(curr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF){
+ jam();
+ continue;
+ }
+ ndbrequire(curr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
+
+ curr.p->startFragTimer(ctcTimer);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
+
+ // Close delivered
+ for(delivered.first(ptr); !ptr.isNull(); ){
+ jam();
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ delivered.next(ptr);
+
+ ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
+ delivered.remove(curr);
+
+ if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+
+ } else {
+ jam();
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }//for
+
+ /**
+ * All queued with data should be closed
+ */
+ for(queued.first(ptr); !ptr.isNull(); ){
+ jam();
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ queued.remove(curr);
+ scanP->m_queued_count--;
+
+ if(curr.p->m_ops > 0){
+ jam();
+ running.add(curr);
+ curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ curr.p->startFragTimer(ctcTimer);
+ nextReq->senderData = curr.i;
+ sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ } else {
+ jam();
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }
+ }
+ close_scan_req_send_conf(signal, scanPtr);
+}
+
+void
+Dbtc::close_scan_req_send_conf(Signal* signal, ScanRecordPtr scanPtr){
+
+ jam();
+
+ ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
+ ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
+ //ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
+
+#if 0
+ {
+ ScanFragList comp(c_scan_frag_pool, scanPtr.p->m_completed_scan_frags);
+ ScanFragRecPtr ptr;
+ for(comp.first(ptr); !ptr.isNull(); comp.next(ptr)){
+ ndbrequire(ptr.p->scanFragTimer == 0);
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::COMPLETED);
+ }
+ }
+#endif
+
+ if(!scanPtr.p->m_running_scan_frags.isEmpty()){
+ jam();
+ return;
+ }
+
+ const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE);
+
+ if(!scanPtr.p->m_close_scan_req){
+ jam();
+ /**
+ * The API hasn't ordered the close yet
+ */
+ return;
+ }
+
+ Uint32 ref = apiConnectptr.p->ndbapiBlockref;
+ if(!apiFail && ref){
+ jam();
+ ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
+ conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ conf->requestInfo = ScanTabConf::EndOfData;
+ conf->transId1 = apiConnectptr.p->transid[0];
+ conf->transId2 = apiConnectptr.p->transid[1];
+ sendSignal(ref, GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB);
+ }
+
+ releaseScanResources(scanPtr);
+
+ if(apiFail){
+ jam();
+ /**
+ * API has failed
+ */
+ handleApiFailState(signal, apiConnectptr.i);
+ }
+}
+
+Dbtc::ScanRecordPtr
+Dbtc::seizeScanrec(Signal* signal) {
+ ScanRecordPtr scanptr;
+ scanptr.i = cfirstfreeScanrec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ cfirstfreeScanrec = scanptr.p->nextScan;
+ scanptr.p->nextScan = RNIL;
+ ndbrequire(scanptr.p->scanState == ScanRecord::IDLE);
+ return scanptr;
+}//Dbtc::seizeScanrec()
+
+void Dbtc::sendScanFragReq(Signal* signal,
+ ScanRecord* scanP,
+ ScanFragRec* scanFragP)
+{
+ ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
+ Uint32 requestInfo = scanP->scanRequestInfo;
+ ScanFragReq::setScanPrio(requestInfo, 1);
+ apiConnectptr.i = scanP->scanApiRec;
+ req->tableId = scanP->scanTableref;
+ req->schemaVersion = scanP->scanSchemaVersion;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ req->senderData = scanFragptr.i;
+ req->requestInfo = requestInfo;
+ req->fragmentNoKeyLen = scanFragP->scanFragId | (scanP->scanKeyLen << 16);
+ req->resultRef = apiConnectptr.p->ndbapiBlockref;
+ req->savePointId = apiConnectptr.p->currSavePointId;
+ req->transId1 = apiConnectptr.p->transid[0];
+ req->transId2 = apiConnectptr.p->transid[1];
+ req->clientOpPtr = scanFragP->m_apiPtr;
+ req->batch_size_rows= scanP->batch_size_rows;
+ req->batch_size_bytes= scanP->batch_byte_size;
+ sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
+ if(scanP->scanKeyLen > 0)
+ {
+ tcConnectptr.i = scanFragptr.i;
+ packKeyData000Lab(signal, scanFragP->lqhBlockref, scanP->scanKeyLen);
+ }
+ updateBuddyTimer(apiConnectptr);
+ scanFragP->startFragTimer(ctcTimer);
+}//Dbtc::sendScanFragReq()
+
+
+void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
+ jam();
+ Uint32* ops = signal->getDataPtrSend()+4;
+ Uint32 op_count = scanPtr.p->m_queued_count;
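+ // If the operation data does not fit within the 25 word signal area it is sent
+ // as a separate linear section below.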
+ if(4 + 3 * op_count > 25){
+ jam();
+ ops += 21;
+ }
+
+ int left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
+ Uint32 booked = scanPtr.p->m_booked_fragments_count;
+
+ ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
+ conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
+ conf->requestInfo = op_count;
+ conf->transId1 = apiConnectptr.p->transid[0];
+ conf->transId2 = apiConnectptr.p->transid[1];
+ ScanFragRecPtr ptr;
+ {
+ ScanFragList queued(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool,scanPtr.p->m_delivered_scan_frags);
+ for(queued.first(ptr); !ptr.isNull(); ){
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
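+ // A fragment record can be released when its fragment scan is complete and the
+ // already booked records cover the remaining fragments.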
+ bool done = curr.p->m_scan_frag_conf_status && (left <= (int)booked);
+ if(curr.p->m_scan_frag_conf_status)
+ booked++;
+
+ * ops++ = curr.p->m_apiPtr;
+ * ops++ = done ? RNIL : curr.i;
+ * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
+
+ queued.remove(curr);
+ if(!done){
+ delivered.add(curr);
+ curr.p->scanFragState = ScanFragRec::DELIVERED;
+ curr.p->stopFragTimer();
+ } else {
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
+ }
+ }
+
+ scanPtr.p->m_booked_fragments_count = booked;
+ if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
+ scanPtr.p->m_running_scan_frags.isEmpty())
+ {
+ conf->requestInfo = op_count | ScanTabConf::EndOfData;
+ releaseScanResources(scanPtr);
+ }
+
+ if(4 + 3 * op_count > 25){
+ jam();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = signal->getDataPtrSend()+25;
+ ptr[0].sz = 3 * op_count;
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength, JBB, ptr, 1);
+ } else {
+ jam();
+ sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
+ ScanTabConf::SignalLength + 3 * op_count, JBB);
+ }
+ scanPtr.p->m_queued_count = 0;
+}//Dbtc::sendScanTabConf()
+
+
+void Dbtc::gcpTcfinished(Signal* signal)
+{
+ signal->theData[1] = tcheckGcpId;
+ sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB);
+}//Dbtc::gcpTcfinished()
+
+void Dbtc::initApiConnect(Signal* signal)
+{
+ Uint32 tiacTmp;
+ Uint32 guard4;
+
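+ // The api connect records are divided into three equal pools: ordinary records,
+ // copy records and fail records, each with its own free list.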
+ tiacTmp = capiConnectFilesize / 3;
+ ndbrequire(tiacTmp > 0);
+ guard4 = tiacTmp + 1;
+ for (cachePtr.i = 0; cachePtr.i < guard4; cachePtr.i++) {
+ refresh_watch_dog();
+ ptrAss(cachePtr, cacheRecord);
+ cachePtr.p->firstAttrbuf = RNIL;
+ cachePtr.p->lastAttrbuf = RNIL;
+ cachePtr.p->firstKeybuf = RNIL;
+ cachePtr.p->lastKeybuf = RNIL;
+ cachePtr.p->nextCacheRec = cachePtr.i + 1;
+ }//for
+ cachePtr.i = tiacTmp;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ cachePtr.p->nextCacheRec = RNIL;
+ cfirstfreeCacheRec = 0;
+
+ guard4 = tiacTmp - 1;
+ for (apiConnectptr.i = 0; apiConnectptr.i <= guard4; apiConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(apiConnectptr, apiConnectRecord);
+ apiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
+ apiConnectptr.p->apiFailState = ZFALSE;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = tiacTmp - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnect = 0;
+ guard4 = (2 * tiacTmp) - 1;
+ for (apiConnectptr.i = tiacTmp; apiConnectptr.i <= guard4; apiConnectptr.i++)
+ {
+ refresh_watch_dog();
+ jam();
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->apiFailState = ZFALSE;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = (2 * tiacTmp) - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnectCopy = tiacTmp;
+ guard4 = (3 * tiacTmp) - 1;
+ for (apiConnectptr.i = 2 * tiacTmp; apiConnectptr.i <= guard4;
+ apiConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->apiFailState = ZFALSE;
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ apiConnectptr.p->cachePtr = RNIL;
+ apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1;
+ apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
+ apiConnectptr.p->commitAckMarker = RNIL;
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ apiConnectptr.p->accumulatingIndexOp = RNIL;
+ apiConnectptr.p->executingIndexOp = RNIL;
+ apiConnectptr.p->buddyPtr = RNIL;
+ apiConnectptr.p->currSavePointId = 0;
+ }//for
+ apiConnectptr.i = (3 * tiacTmp) - 1;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ apiConnectptr.p->nextApiConnect = RNIL;
+ cfirstfreeApiConnectFail = 2 * tiacTmp;
+}//Dbtc::initApiConnect()
+
+void Dbtc::initattrbuf(Signal* signal)
+{
+ ndbrequire(cattrbufFilesize > 0);
+ for (attrbufptr.i = 0; attrbufptr.i < cattrbufFilesize; attrbufptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(attrbufptr, attrbufRecord);
+ attrbufptr.p->attrbuf[ZINBUF_NEXT] = attrbufptr.i + 1; /* NEXT ATTRBUF */
+ }//for
+ attrbufptr.i = cattrbufFilesize - 1;
+ ptrAss(attrbufptr, attrbufRecord);
+ attrbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRBUF */
+ cfirstfreeAttrbuf = 0;
+}//Dbtc::initattrbuf()
+
+void Dbtc::initdatabuf(Signal* signal)
+{
+ ndbrequire(cdatabufFilesize > 0);
+ for (databufptr.i = 0; databufptr.i < cdatabufFilesize; databufptr.i++) {
+ refresh_watch_dog();
+ ptrAss(databufptr, databufRecord);
+ databufptr.p->nextDatabuf = databufptr.i + 1;
+ }//for
+ databufptr.i = cdatabufFilesize - 1;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ databufptr.p->nextDatabuf = RNIL;
+ cfirstfreeDatabuf = 0;
+}//Dbtc::initdatabuf()
+
+void Dbtc::initgcp(Signal* signal)
+{
+ ndbrequire(cgcpFilesize > 0);
+ for (gcpPtr.i = 0; gcpPtr.i < cgcpFilesize; gcpPtr.i++) {
+ ptrAss(gcpPtr, gcpRecord);
+ gcpPtr.p->nextGcp = gcpPtr.i + 1;
+ }//for
+ gcpPtr.i = cgcpFilesize - 1;
+ ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord);
+ gcpPtr.p->nextGcp = RNIL;
+ cfirstfreeGcp = 0;
+ cfirstgcp = RNIL;
+ clastgcp = RNIL;
+}//Dbtc::initgcp()
+
+void Dbtc::inithost(Signal* signal)
+{
+ cpackedListIndex = 0;
+ ndbrequire(chostFilesize > 0);
+ for (hostptr.i = 0; hostptr.i < chostFilesize; hostptr.i++) {
+ jam();
+ ptrAss(hostptr, hostRecord);
+ hostptr.p->hostStatus = HS_DEAD;
+ hostptr.p->inPackedList = false;
+ hostptr.p->takeOverStatus = TOS_NOT_DEFINED;
+ hostptr.p->lqhTransStatus = LTS_IDLE;
+ hostptr.p->noOfWordsTCKEYCONF = 0;
+ hostptr.p->noOfWordsTCINDXCONF = 0;
+ hostptr.p->noOfPackedWordsLqh = 0;
+ hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
+ }//for
+}//Dbtc::inithost()
+
+void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0,
+ Uint32 retRef, Uint32 retData)
+{
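+ // One record class is initialised per invocation; after the switch a
+ // CONTINUEB reschedules the next class, and the final case (11) replies
+ // with READ_CONFIG_CONF instead.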
+ switch (Tdata0) {
+ case 0:
+ jam();
+ initApiConnect(signal);
+ break;
+ case 1:
+ jam();
+ initattrbuf(signal);
+ break;
+ case 2:
+ jam();
+ initdatabuf(signal);
+ break;
+ case 3:
+ jam();
+ initgcp(signal);
+ break;
+ case 4:
+ jam();
+ inithost(signal);
+ break;
+ case 5:
+ jam();
+ // UNUSED Free to initialise something
+ break;
+ case 6:
+ jam();
+ initTable(signal);
+ break;
+ case 7:
+ jam();
+ initialiseScanrec(signal);
+ break;
+ case 8:
+ jam();
+ initialiseScanOprec(signal);
+ break;
+ case 9:
+ jam();
+ initialiseScanFragrec(signal);
+ break;
+ case 10:
+ jam();
+ initialiseTcConnect(signal);
+ break;
+ case 11:
+ jam();
+ initTcFail(signal);
+
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+
+ signal->theData[0] = TcContinueB::ZINITIALISE_RECORDS;
+ signal->theData[1] = Tdata0 + 1;
+ signal->theData[2] = 0;
+ signal->theData[3] = retRef;
+ signal->theData[4] = retData;
+ sendSignal(DBTC_REF, GSN_CONTINUEB, signal, 5, JBB);
+}
+
+/* ========================================================================= */
+/* ======= INITIALISE_SCANREC ======= */
+/* */
+/* ========================================================================= */
+void Dbtc::initialiseScanrec(Signal* signal)
+{
+ ScanRecordPtr scanptr;
+ ndbrequire(cscanrecFileSize > 0);
+ for (scanptr.i = 0; scanptr.i < cscanrecFileSize; scanptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(scanptr, scanRecord);
+ new (scanptr.p) ScanRecord();
+ scanptr.p->scanState = ScanRecord::IDLE;
+ scanptr.p->scanApiRec = RNIL;
+ scanptr.p->nextScan = scanptr.i + 1;
+ }//for
+ scanptr.i = cscanrecFileSize - 1;
+ ptrAss(scanptr, scanRecord);
+ scanptr.p->nextScan = RNIL;
+ cfirstfreeScanrec = 0;
+}//Dbtc::initialiseScanrec()
+
+void Dbtc::initialiseScanFragrec(Signal* signal)
+{
+}//Dbtc::initialiseScanFragrec()
+
+void Dbtc::initialiseScanOprec(Signal* signal)
+{
+}//Dbtc::initialiseScanOprec()
+
+void Dbtc::initTable(Signal* signal)
+{
+
+ ndbrequire(ctabrecFilesize > 0);
+ for (tabptr.i = 0; tabptr.i < ctabrecFilesize; tabptr.i++) {
+ refresh_watch_dog();
+ ptrAss(tabptr, tableRecord);
+ tabptr.p->currentSchemaVersion = 0;
+ tabptr.p->storedTable = true;
+ tabptr.p->tableType = 0;
+ tabptr.p->enabled = false;
+ tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = 0;
+ tabptr.p->hasCharAttr = 0;
+ tabptr.p->noOfDistrKeys = 0;
+ for (unsigned k = 0; k < MAX_ATTRIBUTES_IN_INDEX; k++) {
+ tabptr.p->keyAttr[k].attributeDescriptor = 0;
+ tabptr.p->keyAttr[k].charsetInfo = 0;
+ }
+ }//for
+}//Dbtc::initTable()
+
+void Dbtc::initialiseTcConnect(Signal* signal)
+{
+ ndbrequire(ctcConnectFilesize >= 2);
+
+ // Place half of tcConnectptr's in cfirstfreeTcConnectFail list
+ Uint32 titcTmp = ctcConnectFilesize / 2;
+ for (tcConnectptr.i = 0; tcConnectptr.i < titcTmp; tcConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_RESTART;
+ tcConnectptr.p->apiConnect = RNIL;
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = titcTmp - 1;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->nextTcConnect = RNIL;
+ cfirstfreeTcConnectFail = 0;
+
+ // Place other half in cfirstfreeTcConnect list
+ for (tcConnectptr.i = titcTmp; tcConnectptr.i < ctcConnectFilesize;
+ tcConnectptr.i++) {
+ refresh_watch_dog();
+ jam();
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->tcConnectstate = OS_RESTART;
+ tcConnectptr.p->apiConnect = RNIL;
+ tcConnectptr.p->noOfNodes = 0;
+ tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1;
+ }//for
+ tcConnectptr.i = ctcConnectFilesize - 1;
+ ptrAss(tcConnectptr, tcConnectRecord);
+ tcConnectptr.p->nextTcConnect = RNIL;
+ cfirstfreeTcConnect = titcTmp;
+ c_counters.cconcurrentOp = 0;
+}//Dbtc::initialiseTcConnect()
+
+/* ------------------------------------------------------------------------- */
+/* ---- LINK A GLOBAL CHECKPOINT RECORD INTO THE LIST WITH TRANSACTIONS */
+/* WAITING FOR COMPLETION. */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkGciInGcilist(Signal* signal)
+{
+ GcpRecordPtr tmpGcpPointer;
+ if (cfirstgcp == RNIL) {
+ jam();
+ cfirstgcp = gcpPtr.i;
+ } else {
+ jam();
+ tmpGcpPointer.i = clastgcp;
+ ptrCheckGuard(tmpGcpPointer, cgcpFilesize, gcpRecord);
+ tmpGcpPointer.p->nextGcp = gcpPtr.i;
+ }//if
+ clastgcp = gcpPtr.i;
+}//Dbtc::linkGciInGcilist()
+
+/* ------------------------------------------------------------------------- */
+/* ------- LINK SECONDARY KEY BUFFER IN OPERATION RECORD ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkKeybuf(Signal* signal)
+{
+ seizeDatabuf(signal);
+ tmpDatabufptr.i = cachePtr.p->lastKeybuf;
+ cachePtr.p->lastKeybuf = databufptr.i;
+ if (tmpDatabufptr.i == RNIL) {
+ jam();
+ cachePtr.p->firstKeybuf = databufptr.i;
+ } else {
+ jam();
+ ptrCheckGuard(tmpDatabufptr, cdatabufFilesize, databufRecord);
+ tmpDatabufptr.p->nextDatabuf = databufptr.i;
+ }//if
+}//Dbtc::linkKeybuf()
+
+/* ------------------------------------------------------------------------- */
+/* ------- LINK A TC CONNECT RECORD INTO THE API LIST OF TC CONNECTIONS --- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::linkTcInConnectionlist(Signal* signal)
+{
+ /* POINTER FOR THE CONNECT_RECORD */
+ TcConnectRecordPtr ltcTcConnectptr;
+
+ tcConnectptr.p->nextTcConnect = RNIL;
+ ltcTcConnectptr.i = apiConnectptr.p->lastTcConnect;
+ ptrCheck(ltcTcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ apiConnectptr.p->lastTcConnect = tcConnectptr.i;
+ if (ltcTcConnectptr.i == RNIL) {
+ jam();
+ apiConnectptr.p->firstTcConnect = tcConnectptr.i;
+ } else {
+ jam();
+ ptrGuard(ltcTcConnectptr);
+ ltcTcConnectptr.p->nextTcConnect = tcConnectptr.i;
+ }//if
+}//Dbtc::linkTcInConnectionlist()
+
+/*---------------------------------------------------------------------------*/
+/* RELEASE_ABORT_RESOURCES */
+/* THIS CODE RELEASES ALL RESOURCES AFTER AN ABORT OF A TRANSACTION AND ALSO */
+/* SENDS THE ABORT DECISION TO THE APPLICATION. */
+/*---------------------------------------------------------------------------*/
+void Dbtc::releaseAbortResources(Signal* signal)
+{
+ TcConnectRecordPtr rarTcConnectptr;
+
+ c_counters.cabortCount++;
+ if (apiConnectptr.p->cachePtr != RNIL) {
+ cachePtr.i = apiConnectptr.p->cachePtr;
+ ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ releaseAttrinfo();
+ releaseKeys();
+ }//if
+ tcConnectptr.i = apiConnectptr.p->firstTcConnect;
+ while (tcConnectptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ // Clear any markers that were set in CS_RECEIVING state
+ clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p);
+ rarTcConnectptr.i = tcConnectptr.p->nextTcConnect;
+ releaseTcCon();
+ tcConnectptr.i = rarTcConnectptr.i;
+ }//while
+ apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
+
+ // MASV let state be CS_ABORTING until all
+ // signals in the "air" have been received. Reset to CS_CONNECTED
+ // will be done when a TCKEYREQ with start flag is received
+ // or releaseApiCon is called
+ // apiConnectptr.p->apiConnectstate = CS_CONNECTED;
+ apiConnectptr.p->apiConnectstate = CS_ABORTING;
+ apiConnectptr.p->abortState = AS_IDLE;
+
+ if(apiConnectptr.p->m_exec_flag || apiConnectptr.p->apiFailState == ZTRUE){
+ jam();
+ bool ok = false;
+ Uint32 blockRef = apiConnectptr.p->ndbapiBlockref;
+ ReturnSignal ret = apiConnectptr.p->returnsignal;
+ apiConnectptr.p->returnsignal = RS_NO_RETURN;
+ apiConnectptr.p->m_exec_flag = 0;
+ switch(ret){
+ case RS_TCROLLBACKCONF:
+ jam();
+ ok = true;
+ signal->theData[0] = apiConnectptr.p->ndbapiConnect;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB);
+ break;
+ case RS_TCROLLBACKREP:{
+ jam();
+ ok = true;
+ TcRollbackRep * const tcRollbackRep =
+ (TcRollbackRep *) signal->getDataPtr();
+
+ tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect;
+ tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
+ tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
+ tcRollbackRep->returnCode = apiConnectptr.p->returncode;
+ sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
+ TcRollbackRep::SignalLength, JBB);
+ }
+ break;
+ case RS_NO_RETURN:
+ jam();
+ ok = true;
+ break;
+ case RS_TCKEYCONF:
+ case RS_TC_COMMITCONF:
+ break;
+ }
+ if(!ok){
+ jam();
+ ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal);
+ sendSystemError(signal);
+ }//if
+
+ }
+ setApiConTimer(apiConnectptr.i, 0,
+ 100000+c_apiConTimer_line[apiConnectptr.i]);
+ if (apiConnectptr.p->apiFailState == ZTRUE) {
+ jam();
+ handleApiFailState(signal, apiConnectptr.i);
+ return;
+ }//if
+}//Dbtc::releaseAbortResources()
+
+void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr)
+{
+ ApiConnectRecordPtr TlocalApiConnectptr;
+
+ TlocalApiConnectptr.i = TapiConnectPtr;
+ ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+ TlocalApiConnectptr.p->nextApiConnect = cfirstfreeApiConnect;
+ cfirstfreeApiConnect = TlocalApiConnectptr.i;
+ setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__);
+ TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED;
+ ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL);
+ TlocalApiConnectptr.p->ndbapiBlockref = 0;
+}//Dbtc::releaseApiCon()
+
+void Dbtc::releaseApiConnectFail(Signal* signal)
+{
+ apiConnectptr.p->apiConnectstate = CS_RESTART;
+ apiConnectptr.p->takeOverRec = (Uint8)Z8NIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->nextApiConnect = cfirstfreeApiConnectFail;
+ cfirstfreeApiConnectFail = apiConnectptr.i;
+}//Dbtc::releaseApiConnectFail()
+
+void Dbtc::releaseGcp(Signal* signal)
+{
+ ptrGuard(gcpPtr);
+ gcpPtr.p->nextGcp = cfirstfreeGcp;
+ cfirstfreeGcp = gcpPtr.i;
+}//Dbtc::releaseGcp()
+
+void Dbtc::releaseKeys()
+{
+ UintR Tmp;
+ databufptr.i = cachePtr.p->firstKeybuf;
+ while (databufptr.i != RNIL) {
+ jam();
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ Tmp = databufptr.p->nextDatabuf;
+ databufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = databufptr.i;
+ databufptr.i = Tmp;
+ }//while
+ cachePtr.p->firstKeybuf = RNIL;
+ cachePtr.p->lastKeybuf = RNIL;
+}//Dbtc::releaseKeys()
+
+void Dbtc::releaseTcConnectFail(Signal* signal)
+{
+ ptrGuard(tcConnectptr);
+ tcConnectptr.p->nextTcConnect = cfirstfreeTcConnectFail;
+ cfirstfreeTcConnectFail = tcConnectptr.i;
+}//Dbtc::releaseTcConnectFail()
+
+void Dbtc::seizeApiConnect(Signal* signal)
+{
+ if (cfirstfreeApiConnect != RNIL) {
+ jam();
+ terrorCode = ZOK;
+ apiConnectptr.i = cfirstfreeApiConnect; /* ASSIGN A FREE RECORD FROM */
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cfirstfreeApiConnect = apiConnectptr.p->nextApiConnect;
+ apiConnectptr.p->nextApiConnect = RNIL;
+ setApiConTimer(apiConnectptr.i, 0, __LINE__);
+ apiConnectptr.p->apiConnectstate = CS_CONNECTED; /* STATE OF CONNECTION */
+ apiConnectptr.p->triggerPending = false;
+ apiConnectptr.p->isIndexOp = false;
+ } else {
+ jam();
+ terrorCode = ZNO_FREE_API_CONNECTION;
+ }//if
+}//Dbtc::seizeApiConnect()
+
+void Dbtc::seizeApiConnectFail(Signal* signal)
+{
+ apiConnectptr.i = cfirstfreeApiConnectFail;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ cfirstfreeApiConnectFail = apiConnectptr.p->nextApiConnect;
+}//Dbtc::seizeApiConnectFail()
+
+void Dbtc::seizeDatabuf(Signal* signal)
+{
+ databufptr.i = cfirstfreeDatabuf;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+ cfirstfreeDatabuf = databufptr.p->nextDatabuf;
+ databufptr.p->nextDatabuf = RNIL;
+}//Dbtc::seizeDatabuf()
+
+void Dbtc::seizeTcConnect(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnect;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cfirstfreeTcConnect = tcConnectptr.p->nextTcConnect;
+ c_counters.cconcurrentOp++;
+ tcConnectptr.p->isIndexOp = false;
+}//Dbtc::seizeTcConnect()
+
+void Dbtc::seizeTcConnectFail(Signal* signal)
+{
+ tcConnectptr.i = cfirstfreeTcConnectFail;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ cfirstfreeTcConnectFail = tcConnectptr.p->nextTcConnect;
+}//Dbtc::seizeTcConnectFail()
+
+void Dbtc::sendAttrinfo(Signal* signal,
+ UintR TattrinfoPtr,
+ AttrbufRecord * const regAttrPtr,
+ UintR TBref)
+{
+ UintR TdataPos;
+ UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6, sig7;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ TdataPos = regAttrPtr->attrbuf[ZINBUF_DATA_LEN];
+ sig0 = TattrinfoPtr;
+ sig1 = regApiPtr->transid[0];
+ sig2 = regApiPtr->transid[1];
+
+ signal->theData[0] = sig0;
+ signal->theData[1] = sig1;
+ signal->theData[2] = sig2;
+
+ sig0 = regAttrPtr->attrbuf[0];
+ sig1 = regAttrPtr->attrbuf[1];
+ sig2 = regAttrPtr->attrbuf[2];
+ sig3 = regAttrPtr->attrbuf[3];
+ sig4 = regAttrPtr->attrbuf[4];
+ sig5 = regAttrPtr->attrbuf[5];
+ sig6 = regAttrPtr->attrbuf[6];
+ sig7 = regAttrPtr->attrbuf[7];
+
+ signal->theData[3] = sig0;
+ signal->theData[4] = sig1;
+ signal->theData[5] = sig2;
+ signal->theData[6] = sig3;
+ signal->theData[7] = sig4;
+ signal->theData[8] = sig5;
+ signal->theData[9] = sig6;
+ signal->theData[10] = sig7;
+
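+ // Three header words (a connect pointer and the transaction id) are
+ // followed by up to 22 attribute words, copied in unrolled chunks of
+ // 8, 7 and 7 so the ATTRINFO signal stays within 25 words.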
+ if (TdataPos > 8) {
+ sig0 = regAttrPtr->attrbuf[8];
+ sig1 = regAttrPtr->attrbuf[9];
+ sig2 = regAttrPtr->attrbuf[10];
+ sig3 = regAttrPtr->attrbuf[11];
+ sig4 = regAttrPtr->attrbuf[12];
+ sig5 = regAttrPtr->attrbuf[13];
+ sig6 = regAttrPtr->attrbuf[14];
+
+ jam();
+ signal->theData[11] = sig0;
+ signal->theData[12] = sig1;
+ signal->theData[13] = sig2;
+ signal->theData[14] = sig3;
+ signal->theData[15] = sig4;
+ signal->theData[16] = sig5;
+ signal->theData[17] = sig6;
+
+ if (TdataPos > 15) {
+
+ sig0 = regAttrPtr->attrbuf[15];
+ sig1 = regAttrPtr->attrbuf[16];
+ sig2 = regAttrPtr->attrbuf[17];
+ sig3 = regAttrPtr->attrbuf[18];
+ sig4 = regAttrPtr->attrbuf[19];
+ sig5 = regAttrPtr->attrbuf[20];
+ sig6 = regAttrPtr->attrbuf[21];
+
+ jam();
+ signal->theData[18] = sig0;
+ signal->theData[19] = sig1;
+ signal->theData[20] = sig2;
+ signal->theData[21] = sig3;
+ signal->theData[22] = sig4;
+ signal->theData[23] = sig5;
+ signal->theData[24] = sig6;
+ }//if
+ }//if
+ sendSignal(TBref, GSN_ATTRINFO, signal, TdataPos + 3, JBB);
+}//Dbtc::sendAttrinfo()
+
+void Dbtc::sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr)
+{
+ signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_CONTROL;
+ signal->theData[1] = TapiConPtr;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbtc::sendContinueTimeOutControl()
+
+void Dbtc::sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len)
+{
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ signal->theData[3] = cdata[0];
+ signal->theData[4] = cdata[1];
+ signal->theData[5] = cdata[2];
+ signal->theData[6] = cdata[3];
+ signal->theData[7] = cdata[4];
+ signal->theData[8] = cdata[5];
+ signal->theData[9] = cdata[6];
+ signal->theData[10] = cdata[7];
+ signal->theData[11] = cdata[8];
+ signal->theData[12] = cdata[9];
+ signal->theData[13] = cdata[10];
+ signal->theData[14] = cdata[11];
+ signal->theData[15] = cdata[12];
+ signal->theData[16] = cdata[13];
+ signal->theData[17] = cdata[14];
+ signal->theData[18] = cdata[15];
+ signal->theData[19] = cdata[16];
+ signal->theData[20] = cdata[17];
+ signal->theData[21] = cdata[18];
+ signal->theData[22] = cdata[19];
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
+}//Dbtc::sendKeyinfo()
+
+void Dbtc::sendSystemError(Signal* signal)
+{
+ progError(0, 0);
+}//Dbtc::sendSystemError()
+
+/* ------------------------------------------------------------------------- */
+/* ------- LINK ACTUAL GCP OUT OF LIST ------- */
+/* ------------------------------------------------------------------------- */
+void Dbtc::unlinkGcp(Signal* signal)
+{
+ if (cfirstgcp == gcpPtr.i) {
+ jam();
+ cfirstgcp = gcpPtr.p->nextGcp;
+ if (gcpPtr.i == clastgcp) {
+ jam();
+ clastgcp = RNIL;
+ }//if
+ } else {
+ jam();
+ /* --------------------------------------------------------------------
+ * WE ARE TRYING TO REMOVE A GLOBAL CHECKPOINT WHICH WAS NOT THE OLDEST.
+ * THIS IS A SYSTEM ERROR.
+ * ------------------------------------------------------------------- */
+ sendSystemError(signal);
+ }//if
+ gcpPtr.p->nextGcp = cfirstfreeGcp;
+ cfirstfreeGcp = gcpPtr.i;
+}//Dbtc::unlinkGcp()
+
+void
+Dbtc::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+ if(signal->theData[0] == DumpStateOrd::CommitAckMarkersSize){
+ infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+ }
+ if(signal->theData[0] == DumpStateOrd::CommitAckMarkersDump){
+ infoEvent("TC: m_commitAckMarkerPool: %d free size: %d",
+ m_commitAckMarkerPool.getNoOfFree(),
+ m_commitAckMarkerPool.getSize());
+
+ CommitAckMarkerIterator iter;
+ for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL;
+ m_commitAckMarkerHash.next(iter)){
+ infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)"
+ " Api: %d Lghs(%d): %d %d %d %d bucket = %d",
+ iter.curr.i,
+ iter.curr.p->transid1,
+ iter.curr.p->transid2,
+ iter.curr.p->apiNodeId,
+ iter.curr.p->noOfLqhs,
+ iter.curr.p->lqhNodeId[0],
+ iter.curr.p->lqhNodeId[1],
+ iter.curr.p->lqhNodeId[2],
+ iter.curr.p->lqhNodeId[3],
+ iter.bucket);
+ }
+ }
+ // Dump all ScanFragRecs
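+ // One record is dumped per invocation; the next index is handed back to
+ // this block in a fresh DUMP_STATE_ORD so a large pool is walked without
+ // one long execution.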
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanFragRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ScanFragRec - size: %d",
+ cscanFragrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanFragrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllScanFragRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ScanFragRec
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanFragRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanFragrecFileSize)
+ return;
+
+ ScanFragRecPtr sfp;
+ sfp.i = recordNo;
+ c_scan_frag_pool.getPtr(sfp);
+ infoEvent("Dbtc::ScanFragRec[%d]: state=%d fragid=%d",
+ sfp.i,
+ sfp.p->scanFragState,
+ sfp.p->scanFragId);
+ infoEvent(" nodeid=%d, timer=%d",
+ refToNode(sfp.p->lqhBlockref),
+ sfp.p->scanFragTimer);
+ }
+
+ // Dump all ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump all active ScanRecords
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllActiveScanRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump active ScanRecord - size: %d",
+ cscanrecFileSize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRecord);
+ if (sp.p->scanState != ScanRecord::IDLE){
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ if (recordNo < cscanrecFileSize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllActiveScanRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ScanRecord
+ // and associated ScanFragRec and ApiConnectRecord
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= cscanrecFileSize)
+ return;
+
+ ScanRecordPtr sp;
+ sp.i = recordNo;
+ ptrAss(sp, scanRecord);
+ infoEvent("Dbtc::ScanRecord[%d]: state=%d"
+ "nextfrag=%d, nofrag=%d",
+ sp.i,
+ sp.p->scanState,
+ sp.p->scanNextFragId,
+ sp.p->scanNoFrag);
+ infoEvent(" ailen=%d, para=%d, receivedop=%d, noOprePperFrag=%d",
+ sp.p->scanAiLength,
+ sp.p->scanParallel,
+ sp.p->scanReceivedOperations,
+ sp.p->batch_size_rows);
+ infoEvent(" schv=%d, tab=%d, sproc=%d",
+ sp.p->scanSchemaVersion,
+ sp.p->scanTableref,
+ sp.p->scanStoredProcId);
+ infoEvent(" apiRec=%d, next=%d",
+ sp.p->scanApiRec, sp.p->nextScan);
+
+ if (sp.p->scanState != ScanRecord::IDLE){
+ // Request dump of ScanFragRec
+ ScanFragRecPtr sfptr;
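+ // DUMP_SFR walks one scan-fragment list and re-enters this function once
+ // per record with TcDumpOneScanFragRec.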
+#define DUMP_SFR(x){\
+ ScanFragList list(c_scan_frag_pool, x);\
+ for(list.first(sfptr); !sfptr.isNull(); list.next(sfptr)){\
+ dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; \
+ dumpState->args[1] = sfptr.i;\
+ execDUMP_STATE_ORD(signal);\
+ }}
+
+ DUMP_SFR(sp.p->m_running_scan_frags);
+ DUMP_SFR(sp.p->m_queued_scan_frags);
+ DUMP_SFR(sp.p->m_delivered_scan_frags);
+
+ // Request dump of ApiConnectRecord
+ dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
+ dumpState->args[1] = sp.p->scanApiRec;
+ execDUMP_STATE_ORD(signal);
+ }
+
+ }
+
+ // Dump all ApiConnectRecord(s)
+ if (dumpState->args[0] == DumpStateOrd::TcDumpAllApiConnectRec){
+ Uint32 recordNo = 0;
+ if (signal->getLength() == 1)
+ infoEvent("TC: Dump all ApiConnectRecord - size: %d",
+ capiConnectFilesize);
+ else if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
+ dumpState->args[1] = recordNo;
+ execDUMP_STATE_ORD(signal);
+
+ if (recordNo < capiConnectFilesize-1){
+ dumpState->args[0] = DumpStateOrd::TcDumpAllApiConnectRec;
+ dumpState->args[1] = recordNo+1;
+ sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB);
+ }
+ }
+
+ // Dump one ApiConnectRecord
+ if (dumpState->args[0] == DumpStateOrd::TcDumpOneApiConnectRec){
+ Uint32 recordNo = RNIL;
+ if (signal->getLength() == 2)
+ recordNo = dumpState->args[1];
+ else
+ return;
+
+ if (recordNo >= capiConnectFilesize)
+ return;
+
+ ApiConnectRecordPtr ap;
+ ap.i = recordNo;
+ ptrAss(ap, apiConnectRecord);
+ infoEvent("Dbtc::ApiConnectRecord[%d]: state=%d, abortState=%d, "
+ "apiFailState=%d",
+ ap.i,
+ ap.p->apiConnectstate,
+ ap.p->abortState,
+ ap.p->apiFailState);
+ infoEvent(" transid(0x%x, 0x%x), apiBref=0x%x, scanRec=%d",
+ ap.p->transid[0],
+ ap.p->transid[1],
+ ap.p->ndbapiBlockref,
+ ap.p->apiScanRec);
+ infoEvent(" ctcTimer=%d, apiTimer=%d, counter=%d, retcode=%d, "
+ "retsig=%d",
+ ctcTimer, getApiConTimer(ap.i),
+ ap.p->counter,
+ ap.p->returncode,
+ ap.p->returnsignal);
+ infoEvent(" lqhkeyconfrec=%d, lqhkeyreqrec=%d, "
+ "tckeyrec=%d",
+ ap.p->lqhkeyconfrec,
+ ap.p->lqhkeyreqrec,
+ ap.p->tckeyrec);
+ infoEvent(" next=%d ",
+ ap.p->nextApiConnect);
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::TcSetTransactionTimeout){
+ jam();
+ if(signal->getLength() > 1){
+ set_timeout_value(signal->theData[1]);
+ }
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){
+ jam();
+ if(signal->getLength() > 1){
+ set_appl_timeout_value(signal->theData[1]);
+ }
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StartTcTimer){
+ c_counters.c_trans_status = TransCounters::Started;
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StopTcTimer){
+ c_counters.c_trans_status = TransCounters::Off;
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StartPeriodicTcTimer){
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
+}//Dbtc::execDUMP_STATE_ORD()
+
+void Dbtc::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+
+ switch (var) {
+
+ case TransactionInactiveTime:
+ jam();
+ set_appl_timeout_value(val);
+ break;
+ case TransactionDeadlockDetectionTimeout:
+ set_timeout_value(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfConcurrentProcessesHandleTakeover:
+ set_no_parallel_takeover(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+}
+
+void Dbtc::execABORT_ALL_REQ(Signal* signal)
+{
+ jamEntry();
+ AbortAllReq * req = (AbortAllReq*)&signal->theData[0];
+ AbortAllRef * ref = (AbortAllRef*)&signal->theData[0];
+
+ const Uint32 senderData = req->senderData;
+ const BlockReference senderRef = req->senderRef;
+
+ if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::InvalidState;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ if(c_abortRec.clientRef != 0){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::AbortAlreadyInProgress;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ if(refToNode(senderRef) != getOwnNodeId()){
+ jam();
+
+ ref->senderData = senderData;
+ ref->errorCode = AbortAllRef::FunctionNotImplemented;
+ sendSignal(senderRef, GSN_ABORT_ALL_REF, signal,
+ AbortAllRef::SignalLength, JBB);
+ return;
+ }
+
+ c_abortRec.clientRef = senderRef;
+ c_abortRec.clientData = senderData;
+ c_abortRec.oldTimeOutValue = ctimeOutValue;
+
+ ctimeOutValue = 0;
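+ // Number of 200 ms CONTINUEB rounds to wait before confirming the abort;
+ // assuming ctimeOutCheckDelay is expressed in 10 ms units this rounds two
+ // full timeout-check periods up to the nearest 200 ms tick.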
+ const Uint32 sleepTime = (2 * 10 * ctimeOutCheckDelay + 199) / 200;
+
+ checkAbortAllTimeout(signal, (sleepTime == 0 ? 1 : sleepTime));
+}
+
+void Dbtc::checkAbortAllTimeout(Signal* signal, Uint32 sleepTime)
+{
+
+ ndbrequire(c_abortRec.clientRef != 0);
+
+ if(sleepTime > 0){
+ jam();
+
+ sleepTime -= 1;
+ signal->theData[0] = TcContinueB::ZWAIT_ABORT_ALL;
+ signal->theData[1] = sleepTime;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 200, 2);
+ return;
+ }
+
+ AbortAllConf * conf = (AbortAllConf*)&signal->theData[0];
+ conf->senderData = c_abortRec.clientData;
+ sendSignal(c_abortRec.clientRef, GSN_ABORT_ALL_CONF, signal,
+ AbortAllConf::SignalLength, JBB);
+
+ ctimeOutValue = c_abortRec.oldTimeOutValue;
+ c_abortRec.clientRef = 0;
+}
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ------------------ TRIGGER AND INDEX HANDLING ------------------ */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
+void Dbtc::execCREATE_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateTrigReq * const createTrigReq =
+ (CreateTrigReq *)&signal->theData[0];
+ TcDefinedTriggerData* triggerData;
+ DefinedTriggerPtr triggerPtr;
+ BlockReference sender = signal->senderBlockRef();
+
+ releaseSections(signal);
+
+ triggerPtr.i = createTrigReq->getTriggerId();
+ if (ERROR_INSERTED(8033) ||
+ !c_theDefinedTriggers.seizeId(triggerPtr,
+ createTrigReq->getTriggerId())) {
+ CLEAR_ERROR_INSERT_VALUE;
+ // Failed to allocate trigger record
+ CreateTrigRef * const createTrigRef =
+ (CreateTrigRef *)&signal->theData[0];
+
+ createTrigRef->setConnectionPtr(createTrigReq->getConnectionPtr());
+ createTrigRef->setErrorCode(CreateTrigRef::TooManyTriggers);
+ sendSignal(sender, GSN_CREATE_TRIG_REF,
+ signal, CreateTrigRef::SignalLength, JBB);
+ return;
+ }
+
+ triggerData = triggerPtr.p;
+ triggerData->triggerId = createTrigReq->getTriggerId();
+ triggerData->triggerType = createTrigReq->getTriggerType();
+ triggerData->triggerEvent = createTrigReq->getTriggerEvent();
+ triggerData->attributeMask = createTrigReq->getAttributeMask();
+ if (triggerData->triggerType == TriggerType::SECONDARY_INDEX)
+ triggerData->indexId = createTrigReq->getIndexId();
+ CreateTrigConf * const createTrigConf =
+ (CreateTrigConf *)&signal->theData[0];
+
+ createTrigConf->setConnectionPtr(createTrigReq->getConnectionPtr());
+ sendSignal(sender, GSN_CREATE_TRIG_CONF,
+ signal, CreateTrigConf::SignalLength, JBB);
+}
+
+
+void Dbtc::execDROP_TRIG_REQ(Signal* signal)
+{
+ jamEntry();
+ DropTrigReq * const dropTrigReq = (DropTrigReq *)&signal->theData[0];
+ BlockReference sender = signal->senderBlockRef();
+
+ if ((c_theDefinedTriggers.getPtr(dropTrigReq->getTriggerId())) == NULL) {
+ jam();
+ // Failed to find trigger record
+ DropTrigRef * const dropTrigRef = (DropTrigRef *)&signal->theData[0];
+
+ dropTrigRef->setConnectionPtr(dropTrigReq->getConnectionPtr());
+ dropTrigRef->setErrorCode(DropTrigRef::TriggerNotFound);
+ sendSignal(sender, GSN_DROP_TRIG_REF,
+ signal, DropTrigRef::SignalLength, JBB);
+ return;
+ }
+
+ // Release trigger record
+ c_theDefinedTriggers.release(dropTrigReq->getTriggerId());
+
+ DropTrigConf * const dropTrigConf = (DropTrigConf *)&signal->theData[0];
+
+ dropTrigConf->setConnectionPtr(dropTrigReq->getConnectionPtr());
+ sendSignal(sender, GSN_DROP_TRIG_CONF,
+ signal, DropTrigConf::SignalLength, JBB);
+}
+
+void Dbtc::execCREATE_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ CreateIndxReq * const createIndxReq =
+ (CreateIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ TcIndexDataPtr indexPtr;
+ BlockReference sender = signal->senderBlockRef();
+
+ if (ERROR_INSERTED(8034) ||
+ !c_theIndexes.seizeId(indexPtr, createIndxReq->getIndexId())) {
+ CLEAR_ERROR_INSERT_VALUE;
+ // Failed to allocate index record
+ CreateIndxRef * const createIndxRef =
+ (CreateIndxRef *)&signal->theData[0];
+
+ createIndxRef->setConnectionPtr(createIndxReq->getConnectionPtr());
+ createIndxRef->setErrorCode(CreateIndxRef::TooManyIndexes);
+ releaseSections(signal);
+ sendSignal(sender, GSN_CREATE_INDX_REF,
+ signal, CreateIndxRef::SignalLength, JBB);
+ return;
+ }
+ indexData = indexPtr.p;
+ // Indexes always start in state IS_BUILDING
+ // Will become IS_ONLINE in execALTER_INDX_REQ
+ indexData->indexState = IS_BUILDING;
+ indexData->indexId = indexPtr.i;
+ indexData->primaryTableId = createIndxReq->getTableId();
+
+ // So far need only attribute count
+ SegmentedSectionPtr ssPtr;
+ signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION);
+ SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool());
+ r0.reset(); // undo implicit first()
+ if (!r0.getWord(&indexData->attributeList.sz) ||
+ !r0.getWords(indexData->attributeList.id, indexData->attributeList.sz)) {
+ ndbrequire(false);
+ }
+ indexData->primaryKeyPos = indexData->attributeList.sz;
+
+ releaseSections(signal);
+
+ CreateIndxConf * const createIndxConf =
+ (CreateIndxConf *)&signal->theData[0];
+
+ createIndxConf->setConnectionPtr(createIndxReq->getConnectionPtr());
+ createIndxConf->setTableId(createIndxReq->getTableId());
+ createIndxConf->setIndexId(createIndxReq->getIndexId());
+ sendSignal(sender, GSN_CREATE_INDX_CONF,
+ signal, CreateIndxConf::SignalLength, JBB);
+}
+
+void Dbtc::execALTER_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ AlterIndxReq * const alterIndxReq = (AlterIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ //BlockReference sender = signal->senderBlockRef();
+ BlockReference sender = (BlockReference) alterIndxReq->getUserRef();
+ Uint32 connectionPtr = alterIndxReq->getConnectionPtr();
+ AlterIndxReq::RequestType requestType = alterIndxReq->getRequestType();
+ Uint32 tableId = alterIndxReq->getTableId();
+ Uint32 indexId = alterIndxReq->getIndexId();
+ bool online = (alterIndxReq->getOnline() == 1);
+
+ if ((indexData = c_theIndexes.getPtr(indexId)) == NULL) {
+ jam();
+ // Failed to find index record
+ AlterIndxRef * const alterIndxRef =
+ (AlterIndxRef *)signal->getDataPtrSend();
+
+ alterIndxRef->setUserRef(reference());
+ alterIndxRef->setConnectionPtr(connectionPtr);
+ alterIndxRef->setRequestType(requestType);
+ alterIndxRef->setTableId(tableId);
+ alterIndxRef->setIndexId(indexId);
+ alterIndxRef->setErrorCode(AlterIndxRef::IndexNotFound);
+ alterIndxRef->setErrorLine(__LINE__);
+ alterIndxRef->setErrorNode(getOwnNodeId());
+ sendSignal(sender, GSN_ALTER_INDX_REF,
+ signal, AlterIndxRef::SignalLength, JBB);
+ return;
+ }
+ // Found index record, alter its state
+ if (online) {
+ jam();
+ indexData->indexState = IS_ONLINE;
+ } else {
+ jam();
+ indexData->indexState = IS_BUILDING;
+ }//if
+ AlterIndxConf * const alterIndxConf =
+ (AlterIndxConf *)signal->getDataPtrSend();
+
+ alterIndxConf->setUserRef(reference());
+ alterIndxConf->setConnectionPtr(connectionPtr);
+ alterIndxConf->setRequestType(requestType);
+ alterIndxConf->setTableId(tableId);
+ alterIndxConf->setIndexId(indexId);
+ sendSignal(sender, GSN_ALTER_INDX_CONF,
+ signal, AlterIndxConf::SignalLength, JBB);
+}
+
+void Dbtc::execFIRE_TRIG_ORD(Signal* signal)
+{
+ jamEntry();
+ FireTrigOrd * const fireOrd = (FireTrigOrd *)signal->getDataPtr();
+ ApiConnectRecord *localApiConnectRecord = apiConnectRecord;
+ ApiConnectRecordPtr transPtr;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ TcConnectRecordPtr opPtr;
+
+ /**
+ * TODO
+ * Check transid,
+ * Fix overload, i.e. invalid word count
+ */
+ TcFiredTriggerData key;
+ key.fireingOperation = fireOrd->getConnectionPtr();
+ key.nodeId = refToNode(signal->getSendersBlockRef());
+ FiredTriggerPtr trigPtr;
+ if(c_firedTriggerHash.find(trigPtr, key)){
+
+ c_firedTriggerHash.remove(trigPtr);
+
+ bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
+ ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
+ ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
+ if(ok){
+ opPtr.i = key.fireingOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ transPtr.i = opPtr.p->apiConnect;
+ transPtr.p = &localApiConnectRecord[transPtr.i];
+
+ opPtr.p->noReceivedTriggers++;
+ opPtr.p->triggerExecutionCount++;
+
+ // Insert fired trigger in execution queue
+ transPtr.p->theFiredTriggers.add(trigPtr);
+ if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+ executeTriggers(signal, &transPtr);
+ }
+ return;
+ }
+ jam();
+ c_theFiredTriggerPool.release(trigPtr);
+ }
+ jam();
+ /**
+ * Failed to find record or invalid word counts
+ */
+ ndbrequire(false);
+}
+
+void Dbtc::execTRIG_ATTRINFO(Signal* signal)
+{
+ jamEntry();
+ TrigAttrInfo * const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtr();
+ Uint32 attrInfoLength = signal->getLength() - TrigAttrInfo::StaticLength;
+ const Uint32 *src = trigAttrInfo->getData();
+ FiredTriggerPtr firedTrigPtr;
+
+ TcFiredTriggerData key;
+ key.fireingOperation = trigAttrInfo->getConnectionPtr();
+ key.nodeId = refToNode(signal->getSendersBlockRef());
+ if(!c_firedTriggerHash.find(firedTrigPtr, key)){
+ jam();
+ if(!c_firedTriggerHash.seize(firedTrigPtr)){
+ jam();
+ /**
+ * Will be handled when FIRE_TRIG_ORD arrives
+ */
+ ndbout_c("op: %d node: %d failed to seize",
+ key.fireingOperation, key.nodeId);
+ return;
+ }
+ ndbrequire(firedTrigPtr.p->keyValues.getSize() == 0 &&
+ firedTrigPtr.p->beforeValues.getSize() == 0 &&
+ firedTrigPtr.p->afterValues.getSize() == 0);
+
+ firedTrigPtr.p->nodeId = refToNode(signal->getSendersBlockRef());
+ firedTrigPtr.p->fireingOperation = key.fireingOperation;
+ firedTrigPtr.p->triggerId = trigAttrInfo->getTriggerId();
+ c_firedTriggerHash.add(firedTrigPtr);
+ }
+
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ switch (trigAttrInfo->getAttrInfoType()) {
+ case(TrigAttrInfo::PRIMARY_KEY):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->keyValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ case(TrigAttrInfo::BEFORE_VALUES):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->beforeValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ case(TrigAttrInfo::AFTER_VALUES):
+ jam();
+ {
+ LocalDataBuffer<11> buf(pool, firedTrigPtr.p->afterValues);
+ buf.append(src, attrInfoLength);
+ }
+ break;
+ default:
+ ndbrequire(false);
+ }
+}
+
+void Dbtc::execDROP_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ DropIndxReq * const dropIndxReq = (DropIndxReq *)signal->getDataPtr();
+ TcIndexData* indexData;
+ BlockReference sender = signal->senderBlockRef();
+
+ if ((indexData = c_theIndexes.getPtr(dropIndxReq->getIndexId())) == NULL) {
+ jam();
+ // Failed to find index record
+ DropIndxRef * const dropIndxRef =
+ (DropIndxRef *)signal->getDataPtrSend();
+
+ dropIndxRef->setConnectionPtr(dropIndxReq->getConnectionPtr());
+ dropIndxRef->setErrorCode(DropIndxRef::IndexNotFound);
+ sendSignal(sender, GSN_DROP_INDX_REF,
+ signal, DropIndxRef::SignalLength, JBB);
+ return;
+ }
+ // Release index record
+ c_theIndexes.release(dropIndxReq->getIndexId());
+
+ DropIndxConf * const dropIndxConf =
+ (DropIndxConf *)signal->getDataPtrSend();
+
+ dropIndxConf->setConnectionPtr(dropIndxReq->getConnectionPtr());
+ sendSignal(sender, GSN_DROP_INDX_CONF,
+ signal, DropIndxConf::SignalLength, JBB);
+}
+
+void Dbtc::execTCINDXREQ(Signal* signal)
+{
+ jamEntry();
+
+ TcKeyReq * const tcIndxReq = (TcKeyReq *)signal->getDataPtr();
+ const UintR TapiIndex = tcIndxReq->apiConnectPtr;
+ Uint32 tcIndxRequestInfo = tcIndxReq->requestInfo;
+ Uint32 startFlag = tcIndxReq->getStartFlag(tcIndxRequestInfo);
+ Uint32 * dataPtr = &tcIndxReq->scanInfo;
+ Uint32 indexBufSize = 8; // Maximum for index in TCINDXREQ
+ Uint32 attrBufSize = 5; // Maximum for attrInfo in TCINDXREQ
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TapiIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ // Seize index operation
+ TcIndexOperationPtr indexOpPtr;
+ if ((startFlag == 1) &&
+ (regApiPtr->apiConnectstate == CS_CONNECTED ||
+ (regApiPtr->apiConnectstate == CS_STARTED &&
+ regApiPtr->firstTcConnect == RNIL)) ||
+ (regApiPtr->apiConnectstate == CS_ABORTING &&
+ regApiPtr->abortState == AS_IDLE)) {
+ jam();
+ // This is a newly started transaction, clean-up
+ releaseAllSeizedIndexOperations(regApiPtr);
+ }//if
+ if (!seizeIndexOperation(regApiPtr, indexOpPtr)) {
+ jam();
+ // Failed to allocate index operation
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = tcIndxReq->senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4000;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ TcIndexOperation* indexOp = indexOpPtr.p;
+ indexOp->indexOpId = indexOpPtr.i;
+
+ // Save original signal
+ indexOp->tcIndxReq = *tcIndxReq;
+ indexOp->connectionIndex = TapiIndex;
+ regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
+
+ // If operation is readTupleExclusive or updateTuple then read index
+ // table with exclusive lock
+ Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo);
+ Uint32 attrLength = tcIndxReq->attrLen;
+ indexOp->expectedKeyInfo = indexLength;
+ Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
+ indexOp->expectedAttrInfo = attrLength;
+ Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
+ if (saveINDXKEYINFO(signal,
+ indexOp,
+ dataPtr,
+ includedIndexLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ return;
+ }
+ dataPtr += includedIndexLength;
+ if (saveINDXATTRINFO(signal,
+ indexOp,
+ dataPtr,
+ includedAttrLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ return;
+ }
+}
+
+
+void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag)
+{
+ HostRecordPtr localHostptr;
+ ApiConnectRecord * const regApiPtr = apiConnectptr.p;
+ const UintR TopWords = (UintR)regApiPtr->tcindxrec;
+ localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
+ const Uint32 type = getNodeInfo(localHostptr.i).m_type;
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
+ const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 0 : 1);
+ ptrAss(localHostptr, hostRecord);
+ UintR TcurrLen = localHostptr.p->noOfWordsTCINDXCONF;
+ UintR confInfo = 0;
+ TcIndxConf::setNoOfOperations(confInfo, (TopWords >> 1));
+ TcIndxConf::setCommitFlag(confInfo, TcommitFlag == 1);
+ TcIndxConf::setMarkerFlag(confInfo, Tmarker);
+ const UintR TpacketLen = 6 + TopWords;
+ regApiPtr->tcindxrec = 0;
+
+ if(TcommitFlag || (regApiPtr->lqhkeyreqrec == regApiPtr->lqhkeyconfrec)){
+ jam();
+ regApiPtr->m_exec_flag = 0;
+ }
+
+ if ((TpacketLen > 25) || !is_api){
+ TcIndxConf * const tcIndxConf = (TcIndxConf *)signal->getDataPtrSend();
+
+ jam();
+ tcIndxConf->apiConnectPtr = regApiPtr->ndbapiConnect;
+ tcIndxConf->gci = regApiPtr->globalcheckpointid;
+ tcIndxConf->confInfo = confInfo;
+ tcIndxConf->transId1 = regApiPtr->transid[0];
+ tcIndxConf->transId2 = regApiPtr->transid[1];
+ copyFromToLen(&regApiPtr->tcIndxSendArray[0],
+ (UintR*)&tcIndxConf->operations,
+ (UintR)ZTCOPCONF_SIZE);
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCINDXCONF, signal, (TpacketLen - 1), JBB);
+ return;
+ } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) {
+ jam();
+ sendPackedTCINDXCONF(signal, localHostptr.p, localHostptr.i);
+ TcurrLen = 0;
+ } else {
+ jam();
+ updatePackedList(signal, localHostptr.p, localHostptr.i);
+ }//if
+// -------------------------------------------------------------------------
+// The header contains the block reference of receiver plus the real signal
+// length - 3, since we have the real signal length plus one additional word
+// for the header we have to do - 4.
+// -------------------------------------------------------------------------
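+// With a single operation, for example, TopWords == 2 and TpacketLen == 8;
+// the real TCINDXCONF length is TpacketLen - 1 == 7, so the header word
+// stores 7 - 3 == TpacketLen - 4 == 4 in its low 16 bits.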
+ UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4);
+ UintR Tpack1 = regApiPtr->ndbapiConnect;
+ UintR Tpack2 = regApiPtr->globalcheckpointid;
+ UintR Tpack3 = confInfo;
+ UintR Tpack4 = regApiPtr->transid[0];
+ UintR Tpack5 = regApiPtr->transid[1];
+
+ localHostptr.p->noOfWordsTCINDXCONF = TcurrLen + TpacketLen;
+
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 0] = Tpack0;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 1] = Tpack1;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 2] = Tpack2;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 3] = Tpack3;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 4] = Tpack4;
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 5] = Tpack5;
+
+ UintR Ti;
+ for (Ti = 6; Ti < TpacketLen; Ti++) {
+ localHostptr.p->packedWordsTCINDXCONF[TcurrLen + Ti] =
+ regApiPtr->tcIndxSendArray[Ti - 6];
+ }//for
+}//Dbtc::sendTcIndxConf()
+
+void Dbtc::execINDXKEYINFO(Signal* signal)
+{
+ jamEntry();
+ Uint32 keyInfoLength = signal->getLength() - IndxKeyInfo::HeaderLength;
+ IndxKeyInfo * const indxKeyInfo = (IndxKeyInfo *)signal->getDataPtr();
+ const Uint32 *src = indxKeyInfo->getData();
+ const UintR TconnectIndex = indxKeyInfo->connectPtr;
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TconnectIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ TcIndexOperationPtr indexOpPtr;
+ TcIndexOperation* indexOp;
+
+ indexOpPtr.i = regApiPtr->accumulatingIndexOp;
+ indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ if (saveINDXKEYINFO(signal,
+ indexOp,
+ src,
+ keyInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
+}
+
+void Dbtc::execINDXATTRINFO(Signal* signal)
+{
+ jamEntry();
+ Uint32 attrInfoLength = signal->getLength() - IndxAttrInfo::HeaderLength;
+ IndxAttrInfo * const indxAttrInfo = (IndxAttrInfo *)signal->getDataPtr();
+ const Uint32 *src = indxAttrInfo->getData();
+ const UintR TconnectIndex = indxAttrInfo->connectPtr;
+ ApiConnectRecordPtr transPtr;
+ transPtr.i = TconnectIndex;
+ if (transPtr.i >= capiConnectFilesize) {
+ jam();
+ warningHandlerLab(signal);
+ return;
+ }//if
+ ptrAss(transPtr, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+ TcIndexOperationPtr indexOpPtr;
+ TcIndexOperation* indexOp;
+
+ indexOpPtr.i = regApiPtr->accumulatingIndexOp;
+ indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ if (saveINDXATTRINFO(signal,
+ indexOp,
+ src,
+ attrInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
+}
+
+/**
+ * Save signal INDXKEYINFO
+ * Return true if we have received all needed data
+ */
+bool Dbtc::saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ if (!indexOp->keyInfo.append(src, len)) {
+ jam();
+ // Failed to seize keyInfo, abort transaction
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveINDXKEYINFO: Failed to seize keyinfo\n");
+#endif
+ // Abort transaction
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return true;
+ }
+ if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
+ jam();
+ return true;
+ }
+ return false;
+}
+
+bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
+{
+ return (indexOp->keyInfo.getSize() == indexOp->expectedKeyInfo);
+}
+
+/**
+ * Save signal INDXATTRINFO
+ * Return true if we have received all needed data
+ */
+bool Dbtc::saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ if (!indexOp->attrInfo.append(src, len)) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
+#endif
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return true;
+ }
+ if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
+ jam();
+ return true;
+ }
+ return false;
+}
+
+bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
+{
+ return (indexOp->attrInfo.getSize() == indexOp->expectedAttrInfo);
+}
+
+bool Dbtc::saveTRANSID_AI(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
+{
+ Uint32 currentTransIdAILength = indexOp->transIdAI.getSize();
+
+ if (currentTransIdAILength == 0) {
+ jam();
+ // Read first AttributeHeader to get expected size
+ // of the single key attribute expected
+ AttributeHeader* head = (AttributeHeader *) src;
+ indexOp->expectedTransIdAI = head->getHeaderSize() + head->getDataSize();
+ }
+ if (!indexOp->transIdAI.append(src, len)) {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize transIdAI\n");
+#endif
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ releaseIndexOperation(apiConnectptr.p, indexOp);
+ terrorCode = 4000;
+ abortErrorLab(signal);
+ return false;
+ }
+ return true;
+}
+
+bool Dbtc::receivedAllTRANSID_AI(TcIndexOperation* indexOp)
+{
+ return (indexOp->transIdAI.getSize() == indexOp->expectedTransIdAI);
+}
+
+/**
+ * Receive signal TCKEYCONF
+ * This can be either the return of reading an index table
+ * or performing an index operation
+ */
+void Dbtc::execTCKEYCONF(Signal* signal)
+{
+ TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtr();
+ TcIndexOperationPtr indexOpPtr;
+
+ jamEntry();
+ indexOpPtr.i = tcKeyConf->apiConnectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ Uint32 confInfo = tcKeyConf->confInfo;
+
+ /**
+ * Check on TCKEYCONF whether the transaction was committed
+ */
+ Uint32 Tcommit = TcKeyConf::getCommitFlag(confInfo);
+
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ return;
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ apiConnectptr.p = regApiPtr;
+ apiConnectptr.i = TconnectIndex;
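+ // Index operation state machine: a TCKEYCONF for the index-table read
+ // either waits for the matching TRANSID_AI or, if that already arrived,
+ // triggers the base-table operation; a TCKEYCONF in IOS_INDEX_OPERATION
+ // completes the operation and is reported to the API as TCINDXCONF.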
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ case(IOS_INDEX_ACCESS): {
+ jam();
+ // Wait for TRANSID_AI
+ indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI;
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
+ jam();
+ // Double TCKEYCONF, should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+ // Continue with index operation
+ executeIndexOperation(signal, regApiPtr, indexOp);
+ break;
+ }
+ case(IOS_INDEX_OPERATION): {
+ // We are done, send TCINDXCONF
+ jam();
+ Uint32 Ttcindxrec = regApiPtr->tcindxrec;
+ // Copy reply from TcKeyConf
+
+ ndbassert(regApiPtr->noIndexOp);
+ regApiPtr->noIndexOp--; // Decrease count
+ regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
+ regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
+ tcKeyConf->operations[0].attrInfoLen;
+ regApiPtr->tcindxrec = Ttcindxrec + 2;
+ if (regApiPtr->noIndexOp == 0) {
+ jam();
+ sendTcIndxConf(signal, Tcommit);
+ } else if (regApiPtr->tcindxrec == ZTCOPCONF_SIZE) {
+ jam();
+ sendTcIndxConf(signal, 0);
+ }
+ releaseIndexOperation(regApiPtr, indexOp);
+ break;
+ }
+ }
+}
+
+void Dbtc::execTCKEYREF(Signal* signal)
+{
+ TcKeyRef * const tcKeyRef = (TcKeyRef *)signal->getDataPtr();
+ TcIndexOperationPtr indexOpPtr;
+
+ jamEntry();
+ indexOpPtr.i = tcKeyRef->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ return;
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
+ Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
+
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ break;
+ }
+ case(IOS_INDEX_ACCESS):
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI):
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+ // If we fail index access for a non-read operation during commit
+ // we abort the transaction
+ if (commitFlg == 1) {
+ jam();
+ releaseIndexOperation(regApiPtr, indexOp);
+ apiConnectptr.i = indexOp->connectionIndex;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ terrorCode = tcKeyRef->errorCode;
+ abortErrorLab(signal);
+ break;
+ }
+ /**
+ * Increase count as it will be decreased below...
+ * (and the code is written to handle failing lookup on "real" table
+ * not lookup on index table)
+ */
+ regApiPtr->noIndexOp++;
+ // else continue
+ }
+ case(IOS_INDEX_OPERATION): {
+ // Send TCINDXREF
+
+ jam();
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ ndbassert(regApiPtr->noIndexOp);
+ regApiPtr->noIndexOp--; // Decrease count
+ tcIndxRef->connectPtr = tcIndxReq->senderData;
+ tcIndxRef->transId[0] = tcKeyRef->transId[0];
+ tcIndxRef->transId[1] = tcKeyRef->transId[1];
+ tcIndxRef->errorCode = tcKeyRef->errorCode;
+ sendSignal(regApiPtr->ndbapiBlockref,
+ GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ }
+}
+
+void Dbtc::execTRANSID_AI_R(Signal* signal){
+ TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
+ Uint32 sigLen = signal->length();
+ Uint32 dataLen = sigLen - TransIdAI::HeaderLength - 1;
+ Uint32 recBlockref = transIdAI->attrData[dataLen];
+
+ jamEntry();
+
+ /**
+ * Forward signal to final destination
+ * Truncate last word since that was used to hold the final dest.
+ */
+ sendSignal(recBlockref, GSN_TRANSID_AI,
+ signal, sigLen - 1, JBB);
+}
+
+void Dbtc::execKEYINFO20_R(Signal* signal){
+ KeyInfo20 * const keyInfo = (KeyInfo20 *)signal->getDataPtr();
+ Uint32 sigLen = signal->length();
+ Uint32 dataLen = sigLen - KeyInfo20::HeaderLength - 1;
+ Uint32 recBlockref = keyInfo->keyData[dataLen];
+
+ jamEntry();
+
+ /**
+ * Forward signal to final destination
+ * Truncate last word since that was used to hold the final dest.
+ */
+ sendSignal(recBlockref, GSN_KEYINFO20,
+ signal, sigLen - 1, JBB);
+}
+
+
+void Dbtc::execTRANSID_AI(Signal* signal)
+{
+ TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr();
+
+ jamEntry();
+ TcIndexOperationPtr indexOpPtr;
+ indexOpPtr.i = transIdAI->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ if (!indexOp) {
+ jam();
+ // Missing index operation
+ return;
+ }
+ const UintR TconnectIndex = indexOp->connectionIndex;
+ // ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
+ ApiConnectRecordPtr transPtr;
+
+ transPtr.i = TconnectIndex;
+ ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord);
+ ApiConnectRecord * const regApiPtr = transPtr.p;
+
+ // Accumulate attribute data
+ if (!saveTRANSID_AI(signal,
+ indexOp,
+ transIdAI->getData(),
+ signal->getLength() - TransIdAI::HeaderLength)) {
+ jam();
+ // Failed to allocate space for TransIdAI
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4000;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+
+ switch(indexOp->indexOpState) {
+ case(IOS_NOOP): {
+ jam();
+ // Should never happen, abort
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ break;
+ }
+ case(IOS_INDEX_ACCESS): {
+ jam();
+ // Check if all TRANSID_AI have been received
+ if (receivedAllTRANSID_AI(indexOp)) {
+ jam();
+ // All TRANSID_AI received, wait for TCKEYCONF
+ indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF;
+ }
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Dbtc::execTRANSID_AI: Too many TRANSID_AI, ignore for now\n");
+#endif
+ /*
+ // Too many TRANSID_AI
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ */
+ break;
+ }
+ case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
+ jam();
+ // Check if all TRANSID_AI have been received
+ if (receivedAllTRANSID_AI(indexOp)) {
+ jam();
+ // Continue with index operation
+ executeIndexOperation(signal, regApiPtr, indexOp);
+ }
+ // else continue waiting for more TRANSID_AI
+ break;
+ }
+ case(IOS_INDEX_OPERATION): {
+ // Should never receive TRANSID_AI in this state!!
+ jam();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ }
+}
+
+void Dbtc::execTCROLLBACKREP(Signal* signal)
+{
+ TcRollbackRep* tcRollbackRep = (TcRollbackRep *)signal->getDataPtr();
+ jamEntry();
+ TcIndexOperationPtr indexOpPtr;
+ indexOpPtr.i = tcRollbackRep->connectPtr;
+ TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ indexOpPtr.p = indexOp;
+ tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
+ tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
+ sendSignal(apiConnectptr.p->ndbapiBlockref,
+ GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
+}
+
+/**
+ * Read index table with the index attributes as PK
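+ *
+ * This is the first step of an index operation: the index table is read
+ * with the index attributes as primary key, and the returned TRANSID_AI
+ * data (the primary key of the base table) is later used by
+ * executeIndexOperation() to run the actual operation on the base table.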
+ */
+void Dbtc::readIndexTable(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp)
+{
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 dataPos = 0;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
+ AttributeBuffer::DataBufferIterator keyIter;
+ Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
+ TcIndexData* indexData;
+ Uint32 transId1 = indexOp->tcIndxReq.transId1;
+ Uint32 transId2 = indexOp->tcIndxReq.transId2;
+
+ const Operation_t opType =
+ (Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
+
+ // Find index table
+ if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.tableId)) == NULL) {
+ jam();
+ // Failed to find index record
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4000;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ tcKeyReq->transId1 = transId1;
+ tcKeyReq->transId2 = transId2;
+ tcKeyReq->tableId = indexData->indexId;
+ tcKeyLength += MIN(keyLength, keyBufSize);
+ tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.tableSchemaVersion;
+ TcKeyReq::setOperationType(tcKeyRequestInfo,
+ opType == ZREAD ? ZREAD : ZREAD_EX);
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Always send one AttrInfo
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
+ BlockReference originalReceiver = regApiPtr->ndbapiBlockref;
+ regApiPtr->ndbapiBlockref = reference(); // Send result to me
+ tcKeyReq->senderData = indexOp->indexOpId;
+ indexOp->indexOpState = IOS_INDEX_ACCESS;
+ regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp;
+ regApiPtr->accumulatingIndexOp = RNIL;
+ regApiPtr->isIndexOp = true;
+
+ Uint32 remainingKey = indexOp->keyInfo.getSize();
+ bool moreKeyData = indexOp->keyInfo.first(keyIter);
+ // *********** KEYINFO in TCKEYREQ ***********
+ while((dataPos < keyBufSize) &&
+ (remainingKey-- != 0)) {
+ *dataPtr++ = *keyIter.data;
+ dataPos++;
+ moreKeyData = indexOp->keyInfo.next(keyIter);
+ }
+ // *********** ATTRINFO in TCKEYREQ ***********
+ tcKeyReq->attrLen = 1; // Primary key is stored as one attribute
+ AttributeHeader::init(dataPtr, indexData->primaryKeyPos, 0);
+ tcKeyLength++;
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+
+ /**
+ * "Fool" TC into not starting to commit the transaction, since it will
+ * always have one outstanding lqhkeyreq.
+ * The count is decreased again when the index read is complete.
+ */
+ regApiPtr->lqhkeyreqrec++;
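+ // The matching decrement of lqhkeyreqrec is done in executeIndexOperation()
+ // once the index table read has returned the base table's primary key.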
+
+ /**
+ * Remember ptr to index read operation
+ * (used to set correct save point id on index operation later)
+ */
+ indexOp->indexReadTcConnect = regApiPtr->lastTcConnect;
+
+ jamEntry();
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ keyInfo->transId[0] = transId1;
+ keyInfo->transId[1] = transId2;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ while(remainingKey-- != 0) {// If we have not read complete key
+ *dataPtr++ = *keyIter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ // Flush KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+ dataPos = 0;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ }
+ moreKeyData = indexOp->keyInfo.next(keyIter);
+ }
+ if (dataPos != 0) {
+ // Flush last KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+ }
+ }
+
+ regApiPtr->ndbapiBlockref = originalReceiver; // reset original receiver
+}
+
+/**
+ * Execute the index operation with the result from
+ * the index table read as PK
+ */
+void Dbtc::executeIndexOperation(Signal* signal,
+ ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp) {
+
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 attrBufSize = 5;
+ Uint32 dataPos = 0;
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
+ TcIndexData* indexData;
+ AttributeBuffer::DataBufferIterator attrIter;
+ AttributeBuffer::DataBufferIterator aiIter;
+ bool moreKeyData = indexOp->transIdAI.first(aiIter);
+
+ // Find index table
+ if ((indexData = c_theIndexes.getPtr(tcIndxReq->tableId)) == NULL) {
+ jam();
+ // Failed to find index record
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
+
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
+ tcIndxRef->transId[0] = regApiPtr->transid[0];
+ tcIndxRef->transId[1] = regApiPtr->transid[1];
+ tcIndxRef->errorCode = 4349;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
+ TcKeyRef::SignalLength, JBB);
+ return;
+ }
+ // Find schema version of primary table
+ TableRecordPtr tabPtr;
+ tabPtr.i = indexData->primaryTableId;
+ ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
+
+ tcKeyReq->apiConnectPtr = tcIndxReq->apiConnectPtr;
+ tcKeyReq->attrLen = tcIndxReq->attrLen;
+ tcKeyReq->tableId = indexData->primaryTableId;
+ tcKeyReq->tableSchemaVersion = tabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API
+ indexOp->indexOpState = IOS_INDEX_OPERATION;
+ regApiPtr->isIndexOp = true;
+ regApiPtr->executingIndexOp = indexOp->indexOpId;
+ regApiPtr->noIndexOp++; // Increase count
+
+ // Filter out AttributeHeaders since these should not be in the key
+ AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
+
+ Uint32 headerSize = attrHeader->getHeaderSize();
+ Uint32 keySize = attrHeader->getDataSize();
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ } else {
+ jam();
+ moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
+ }//if
+ while(// If we have not read complete key
+ (keySize != 0) &&
+ (dataPos < keyBufSize)) {
+ *dataPtr++ = *aiIter.data;
+ dataPos++;
+ keySize--;
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ }
+ tcKeyLength += dataPos;
+
+ Uint32 attributesLength = indexOp->attrInfo.getSize();
+ if (attributesLength <= attrBufSize) {
+ jam();
+ // ATTRINFO fits in TCKEYREQ
+ // Pack ATTRINFO IN TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, indexOp->attrInfo.getSize());
+ // Insert IndxAttrInfo
+ for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
+ moreAttrData;
+ moreAttrData = indexOp->attrInfo.next(attrIter)) {
+ *dataPtr++ = *attrIter.data;
+ }
+ tcKeyLength += attributesLength;
+ } else {
+ jam();
+ // No ATTRINFO in TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
+ }
+
+ TcKeyReq::setCommitFlag(tcKeyRequestInfo, 0);
+ TcKeyReq::setExecuteFlag(tcKeyRequestInfo, 0);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
+
+ /**
+ * Decrease lqhkeyreqrec to compensate for addition
+ * during read of index table
+ * I.e. let TC start committing when the other operations have completed
+ */
+ regApiPtr->lqhkeyreqrec--;
+
+ /**
+ * Fix savepoint id -
+ * fix so that index operation has the same savepoint id
+ * as the read of the index table (TCINDXREQ)
+ */
+ TcConnectRecordPtr tmp;
+ tmp.i = indexOp->indexReadTcConnect;
+ ptrCheckGuard(tmp, ctcConnectFilesize, tcConnectRecord);
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = tmp.p->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+
+ jamEntry();
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while(keySize-- != 0) {// If we have not read complete key
+ *dataPtr++ = *aiIter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ // Flush KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+ dataPos = 0;
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ }
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ }
+ if (dataPos != 0) {
+ // Flush last KEYINFO
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+ }
+ }
+
+ // *********** ATTRINFO ***********
+ if (attributesLength > attrBufSize) {
+ jam();
+ // No ATTRINFO in TcKeyReq
+ TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
+ // Send ATTRINFO sequence
+ AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
+ Uint32 attrInfoPos = 0;
+
+ attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
+ attrInfo->transId[0] = regApiPtr->transid[0];
+ attrInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+
+
+ // Insert attribute values (insert key values of primary table)
+ for(bool moreAttrData = indexOp->attrInfo.first(attrIter);
+ moreAttrData;
+ moreAttrData = indexOp->attrInfo.next(attrIter)) {
+ *dataPtr++ = *attrIter.data;
+ attrInfoPos++;
+ if (attrInfoPos == AttrInfo::DataLength) {
+ // Flush ATTRINFO
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+ attrInfoPos = 0;
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ }
+ }
+ if (attrInfoPos != 0) {
+ // Send last ATTRINFO
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos);
+ jamEntry();
+ }
+ }
+}
+
+bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperationPtr& indexOpPtr)
+{
+ bool seizeOk;
+
+ seizeOk = c_theIndexOperations.seize(indexOpPtr);
+ if (seizeOk) {
+ jam();
+ TcSeizedIndexOperationPtr seizedIndexOpPtr;
+ seizeOk &= regApiPtr->theSeizedIndexOperations.seizeId(seizedIndexOpPtr,
+ indexOpPtr.i);
+ }
+ return seizeOk;
+}
+
+void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
+ TcIndexOperation* indexOp)
+{
+ indexOp->indexOpState = IOS_NOOP;
+ indexOp->expectedKeyInfo = 0;
+ indexOp->keyInfo.release();
+ indexOp->expectedAttrInfo = 0;
+ indexOp->attrInfo.release();
+ indexOp->expectedTransIdAI = 0;
+ indexOp->transIdAI.release();
+ regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
+ c_theIndexOperations.release(indexOp->indexOpId);
+}
+
+void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
+{
+ TcSeizedIndexOperationPtr seizedIndexOpPtr;
+
+ regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
+ while(seizedIndexOpPtr.i != RNIL) {
+ jam();
+ TcIndexOperation* indexOp =
+ c_theIndexOperations.getPtr(seizedIndexOpPtr.i);
+
+ indexOp->indexOpState = IOS_NOOP;
+ indexOp->expectedKeyInfo = 0;
+ indexOp->keyInfo.release();
+ indexOp->expectedAttrInfo = 0;
+ indexOp->attrInfo.release();
+ indexOp->expectedTransIdAI = 0;
+ indexOp->transIdAI.release();
+ c_theIndexOperations.release(seizedIndexOpPtr.i);
+ regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
+ }
+ regApiPtr->theSeizedIndexOperations.release();
+}
+
+void Dbtc::saveTriggeringOpState(Signal* signal, TcConnectRecord* trigOp)
+{
+ LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ copyFromToLen((UintR*)lqhKeyConf,
+ &trigOp->savedState[0],
+ LqhKeyConf::SignalLength);
+}
+
+void Dbtc::continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp)
+{
+ LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr();
+ copyFromToLen(&trigOp->savedState[0],
+ (UintR*)lqhKeyConf,
+ LqhKeyConf::SignalLength);
+
+ lqhKeyConf->noFiredTriggers = 0;
+ trigOp->noReceivedTriggers = 0;
+
+ // All triggers executed successfully, continue operation
+ execLQHKEYCONF(signal);
+}
+
+void Dbtc::scheduleFiredTrigger(ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ // Set initial values for the trigger firing operation
+ opPtr->p->triggerExecutionCount++;
+
+ // Insert fired trigger in execution queue
+ transPtr->p->theFiredTriggers.add(opPtr->p->accumulatingTriggerData);
+ opPtr->p->accumulatingTriggerData.i = RNIL;
+ opPtr->p->accumulatingTriggerData.p = NULL;
+}
+
+void Dbtc::executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord *localTcConnectRecord = tcConnectRecord;
+ TcConnectRecordPtr opPtr;
+ FiredTriggerPtr trigPtr;
+
+ if (!regApiPtr->theFiredTriggers.isEmpty()) {
+ jam();
+ if ((regApiPtr->apiConnectstate == CS_STARTED) ||
+ (regApiPtr->apiConnectstate == CS_START_COMMITTING)) {
+ jam();
+ regApiPtr->theFiredTriggers.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ jam();
+ // Execute all ready triggers in parallel
+ opPtr.i = trigPtr.p->fireingOperation;
+ ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
+ FiredTriggerPtr nextTrigPtr = trigPtr;
+ regApiPtr->theFiredTriggers.next(nextTrigPtr);
+ if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) {
+ jam();
+ // Firing operation is ready to have a trigger executing
+ executeTrigger(signal, trigPtr.p, transPtr, &opPtr);
+ // Should allow for interleaving here by sending a CONTINUEB and
+ // return
+ // Release trigger records
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
+ tmp1.release();
+ LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
+ tmp2.release();
+ LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
+ tmp3.release();
+ regApiPtr->theFiredTriggers.release(trigPtr.i);
+ }
+ trigPtr = nextTrigPtr;
+ }
+ return;
+ // No more triggers, continue the transaction after the last executed
+ // trigger has returned (in execLQHKEYCONF or execLQHKEYREF)
+ } else {
+ // Wait until transaction is ready to execute a trigger
+ jam();
+ if (!regApiPtr->triggerPending) {
+ jam();
+ regApiPtr->triggerPending = true;
+ signal->theData[0] = TcContinueB::TRIGGER_PENDING;
+ signal->theData[1] = transPtr->i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ }
+ // else
+ // We are already waiting for a pending trigger (CONTINUEB)
+ }
+ }
+}
+
+void Dbtc::executeTrigger(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ TcDefinedTriggerData* definedTriggerData;
+
+ if ((definedTriggerData =
+ c_theDefinedTriggers.getPtr(firedTriggerData->triggerId))
+ != NULL) {
+ switch(definedTriggerData->triggerType) {
+ case(TriggerType::SECONDARY_INDEX):
+ jam();
+ executeIndexTrigger(signal, definedTriggerData, firedTriggerData,
+ transPtr, opPtr);
+ break;
+ default:
+ ndbrequire(false);
+ }
+ }
+}
+
+void Dbtc::executeIndexTrigger(Signal* signal,
+ TcDefinedTriggerData* definedTriggerData,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr)
+{
+ TcIndexData* indexData;
+
+ indexData = c_theIndexes.getPtr(definedTriggerData->indexId);
+ ndbassert(indexData != NULL);
+
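+ // An update is translated into a delete of the old index entry (using the
+ // before values) followed by an insert of the new entry (after values).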
+ switch (definedTriggerData->triggerEvent) {
+ case(TriggerEvent::TE_INSERT): {
+ jam();
+ insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ case(TriggerEvent::TE_DELETE): {
+ jam();
+ deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ case(TriggerEvent::TE_UPDATE): {
+ jam();
+ deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr,
+ indexData, true); // Hold the triggering operation
+ insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData);
+ break;
+ }
+ default:
+ ndbrequire(false);
+ }
+}
+
+void Dbtc::releaseFiredTriggerData(DLFifoList<TcFiredTriggerData>* triggers)
+{
+ FiredTriggerPtr trigPtr;
+
+ triggers->first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ jam();
+ // Release trigger records
+
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues);
+ tmp1.release();
+ LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues);
+ tmp2.release();
+ LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues);
+ tmp3.release();
+
+ triggers->next(trigPtr);
+ }
+ triggers->release();
+}
+
+void Dbtc::insertIntoIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord* opRecord = opPtr->p;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 tcKeyRequestInfo = 0;
+ Uint32 tcKeyLength = TcKeyReq::StaticLength;
+ TableRecordPtr indexTabPtr;
+ AttributeBuffer::DataBufferIterator iter;
+ Uint32 attrId = 0;
+ Uint32 keyLength = 0;
+ Uint32 totalPrimaryKeyLength = 0;
+ Uint32 hops;
+
+ indexTabPtr.i = indexData->indexId;
+ ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
+ tcKeyReq->apiConnectPtr = transPtr->i;
+ tcKeyReq->senderData = opPtr->i;
+ if (holdOperation) {
+ jam();
+ opRecord->triggerExecutionCount++;
+ }//if
+ // Calculate key length and renumber attribute ids
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues);
+ bool skipNull = false;
+ for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ // Filter out NULL valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
+ attrHeader->setAttributeId(attrId);
+ keyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreKeyAttrs = afterValues.next(iter, hops);
+ }
+ if (skipNull) {
+ jam();
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
+ }//if
+ return;
+ }//if
+
+ // Calculate total length of primary key to be stored in index table
+ LocalDataBuffer<11> keyValues(pool, firedTriggerData->keyValues);
+ for(bool moreAttrData = keyValues.first(iter); moreAttrData; ) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ totalPrimaryKeyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreAttrData = keyValues.next(iter, hops);
+ }
+ AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
+
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
+ tcKeyReq->attrLen = afterValues.getSize() +
+ pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+ tcKeyReq->tableId = indexData->indexId;
+ TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
+ tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ // Write first part of key in TCKEYREQ
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 attrBufSize = 5; // Maximum for ATTRINFO in TCKEYREQ
+ Uint32 dataPos = 0;
+ // Filter out AttributeHeaders since these should not be in the key
+ bool moreKeyData = afterValues.first(iter);
+ Uint32 headerSize = 0, keyAttrSize = 0, dataSize = 0, headAndData = 0;
+
+ while (moreKeyData && (dataPos < keyBufSize)) {
+ /*
+ * If we have not read complete key
+ * and it fits in the signal
+ */
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = afterValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = afterValues.next(iter, headerSize - 1);
+ }//if
+ while((keyAttrSize != 0) && (dataPos < keyBufSize)) {
+ // If we have not read complete key
+ jam();
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ moreKeyData = afterValues.next(iter);
+ }
+ if (keyAttrSize != 0) {
+ jam();
+ break;
+ }//if
+ }
+
+ tcKeyLength += dataPos;
+ Uint32 attributesLength = afterValues.getSize() +
+ pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+ if (attributesLength <= attrBufSize) {
+ jam();
+ // ATTRINFO fits in TCKEYREQ
+ // Pack ATTRINFO IN TCKEYREQ as one attribute
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, attributesLength);
+ bool moreAttrData;
+ // Insert primary key attributes (insert after values of primary table)
+ for(moreAttrData = afterValues.first(iter);
+ moreAttrData;
+ moreAttrData = afterValues.next(iter)) {
+ *dataPtr++ = *iter.data;
+ }
+ // Insert attribute values (insert key values of primary table)
+ // as one attribute
+ pkAttrHeader.insertHeader(dataPtr);
+ dataPtr += pkAttrHeader.getHeaderSize();
+ moreAttrData = keyValues.first(iter);
+ while(moreAttrData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ dataSize = attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreAttrData = keyValues.next(iter);
+ } else {
+ jam();
+ moreAttrData = keyValues.next(iter, headerSize - 1);
+ }//if
+ // Copy attribute data
+ while(dataSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ moreAttrData = keyValues.next(iter);
+ }
+ }
+ tcKeyLength += attributesLength;
+ } else {
+ jam();
+ // No ATTRINFO in TCKEYREQ
+ TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0);
+ }
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ /**
+ * Fix savepoint id -
+ * fix so that insert has same savepoint id as triggering operation
+ */
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = opRecord->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+ tcConnectptr.p->currentIndexId = indexData->indexId;
+ jamEntry();
+
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = transPtr->i;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while((keyAttrSize != 0) && (dataPos < KeyInfo::DataLength)) {
+ // If we have not read complete key
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = afterValues.next(iter);
+ }
+
+ while(moreKeyData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = afterValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = afterValues.next(iter, headerSize - 1);
+ }//if
+ while (keyAttrSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = afterValues.next(iter);
+ }
+ }
+ if (dataPos != 0) {
+ jam();
+ // Flush last KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+#endif
+ }
+ }
+
+ // *********** ATTRINFO ***********
+ if (attributesLength > attrBufSize) {
+ jam();
+ // No ATTRINFO in TcKeyReq
+ TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0);
+ // Send ATTRINFO sequence
+ AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
+ Uint32 attrInfoPos = 0;
+
+ attrInfo->connectPtr = transPtr->i;
+ attrInfo->transId[0] = regApiPtr->transid[0];
+ attrInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+
+ bool moreAttrData;
+ // Insert primary key attributes (insert after values of primary table)
+ for(moreAttrData = afterValues.first(iter);
+ moreAttrData;
+ moreAttrData = afterValues.next(iter)) {
+ *dataPtr++ = *iter.data;
+ attrInfoPos++;
+ if (attrInfoPos == AttrInfo::DataLength) {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ }
+ // Insert attribute values (insert key values of primary table)
+ // as one attribute
+ pkAttrHeader.insertHeader(dataPtr);
+ dataPtr += pkAttrHeader.getHeaderSize();
+ attrInfoPos += pkAttrHeader.getHeaderSize();
+ moreAttrData = keyValues.first(iter);
+ while(moreAttrData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ dataSize = attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreAttrData = keyValues.next(iter);
+ } else {
+ jam();
+ moreAttrData = keyValues.next(iter, headerSize - 1);
+ }//if
+ while(dataSize-- != 0) { // If we have not read complete key
+ if (attrInfoPos == AttrInfo::DataLength) {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ *dataPtr++ = *iter.data;
+ attrInfoPos++;
+ moreAttrData = keyValues.next(iter);
+ }
+ }
+ if (attrInfoPos != 0) {
+ jam();
+ // Flush last ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + attrInfoPos);
+ jamEntry();
+#endif
+ }
+ }
+}
+
+void Dbtc::deleteFromIndexTable(Signal* signal,
+ TcFiredTriggerData* firedTriggerData,
+ ApiConnectRecordPtr* transPtr,
+ TcConnectRecordPtr* opPtr,
+ TcIndexData* indexData,
+ bool holdOperation)
+{
+ ApiConnectRecord* regApiPtr = transPtr->p;
+ TcConnectRecord* opRecord = opPtr->p;
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
+ Uint32 tcKeyRequestInfo = 0;
+ Uint32 tcKeyLength = 12; // Static length
+ TableRecordPtr indexTabPtr;
+ AttributeBuffer::DataBufferIterator iter;
+ Uint32 attrId = 0;
+ Uint32 keyLength = 0;
+ Uint32 hops;
+
+ indexTabPtr.i = indexData->indexId;
+ ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord);
+ tcKeyReq->apiConnectPtr = transPtr->i;
+ tcKeyReq->senderData = opPtr->i;
+ if (holdOperation) {
+ jam();
+ opRecord->triggerExecutionCount++;
+ }//if
+ // Calculate key length and renumber attribute ids
+ AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool;
+ LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues);
+ bool skipNull = false;
+ for(bool moreKeyAttrs = beforeValues.first(iter);
+ (moreKeyAttrs);
+ attrId++) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ // Filter out NULL valued attributes
+ if (attrHeader->isNULL()) {
+ skipNull = true;
+ break;
+ }
+ attrHeader->setAttributeId(attrId);
+ keyLength += attrHeader->getDataSize();
+ hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
+ moreKeyAttrs = beforeValues.next(iter, hops);
+ }
+
+ if (skipNull) {
+ jam();
+ opRecord->triggerExecutionCount--;
+ if (opRecord->triggerExecutionCount == 0) {
+ /*
+ We have completed current trigger execution
+ Continue triggering operation
+ */
+ jam();
+ continueTriggeringOp(signal, opRecord);
+ }//if
+ return;
+ }//if
+
+ TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
+ tcKeyReq->attrLen = 0;
+ tcKeyReq->tableId = indexData->indexId;
+ TcKeyReq::setOperationType(tcKeyRequestInfo, ZDELETE);
+ TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
+ tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion;
+ tcKeyReq->transId1 = regApiPtr->transid[0];
+ tcKeyReq->transId2 = regApiPtr->transid[1];
+ Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ // Write first part of key in TCKEYREQ
+ Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
+ Uint32 dataPos = 0;
+ // Filter out AttributeHeaders since these should not be in the key
+ bool moreKeyData = beforeValues.first(iter);
+ Uint32 headerSize = 0, keyAttrSize = 0, headAndData = 0;
+
+ while (moreKeyData &&
+ (dataPos < keyBufSize)) {
+ /*
+ If we have not read complete key
+ and it fits in the signal
+ */
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = beforeValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = beforeValues.next(iter, headerSize - 1);
+ }//if
+ while((keyAttrSize != 0) &&
+ (dataPos < keyBufSize)) {
+ // If we have not read complete key
+ jam();
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ moreKeyData = beforeValues.next(iter);
+ }
+ if (keyAttrSize != 0) {
+ jam();
+ break;
+ }//if
+ }
+
+ tcKeyLength += dataPos;
+ tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ /**
+ * Fix savepoint id -
+ * fix so that delete has same savepoint id as triggering operation
+ */
+ const Uint32 currSavePointId = regApiPtr->currSavePointId;
+ regApiPtr->currSavePointId = opRecord->savePointId;
+ EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ regApiPtr->currSavePointId = currSavePointId;
+ tcConnectptr.p->currentIndexId = indexData->indexId;
+ jamEntry();
+
+ // *********** KEYINFO ***********
+ if (moreKeyData) {
+ jam();
+ // Send KEYINFO sequence
+ KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
+
+ keyInfo->connectPtr = transPtr->i;
+ keyInfo->transId[0] = regApiPtr->transid[0];
+ keyInfo->transId[1] = regApiPtr->transid[1];
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ // Pack any part of a key attribute that did not fit in TCKEYREQ
+ while((keyAttrSize != 0) &&
+ (dataPos < KeyInfo::DataLength)) {
+ // If we have not read complete key
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ keyAttrSize--;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = beforeValues.next(iter);
+ }
+
+ while(moreKeyData) {
+ jam();
+ AttributeHeader* attrHeader = (AttributeHeader *) iter.data;
+
+ headerSize = attrHeader->getHeaderSize();
+ keyAttrSize = attrHeader->getDataSize();
+ headAndData = headerSize + attrHeader->getDataSize();
+ // Skip header
+ if (headerSize == 1) {
+ jam();
+ moreKeyData = beforeValues.next(iter);
+ } else {
+ jam();
+ moreKeyData = beforeValues.next(iter,
+ headerSize - 1);
+ }//if
+ while (keyAttrSize-- != 0) {
+ *dataPtr++ = *iter.data;
+ dataPos++;
+ if (dataPos == KeyInfo::DataLength) {
+ jam();
+ // Flush KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + KeyInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &keyInfo->keyData;
+ dataPos = 0;
+ }
+ moreKeyData = beforeValues.next(iter);
+ }
+ }
+ if (dataPos != 0) {
+ jam();
+ // Flush last KEYINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + dataPos);
+ jamEntry();
+#endif
+ }
+ }
+}
+
+Uint32
+Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
+ if(!enabled)
+ return ZNO_SUCH_TABLE;
+ if(dropping)
+ return ZDROP_TABLE_IN_PROGRESS;
+ if(schemaVersion != currentSchemaVersion)
+ return ZWRONG_SCHEMA_VERSION_ERROR;
+ ErrorReporter::handleAssert("Dbtc::TableRecord::getErrorCode",
+ __FILE__, __LINE__);
+ return 0;
+}
+
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Makefile.am b/storage/ndb/src/kernel/blocks/dbtc/Makefile.am
new file mode 100644
index 00000000000..98ee2639bac
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtc/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbtc.a
+
+libdbtc_a_SOURCES = DbtcInit.cpp DbtcMain.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtc.dsp
+
+libdbtc.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtc_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
new file mode 100644
index 00000000000..2c62adab3e5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
@@ -0,0 +1,136 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef ATTRIBUTE_OFFSET_HPP
+#define ATTRIBUTE_OFFSET_HPP
+
+class AttributeOffset {
+ friend class Dbtup;
+
+private:
+ static void setOffset(Uint32 & desc, Uint32 offset);
+ static void setCharsetPos(Uint32 & desc, Uint32 offset);
+ static void setNullFlagPos(Uint32 & desc, Uint32 offset);
+
+ static Uint32 getOffset(const Uint32 &);
+ static bool getCharsetFlag(const Uint32 &);
+ static Uint32 getCharsetPos(const Uint32 &);
+ static Uint32 getNullFlagPos(const Uint32 &);
+ static Uint32 getNullFlagOffset(const Uint32 &);
+ static Uint32 getNullFlagBitOffset(const Uint32 &);
+ static bool isNULL(const Uint32 &, const Uint32 &);
+};
+
+/**
+ * Allow for 4096 attributes, all nullable, and for 128 different
+ * character sets.
+ *
+ * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb )
+ * c = Has charset flag 1 bits 11-11
+ * s = Charset pointer position - 7 bits 12-18 ( in table descriptor )
+ * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits )
+ * w = Null word offset - 7 bits 25-31 ( f+w addr 4096 attrs )
+ *
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * aaaaaaaaaaacsssssss fffffwwwwwww
+ */
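+
+/**
+ * Worked example: packing an attribute at word offset 5 with charset
+ * position 2 and null flag position 37 (null word 1, bit 5) using the
+ * setters below gives
+ *
+ *   desc = (5 << 0) | (1 << 11) | (2 << 12) | (37 << 20) = 0x02502805
+ *
+ * and the getters recover getOffset() == 5, getCharsetPos() == 2,
+ * getNullFlagOffset() == 1 and getNullFlagBitOffset() == 5.
+ */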
+
+#define AO_ATTRIBUTE_OFFSET_SHIFT 0
+#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff
+
+#define AO_CHARSET_FLAG_SHIFT 11
+#define AO_CHARSET_POS_SHIFT 12
+#define AO_CHARSET_POS_MASK 127
+
+#define AO_NULL_FLAG_POS_MASK 0xfff // f+w
+#define AO_NULL_FLAG_POS_SHIFT 20
+
+#define AO_NULL_FLAG_WORD_MASK 31 // f
+#define AO_NULL_FLAG_OFFSET_SHIFT 5
+
+inline
+void
+AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){
+ ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset");
+ desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT);
+}
+
+inline
+void
+AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) {
+ ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos");
+ desc |= (1 << AO_CHARSET_FLAG_SHIFT);
+ desc |= (offset << AO_CHARSET_POS_SHIFT);
+}
+
+inline
+void
+AttributeOffset::setNullFlagPos(Uint32 & desc, Uint32 pos){
+ ASSERT_MAX(pos, AO_NULL_FLAG_POS_MASK, "AttributeOffset::setNullFlagPos");
+ desc |= (pos << AO_NULL_FLAG_POS_SHIFT);
+}
+
+inline
+Uint32
+AttributeOffset::getOffset(const Uint32 & desc)
+{
+ return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK;
+}
+
+inline
+bool
+AttributeOffset::getCharsetFlag(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_FLAG_SHIFT) & 1;
+}
+
+inline
+Uint32
+AttributeOffset::getCharsetPos(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK;
+}
+
+inline
+Uint32
+AttributeOffset::getNullFlagPos(const Uint32 & desc)
+{
+ return ((desc >> AO_NULL_FLAG_POS_SHIFT) & AO_NULL_FLAG_POS_MASK);
+}
+
+inline
+Uint32
+AttributeOffset::getNullFlagOffset(const Uint32 & desc)
+{
+ return (getNullFlagPos(desc) >> AO_NULL_FLAG_OFFSET_SHIFT);
+}
+
+inline
+Uint32
+AttributeOffset::getNullFlagBitOffset(const Uint32 & desc)
+{
+ return (getNullFlagPos(desc) & AO_NULL_FLAG_WORD_MASK);
+}
+
+inline
+bool
+AttributeOffset::isNULL(const Uint32 & pageWord, const Uint32 & desc)
+{
+ return (((pageWord >> getNullFlagBitOffset(desc)) & 1) == 1);
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
new file mode 100644
index 00000000000..6d169d20d16
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -0,0 +1,2409 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBTUP_H
+#define DBTUP_H
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <ndb_limits.h>
+#include <trigger_definitions.h>
+#include <ArrayList.hpp>
+#include <AttributeHeader.hpp>
+#include <Bitmask.hpp>
+#include <signaldata/TupKey.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/BuildIndx.hpp>
+
+#define ZWORDS_ON_PAGE 8192 /* NUMBER OF WORDS ON A PAGE. */
+#define ZATTRBUF_SIZE 32 /* SIZE OF ATTRIBUTE RECORD BUFFER */
+#define ZMIN_PAGE_LIMIT_TUPKEYREQ 5
+#define ZTUP_VERSION_BITS 15
+
+#ifdef DBTUP_C
+//------------------------------------------------------------------
+// Jam Handling:
+//
+// When DBTUP reports lines through jam in the trace files, the numbers
+// have to be interpreted: 5024, for example, means line 24 in
+// DbtupCommit.cpp, since 5000 is added to line numbers located in
+// DbtupCommit.cpp. The following is the exhaustive list of the values
+// added in the various files. ndbrequire and ptrCheckGuard still
+// report only the line number in the file they are currently located in.
+//
+// DbtupExecQuery.cpp 0
+// DbtupBuffer.cpp 2000
+// DbtupRoutines.cpp 3000
+// DbtupCommit.cpp 5000
+// DbtupFixAlloc.cpp 6000
+// DbtupTrigger.cpp 7000
+// DbtupAbort.cpp 9000
+// DbtupLCP.cpp 10000
+// DbtupUndoLog.cpp 12000
+// DbtupPageMap.cpp 14000
+// DbtupPagMan.cpp 16000
+// DbtupStoredProcDef.cpp 18000
+// DbtupMeta.cpp 20000
+// DbtupTabDesMan.cpp 22000
+// DbtupGen.cpp 24000
+// DbtupSystemRestart.cpp 26000
+// DbtupIndex.cpp 28000
+// DbtupDebug.cpp 30000
+//------------------------------------------------------------------
+
+/*
+2.2 LOCAL SYMBOLS
+-----------------
+*/
+/* ---------------------------------------------------------------- */
+/* S I Z E O F R E C O R D S */
+/* ---------------------------------------------------------------- */
+#define ZNO_OF_ATTRBUFREC 10000 /* SIZE OF ATTRIBUTE INFO FILE */
+#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
+#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
+#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
+#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
+#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
+#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
+#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
+#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
+#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLEL RESTART INFOS */
+ /* 24 SEGMENTS WITH 8 PAGES IN EACH*/
+ /* PLUS ONE UNDO BUFFER CACHE */
+// Undo record identifiers are 32-bits with page index 13-bits
+#define ZUNDO_RECORD_ID_PAGE_INDEX 13 /* 13 BITS = 8192 WORDS/PAGE */
+#define ZUNDO_RECORD_ID_PAGE_INDEX_MASK (ZWORDS_ON_PAGE - 1) /* 1111111111111 */
+
+// Trigger constants
+#define ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE 16
+
+/* ---------------------------------------------------------------- */
+// VARIABLE NUMBERS OF PAGE_WORD, UNDO_WORD AND LOGIC_WORD FOR
+// COMMUNICATION WITH FILE SYSTEM
+/* ---------------------------------------------------------------- */
+#define ZBASE_ADDR_PAGE_WORD 1 /* BASE ADDRESS OF PAGE_WORD VAR */
+#define ZBASE_ADDR_UNDO_WORD 2 /* BASE ADDRESS OF UNDO_WORD VAR */
+#define ZBASE_ADDR_LOGIC_WORD 3 /* BASE ADDRESS OF LOGIC_WORD VAR */
+
+/* ---------------------------------------------------------------- */
+// NUMBER OF PAGES SENT TO DISK IN DATA BUFFER AND UNDO BUFFER WHEN
+// OPTIMUM PERFORMANCE IS ACHIEVED.
+/* ---------------------------------------------------------------- */
+#define ZUB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF UNDO BUFFER */
+#define ZDB_SEGMENT_SIZE 8 /* SEGMENT SIZE OF DATA BUFFER */
+
+/* ---------------------------------------------------------------- */
+/* AN ATTRIBUTE MAY BE NULL, DYNAMIC OR NORMAL. A NORMAL ATTRIBUTE */
+/* IS AN ATTRIBUTE THAT IS NOT NULL OR DYNAMIC. A NULL ATTRIBUTE */
+/* MAY HAVE NO VALUE. A DYNAMIC ATTRIBUTE IS A NULL ATTRIBUTE THAT */
+/* DOES NOT HAVE TO BE A MEMBER OF EVERY TUPLE IN A CERTAIN TABLE. */
+/* ---------------------------------------------------------------- */
+/**
+ * #defines moved into include/kernel/Interpreter.hpp
+ */
+#define ZMAX_REGISTER 21
+#define ZINSERT_DELETE 0
+/* ---------------------------------------------------------------- */
+/* THE MINIMUM SIZE OF AN 'EMPTY' TUPLE HEADER IN R-WORDS */
+/* ---------------------------------------------------------------- */
+#define ZTUP_HEAD_MINIMUM_SIZE 2
+ /* THE TUPLE HEADER FIELD 'SIZE OF NULL ATTR. FIELD' SPECIFIES */
+ /* THE SIZE OF THE TUPLE HEADER FIELD 'NULL ATTR. FIELD'. */
+ /* THE TUPLE HEADER FIELD 'TYPE' SPECIFIES THE TYPE OF THE TUPLE */
+ /* HEADER. */
+ /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
+ /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
+ /* IT MAY ALSO CONTAIN SHORT ATTRIBUTES AND */
+ /* POINTERS TO LONG ATTRIBUTE HEADERS. */
+ /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */
+ /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */
+
+#define ZTH_TYPE3 2 /* TUPLE HEADER THAT MAY HAVE A POINTER TO */
+ /* A DYNAMIC ATTRIBUTE HEADER. IT MAY ALSO */
+ /* CONTAIN SHORT ATTRIBUTES AND POINTERS */
+ /* TO LONG ATTRIBUTE HEADERS. */
+
+ /* DATA STRUCTURE TYPES */
+ /* WHEN ATTRIBUTE INFO IS SENT WITH AN ATTRINFO-SIGNAL THE */
+ /* VARIABLE TYPE IS SPECIFIED. THIS MUST BE DONE TO BE ABLE TO */
+ /* KNOW HOW MUCH DATA OF AN ATTRIBUTE TO READ FROM ATTRINFO. */
+#define ZFIXED_ARRAY 2 /* ZFIXED ARRAY FIELD. */
+#define ZNON_ARRAY 1 /* NORMAL FIELD. */
+#define ZVAR_ARRAY 0 /* VARIABLE ARRAY FIELD */
+#define ZNOT_STORE 3 /* THE ATTR IS STORED IN THE INDEX BLOCK */
+#define ZMAX_SMALL_VAR_ARRAY 256
+
+ /* PLEASE OBSERVE THAT THESE CONSTANTS CORRESPOND TO THE NUMBER */
+ /* OF BITS NEEDED TO REPRESENT THEM D O N O T C H A N G E */
+#define Z1BIT_VAR 0 /* 1 BIT VARIABLE. */
+#define Z2BIT_VAR 1 /* 2 BIT VARIABLE. */
+#define Z4BIT_VAR 2 /* 4 BIT VARIABLE. */
+#define Z8BIT_VAR 3 /* 8 BIT VARIABLE. */
+#define Z16BIT_VAR 4 /* 16 BIT VARIABLE. */
+#define Z32BIT_VAR 5 /* 32 BIT VARIABLE. */
+#define Z64BIT_VAR 6 /* 64 BIT VARIABLE. */
+#define Z128BIT_VAR 7 /* 128 BIT VARIABLE. */
+
+ /* WHEN A REQUEST CANNOT BE EXECUTED BECAUSE OF AN ERROR, THE */
+ /* ERROR MUST BE IDENTIFIED BY MEANS OF AN ERROR CODE AND SENT TO */
+ /* THE REQUESTER. */
+#define ZGET_OPREC_ERROR 804 // TUP_SEIZEREF
+
+#define ZEXIST_FRAG_ERROR 816 // Add fragment
+#define ZFULL_FRAGRECORD_ERROR 817 // Add fragment
+#define ZNO_FREE_PAGE_RANGE_ERROR 818 // Add fragment
+#define ZNOFREE_FRAGOP_ERROR 830 // Add fragment
+#define ZTOO_LARGE_TUPLE_ERROR 851 // Add fragment
+#define ZNO_FREE_TAB_ENTRY_ERROR 852 // Add fragment
+#define ZNO_PAGES_ALLOCATED_ERROR 881 // Add fragment
+
+#define ZGET_REALPID_ERROR 809
+#define ZNOT_IMPLEMENTED_ERROR 812
+#define ZSEIZE_ATTRINBUFREC_ERROR 805
+#define ZTOO_MUCH_ATTRINFO_ERROR 823
+#define ZMEM_NOTABDESCR_ERROR 826
+#define ZMEM_NOMEM_ERROR 827
+#define ZAI_INCONSISTENCY_ERROR 829
+#define ZNO_ILLEGAL_NULL_ATTR 839
+#define ZNOT_NULL_ATTR 840
+#define ZNO_INSTRUCTION_ERROR 871
+#define ZOUTSIDE_OF_PROGRAM_ERROR 876
+#define ZSTORED_PROC_ID_ERROR 877
+#define ZREGISTER_INIT_ERROR 878
+#define ZATTRIBUTE_ID_ERROR 879
+#define ZTRY_TO_READ_TOO_MUCH_ERROR 880
+#define ZTOTAL_LEN_ERROR 882
+#define ZATTR_INTERPRETER_ERROR 883
+#define ZSTACK_OVERFLOW_ERROR 884
+#define ZSTACK_UNDERFLOW_ERROR 885
+#define ZTOO_MANY_INSTRUCTIONS_ERROR 886
+#define ZTRY_TO_UPDATE_ERROR 888
+#define ZCALL_ERROR 890
+#define ZTEMPORARY_RESOURCE_FAILURE 891
+
+#define ZSTORED_SEIZE_ATTRINBUFREC_ERROR 873 // Part of Scan
+
+#define ZREAD_ONLY_CONSTRAINT_VIOLATION 893
+#define ZVAR_SIZED_NOT_SUPPORTED 894
+#define ZINCONSISTENT_NULL_ATTRIBUTE_COUNT 895
+#define ZTUPLE_CORRUPTED_ERROR 896
+#define ZTRY_UPDATE_PRIMARY_KEY 897
+#define ZMUST_BE_ABORTED_ERROR 898
+#define ZTUPLE_DELETED_ERROR 626
+#define ZINSERT_ERROR 630
+
+#define ZINVALID_CHAR_FORMAT 744
+
+
+ /* SOME WORD POSITIONS OF FIELDS IN SOME HEADERS */
+#define ZPAGE_STATE_POS 0 /* POSITION OF PAGE STATE */
+#define ZPAGE_NEXT_POS 1 /* POSITION OF THE NEXT POINTER WHEN IN FREELIST */
+#define ZPAGE_PREV_POS 2 /* POSITION OF THE PREVIOUS POINTER WHEN IN FREELIST */
+#define ZFREELIST_HEADER_POS 3 /* POSITION OF THE FIRST FREELIST */
+#define ZPAGE_FRAG_PAGE_ID_POS 4 /* POSITION OF FRAG PAGE ID WHEN USED*/
+#define ZPAGE_NEXT_CLUST_POS 5 /* POSITION OF NEXT FREE SET OF PAGES */
+#define ZPAGE_FIRST_CLUST_POS 2 /* POSITION OF THE POINTER TO THE FIRST PAGE IN A CLUSTER */
+#define ZPAGE_LAST_CLUST_POS 6 /* POSITION OF THE POINTER TO THE LAST PAGE IN A CLUSTER */
+#define ZPAGE_PREV_CLUST_POS 7 /* POSITION OF THE PREVIOUS POINTER */
+#define ZPAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN MEM PAGEHEADER */
+#define ZDISK_PAGE_HEADER_SIZE 32 /* NUMBER OF WORDS IN DISK PAGEHEADER */
+#define ZNO_OF_FREE_BLOCKS 3 /* NO OF FREE BLOCK IN THE DISK PAGE */
+#define ZDISK_PAGE_ID 8 /* ID OF THE PAGE ON THE DISK */
+#define ZBLOCK_LIST 9
+#define ZCOPY_OF_PAGE 10
+#define ZPAGE_PHYSICAL_INDEX 11
+#define ZNEXT_IN_PAGE_USED_LIST 12
+#define ZPREV_IN_PAGE_USED_LIST 13
+#define ZDISK_USED_TYPE 14
+#define ZFREE_COMMON 1 /* PAGE STATE, PAGE IN COMMON AREA */
+#define ZEMPTY_MM 2 /* PAGE STATE, PAGE IN EMPTY LIST */
+#define ZTH_MM_FREE 3 /* PAGE STATE, TUPLE HEADER PAGE WITH FREE AREA */
+#define ZTH_MM_FULL 4 /* PAGE STATE, TUPLE HEADER PAGE WHICH IS FULL */
+#define ZAC_MM_FREE 5 /* PAGE STATE, ATTRIBUTE CLUSTER PAGE WITH FREE AREA */
+#define ZTH_MM_FREE_COPY 7 /* PAGE STATE, TH COPY PAGE WITH FREE AREA */
+#define ZTH_MM_FULL_COPY 8 /* PAGE STATE, TH COPY PAGE WHICH IS FULL */
+#define ZAC_MM_FREE_COPY 9 /* PAGE STATE, AC COPY PAGE WITH FREE AREA */
+#define ZMAX_NO_COPY_PAGES 4 /* THE MAXIMUM NUMBER OF COPY PAGES ALLOWED PER FRAGMENT */
+
+ /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR RECORDS */
+ /* ALL POSITIONS AND SIZES IS BASED ON R-WORDS (32-BIT ON APZ 212) */
+#define ZTD_HEADER 0 /* HEADER POSITION */
+#define ZTD_DATASIZE 1 /* SIZE OF THE DATA IN THIS CHUNK */
+#define ZTD_SIZE 2 /* TOTAL SIZE OF TABLE DESCRIPTOR */
+
+ /* TRAILER POSITIONS FROM END OF TABLE DESCRIPTOR RECORD */
+#define ZTD_TR_SIZE 1 /* SIZE DESCRIPTOR POS FROM END+1 */
+#define ZTD_TR_TYPE 2
+#define ZTD_TRAILER_SIZE 2 /* TOTAL SIZE OF TABLE TRAILER */
+#define ZAD_SIZE 2 /* TOTAL SIZE OF ATTR DESCRIPTOR */
+#define ZAD_LOG_SIZE 1 /* TWO LOG OF TOTAL SIZE OF ATTR DESCRIPTOR */
+
+ /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR AS A FREELIST */
+#define ZTD_FL_HEADER 0 /* HEADER POSITION */
+#define ZTD_FL_SIZE 1 /* TOTAL SIZE OF THIS FREELIST ENTRY */
+#define ZTD_FL_PREV 2 /* PREVIOUS RECORD IN FREELIST */
+#define ZTD_FL_NEXT 3 /* NEXT RECORD IN FREELIST */
+#define ZTD_FREE_SIZE 16 /* SIZE NEEDED TO HOLD ONE FL ENTRY */
+
+ /* CONSTANTS USED IN LSB OF TABLE DESCRIPTOR HEADER DESCRIBING USAGE */
+#define ZTD_TYPE_FREE 0 /* RECORD LINKED INTO FREELIST */
+#define ZTD_TYPE_NORMAL 1 /* RECORD USED AS TABLE DESCRIPTOR */
+ /* ATTRIBUTE OPERATION CONSTANTS */
+#define ZLEAF 1
+#define ZNON_LEAF 2
+
+ /* ATTRINBUFREC VARIABLE POSITIONS. */
+#define ZBUF_PREV 29 /* POSITION OF 'PREV'-VARIABLE (USED BY INTERPRETED EXEC) */
+#define ZBUF_DATA_LEN 30 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
+#define ZBUF_NEXT 31 /* POSITION OF 'NEXT'-VARIABLE. */
+#define ZSAVE_BUF_NEXT 28
+#define ZSAVE_BUF_DATA_LEN 27
+
+ /* RETURN POINTS. */
+ /* RESTART PHASES */
+#define ZSTARTPHASE1 1
+#define ZSTARTPHASE2 2
+#define ZSTARTPHASE3 3
+#define ZSTARTPHASE4 4
+#define ZSTARTPHASE6 6
+
+#define ZADDFRAG 0
+
+ /* CHECKPOINT RECORD TYPES */
+#define ZLCPR_TYPE_INSERT_TH 0 /* INSERT TUPLE HEADER */
+#define ZLCPR_TYPE_DELETE_TH 1 /* DELETE TUPLE HEADER */
+#define ZLCPR_TYPE_UPDATE_TH 2 /* DON'T CREATE IT, JUST UPDATE */
+#define ZLCPR_TYPE_INSERT_TH_NO_DATA 3 /* INSERT TUPLE HEADER */
+#define ZLCPR_ABORT_UPDATE 4 /* UNDO AN UPDATE OPERATION THAT WAS ACTIVE IN LCP */
+#define ZLCPR_ABORT_INSERT 5 /* UNDO AN INSERT OPERATION THAT WAS ACTIVE IN LCP */
+#define ZTABLE_DESCRIPTOR 6 /* TABLE DESCRIPTOR */
+#define ZINDICATE_NO_OP_ACTIVE 7 /* ENSURE THAT NO OPERATION ACTIVE AFTER RESTART */
+#define ZLCPR_UNDO_LOG_PAGE_HEADER 8 /* CHANGE IN PAGE HEADER IS UNDO LOGGED */
+#define ZLCPR_TYPE_UPDATE_GCI 9 /* Update GCI at commit time */
+#define ZNO_CHECKPOINT_RECORDS 10 /* NUMBER OF CHECKPOINTRECORD TYPES */
+
+ /* RESULT CODES */
+ /* ELEMENT POSITIONS IN SYSTEM RESTART INFO PAGE OF THE DATA FILE */
+#define ZSRI_NO_OF_FRAG_PAGES_POS 10 /* NUMBER OF FRAGMENT PAGES WHEN CHECKPOINT STARTED */
+#define ZSRI_TUP_RESERVED_SIZE_POS 11 /* RESERVED SIZE OF THE TUPLE WHEN CP STARTED */
+#define ZSRI_TUP_FIXED_AREA_POS 12 /* SIZE OF THE TUPLE FIXED AREA WHEN CP STARTED */
+#define ZSRI_TAB_DESCR_SIZE 13 /* SIZE OF THE TABLE DESCRIPTOR WHEN CP STARTED */
+#define ZSRI_NO_OF_ATTRIBUTES_POS 14 /* NUMBER OF ATTRIBUTES */
+#define ZSRI_UNDO_LOG_END_REC_ID 15 /* LAST UNDO LOG RECORD ID FOR THIS CHECKPOINT */
+#define ZSRI_UNDO_LOG_END_PAGE_ID 16 /* LAST USED LOG PAGE ID FOR THIS CHECKPOINT */
+#define ZSRI_TH_FREE_FIRST 17 /* FIRST FREE PAGE OF TUPLE HEADERS */
+#define ZSRI_TH_FREE_COPY_FIRST 18 /* FIRST FREE PAGE OF TUPLE HEADER COPIES */
+#define ZSRI_EMPTY_PRIM_PAGE 27 /* FIRST EMPTY PAGE */
+#define ZSRI_NO_COPY_PAGES_ALLOC 28 /* NO COPY PAGES IN FRAGMENT AT LOCAL CHECKPOINT */
+#define ZSRI_UNDO_FILE_VER 29 /* CHECK POINT ID OF THE UNDO FILE */
+#define ZSRI_NO_OF_INDEX_ATTR 30 /* No of index attributes */
+#define ZNO_OF_PAGES_CLUSTER_REC 0
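+
+ /* Hedged sketch: the ZSRI_* values above are word positions in the system */
+ /* restart info page of the data file, so restart code could read them as  */
+ /* plain array indices. 'sriPage' is a hypothetical name for that page.    */
+#if 0
+ Uint32 noOfFragPages = sriPage[ZSRI_NO_OF_FRAG_PAGES_POS]; // fragment pages when the checkpoint started
+ Uint32 noOfAttrs     = sriPage[ZSRI_NO_OF_ATTRIBUTES_POS]; // number of attributes
+ Uint32 undoEndPageId = sriPage[ZSRI_UNDO_LOG_END_PAGE_ID]; // last used undo log page id for this checkpoint
+#endif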
+
+//------------------------------------------------------------
+// TUP_CONTINUEB codes
+//------------------------------------------------------------
+#define ZSTART_EXEC_UNDO_LOG 0
+#define ZCONT_START_SAVE_CL 1
+#define ZCONT_SAVE_DP 2
+#define ZCONT_EXECUTE_LC 3
+#define ZCONT_LOAD_DP 4
+#define ZLOAD_BAL_LCP_TIMER 5
+#define ZINITIALISE_RECORDS 6
+#define ZREL_FRAG 7
+#define ZREPORT_MEMORY_USAGE 8
+#define ZBUILD_INDEX 9
+
+#define ZINDEX_STORAGE 0
+#define ZDATA_WORD_AT_DISK_PAGE 2030
+#define ZALLOC_DISK_PAGE_LAST_INDEX 2047
+#define ZWORD_IN_BLOCK 127 /* NO OF WORDS IN A BLOCK */
+#define ZNO_DISK_PAGES_FILE_REC 100
+#define ZMASK_PAGE_INDEX 0x7ff
+#define ZBIT_PAGE_INDEX 11 /* 8 KBYT PAGE = 2048 WORDS */
+#define ZSCAN_PROCEDURE 0
+#define ZCOPY_PROCEDURE 2
+#define ZSTORED_PROCEDURE_DELETE 3
+#define ZSTORED_PROCEDURE_FREE 0xffff
+#define ZMIN_PAGE_LIMIT_TUP_COMMITREQ 2
+#define ZUNDO_PAGE_HEADER_SIZE 2 /* SIZE OF UNDO PAGE HEADER */
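+
+ /* Hedged sketch: per the comments above a page holds 2048 words, so a word  */
+ /* address could be split into page number and in-page index with            */
+ /* ZBIT_PAGE_INDEX / ZMASK_PAGE_INDEX; 'wordAddr' is a hypothetical name.    */
+#if 0
+ Uint32 pageNo    = wordAddr >> ZBIT_PAGE_INDEX;   // which 2048-word page
+ Uint32 pageIndex = wordAddr & ZMASK_PAGE_INDEX;   // word offset within that page
+#endif
+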
+#endif
+
+class Dbtup: public SimulatedBlock {
+public:
+
+ typedef bool (Dbtup::* ReadFunction)(Uint32*,
+ AttributeHeader*,
+ Uint32,
+ Uint32);
+ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
+ Uint32,
+ Uint32);
+// State values
+enum State {
+ NOT_INITIALIZED = 0,
+ COMMON_AREA_PAGES = 1,
+ UNDO_RESTART_PAGES = 2,
+ UNDO_PAGES = 3,
+ READ_ONE_PAGE = 4,
+ CHECKPOINT_DATA_READ = 7,
+ CHECKPOINT_DATA_READ_PAGE_ZERO = 8,
+ CHECKPOINT_DATA_WRITE = 9,
+ CHECKPOINT_DATA_WRITE_LAST = 10,
+ CHECKPOINT_DATA_WRITE_FLUSH = 11,
+ CHECKPOINT_UNDO_READ = 12,
+ CHECKPOINT_UNDO_READ_FIRST = 13,
+ CHECKPOINT_UNDO_WRITE = 14,
+ CHECKPOINT_UNDO_WRITE_FLUSH = 15,
+ CHECKPOINT_TD_READ = 16,
+ IDLE = 17,
+ ACTIVE = 18,
+ SYSTEM_RESTART = 19,
+ NO_OTHER_OP = 20,
+ COMMIT_DELETE = 21,
+ TO_BE_COMMITTED = 22,
+ ABORTED = 23,
+ ALREADY_ABORTED_INSERT = 24,
+ ALREADY_ABORTED = 25,
+ ABORT_INSERT = 26,
+ ABORT_UPDATE = 27,
+ INIT = 28,
+ INITIAL_READ = 29,
+ INTERPRETED_EXECUTION = 30,
+ FINAL_READ = 31,
+ FINAL_UPDATE = 32,
+ DISCONNECTED = 33,
+ DEFINED = 34,
+ ERROR_WAIT_TUPKEYREQ = 35,
+ STARTED = 36,
+ NOT_DEFINED = 37,
+ COMPLETED = 38,
+ WAIT_ABORT = 39,
+ NORMAL_PAGE = 40,
+ COPY_PAGE = 41,
+ DELETE_BLOCK = 42,
+ WAIT_STORED_PROCEDURE_ATTR_INFO = 43,
+ DATA_FILE_READ = 45,
+ DATA_FILE_WRITE = 46,
+ LCP_DATA_FILE_READ = 47,
+ LCP_DATA_FILE_WRITE = 48,
+ LCP_DATA_FILE_WRITE_WITH_UNDO = 49,
+ LCP_DATA_FILE_CLOSE = 50,
+ LCP_UNDO_FILE_READ = 51,
+ LCP_UNDO_FILE_CLOSE = 52,
+ LCP_UNDO_FILE_WRITE = 53,
+ OPENING_DATA_FILE = 54,
+ INITIATING_RESTART_INFO = 55,
+ INITIATING_FRAGMENT = 56,
+ OPENING_UNDO_FILE = 57,
+ READING_RESTART_INFO = 58,
+ INIT_UNDO_SEGMENTS = 59,
+ READING_TAB_DESCR = 60,
+ READING_DATA_PAGES = 61,
+ WAIT_COPY_PROCEDURE = 62,
+ TOO_MUCH_AI = 63,
+ SAME_PAGE = 64,
+ DEFINING = 65,
+ TUPLE_BLOCKED = 66,
+ ERROR_WAIT_STORED_PROCREQ = 67
+};
+
+// Records
+/* ************** ATTRIBUTE INFO BUFFER RECORD ****************** */
+/* THIS RECORD IS USED AS A BUFFER FOR INCOMING AND OUTGOING DATA */
+/* ************************************************************** */
+struct Attrbufrec {
+ Uint32 attrbuf[ZATTRBUF_SIZE];
+}; /* p2c: size = 128 bytes */
+
+typedef Ptr<Attrbufrec> AttrbufrecPtr;
+
+/* ********** CHECKPOINT INFORMATION ************ */
+/* THIS RECORD HOLDS INFORMATION NEEDED TO */
+/* PERFORM A CHECKPOINT. IT'S POSSIBLE TO RUN */
+/* MULTIPLE CHECKPOINTS AT A TIME. THIS RECORD */
+/* MAKES IT POSSIBLE TO DISTINGUISH BETWEEN THE */
+/* DIFFERENT CHECKPOINTS. */
+/* ********************************************** */
+struct CheckpointInfo {
+ Uint32 lcpNextRec; /* NEXT RECORD IN FREELIST */
+ Uint32 lcpCheckpointVersion; /* VERSION OF THE CHECKPOINT */
+ Uint32 lcpLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
+ Uint32 lcpUserptr; /* USERPOINTER TO THE BLOCK REQUESTING THE CP */
+ Uint32 lcpFragmentP; /* FRAGMENT POINTER TO WHICH THE CHECKPOINT APPLIES */
+ Uint32 lcpFragmentId; /* FRAGMENT ID */
+ Uint32 lcpTabPtr; /* TABLE POINTER */
+ Uint32 lcpDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
+ Uint32 lcpDataFileHandle; /* FILE HANDLES FOR DATA FILE. LOG FILE HANDLE IN LOCAL_LOG_INFO_RECORD */
+ /* FILE HANDLE TO THE OPEN DATA FILE */
+ Uint32 lcpNoOfPages;
+ Uint32 lcpThFreeFirst;
+ Uint32 lcpThFreeCopyFirst;
+ Uint32 lcpEmptyPrimPage;
+ Uint32 lcpNoCopyPagesAlloc;
+ Uint32 lcpTmpOperPtr; /* TEMPORARY STORAGE OF OPER_PTR DURING SAVE */
+ BlockReference lcpBlockref; /* BLOCKREFERENCE TO THE BLOCK REQUESTING THE CP */
+};
+typedef Ptr<CheckpointInfo> CheckpointInfoPtr;
+
+/* *********** DISK BUFFER SEGMENT INFO ********* */
+/* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+/* A WRITE OF THE DATA BUFFER TO DISK. WHEN THE */
+/* WRITE SIGNAL IS SENT A POINTER TO THIS RECORD */
+/* IS INCLUDED. WHEN THE WRITE IS COMPLETED AND */
+/* CONFIRMED THE PTR TO THIS RECORD IS RETURNED */
+/* AND THE BUFFER PAGES COULD EASILY BE LOCATED */
+/* AND DEALLOCATED. THE CHECKPOINT_INFO_VERSION */
+/* KEEPS TRACK OF THE CHECKPOINT_INFO_RECORD THAT */
+/* INITIATED THE WRITE AND THE CP_PAGE_TO_DISK */
+/* ELEMENT COULD BE INCREASED BY THE NUMBER OF */
+/* PAGES WRITTEN. */
+/* ********************************************** */
+struct DiskBufferSegmentInfo {
+ Uint32 pdxDataPage[16]; /* ARRAY OF DATA BUFFER PAGES */
+ Uint32 pdxUndoBufferSet[2];
+ Uint32 pdxNextRec;
+ State pdxBuffertype;
+ State pdxOperation;
+ /*---------------------------------------------------------------------------*/
+ /* PDX_FLAGS BITS AND THEIR USAGE: */
+ /* BIT 0 1 COMMENT */
+ /*---------------------------------------------------------------------------*/
+ /* 0 SEGMENT INVALID SEGMENT VALID USED DURING READS */
+ /* 1-15 NOT USED */
+ /*---------------------------------------------------------------------------*/
+ Uint32 pdxCheckpointInfoP; /* USED DURING LOCAL CHKP */
+ Uint32 pdxRestartInfoP; /* USED DURING RESTART */
+ Uint32 pdxLocalLogInfoP; /* POINTS TO A LOCAL LOG INFO */
+ Uint32 pdxFilePage; /* START PAGE IN FILE */
+ Uint32 pdxNumDataPages; /* NUMBER OF DATA PAGES */
+};
+typedef Ptr<DiskBufferSegmentInfo> DiskBufferSegmentInfoPtr;
+
+struct Fragoperrec {
+ bool definingFragment;
+ Uint32 nextFragoprec;
+ Uint32 lqhPtrFrag;
+ Uint32 fragidFrag;
+ Uint32 tableidFrag;
+ Uint32 fragPointer;
+ Uint32 attributeCount;
+ Uint32 currNullBit;
+ Uint32 noOfNullBits;
+ Uint32 noOfNewAttrCount;
+ Uint32 charsetIndex;
+ BlockReference lqhBlockrefFrag;
+ bool inUse;
+};
+typedef Ptr<Fragoperrec> FragoperrecPtr;
+
+struct Fragrecord {
+ Uint32 nextStartRange;
+ Uint32 currentPageRange;
+ Uint32 rootPageRange;
+ Uint32 noOfPages;
+ Uint32 emptyPrimPage;
+
+ Uint32 firstusedOprec;
+ Uint32 lastusedOprec;
+
+ Uint32 thFreeFirst;
+ Uint32 thFreeCopyFirst;
+ Uint32 noCopyPagesAlloc;
+
+ Uint32 checkpointVersion;
+ Uint32 minPageNotWrittenInCheckpoint;
+ Uint32 maxPageWrittenInCheckpoint;
+ State fragStatus;
+ Uint32 fragTableId;
+ Uint32 fragmentId;
+ Uint32 nextfreefrag;
+};
+typedef Ptr<Fragrecord> FragrecordPtr;
+
+ /* ************ LOCAL LOG FILE INFO ************* */
+ /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+ /* CHECKPOINT AND RESTART. THERE ARE FOUR */
+ /* PARALLEL UNDO LOG FILES, EACH ONE REPRESENTED */
+ /* BY AN ENTITY OF THIS RECORD. EACH FILE IS */
+ /* SHARED BETWEEN FOUR TABLES AND HAS ITS OWN */
+ /* PAGE POINTERS AND WORD POINTERS. */
+ /* ********************************************** */
+struct LocalLogInfo {
+ Uint32 lliActiveLcp; /* NUMBER OF ACTIVE LOCAL CHECKPOINTS ON THIS FILE */
+ Uint32 lliEndPageId; /* PAGE IDENTIFIER OF LAST PAGE WITH LOG DATA */
+ Uint32 lliPrevRecordId; /* PREVIOUS RECORD IN THIS LOGFILE */
+ Uint32 lliLogFilePage; /* PAGE IN LOGFILE */
+ Uint32 lliNumFragments; /* NO OF FRAGMENTS RESTARTING FROM THIS LOCAL LOG */
+ Uint32 lliUndoBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (UNDO) */
+ Uint32 lliUndoFileHandle; /* FILE HANDLE OF UNDO LOG FILE */
+ Uint32 lliUndoPage; /* UNDO PAGE IN BUFFER */
+ Uint32 lliUndoWord;
+ Uint32 lliUndoPagesToDiskWithoutSynch;
+};
+typedef Ptr<LocalLogInfo> LocalLogInfoPtr;
+
+struct Operationrec {
+// Easy to remove (2 words)
+ Uint32 attroutbufLen;
+ Uint32 logSize;
+
+// Needed (20 words)
+ State tupleState;
+ Uint32 prevActiveOp;
+ Uint32 nextActiveOp;
+ Uint32 nextOprecInList;
+ Uint32 prevOprecInList;
+ Uint32 tableRef;
+ Uint32 fragId;
+ Uint32 fragmentPtr;
+ Uint32 fragPageId;
+ Uint32 realPageId;
+ bool undoLogged;
+ Uint32 realPageIdC;
+ Uint32 fragPageIdC;
+ Uint32 firstAttrinbufrec;
+ Uint32 lastAttrinbufrec;
+ Uint32 attrinbufLen;
+ Uint32 currentAttrinbufLen;
+ Uint32 userpointer;
+ State transstate;
+ Uint32 savePointId;
+
+// Easy to remove (3 words)
+ Uint32 tcOperationPtr;
+ Uint32 transid1;
+ Uint32 transid2;
+
+// Needed (2 words)
+ Uint16 pageIndex;
+ Uint16 pageOffset;
+ Uint16 pageOffsetC;
+ Uint16 pageIndexC;
+// Hard to remove
+ Uint16 tupVersion;
+
+// Easy to remove (1.5 word)
+ BlockReference recBlockref;
+ BlockReference userblockref;
+ Uint16 storedProcedureId;
+
+ Uint8 inFragList;
+ Uint8 inActiveOpList;
+ Uint8 deleteInsertFlag;
+
+// Needed (1 word)
+ Uint8 dirtyOp;
+ Uint8 interpretedExec;
+ Uint8 optype;
+ Uint8 opSimple;
+
+// Used by triggers
+ Uint32 primaryReplica;
+ BlockReference coordinatorTC;
+ Uint32 tcOpIndex;
+ Uint32 gci;
+ Uint32 noFiredTriggers;
+ union {
+ Uint32 hashValue; // only used in TUP_COMMITREQ
+ Uint32 lastRow;
+ };
+ Bitmask<MAXNROFATTRIBUTESINWORDS> changeMask;
+};
+typedef Ptr<Operationrec> OperationrecPtr;
+
+struct Page {
+ Uint32 pageWord[ZWORDS_ON_PAGE];
+};
+typedef Ptr<Page> PagePtr;
+
+ /* ****************************** PAGE RANGE RECORD ************************** */
+ /* PAGE RANGES AND BASE PAGE ID. EACH RANGE HAS A CORRESPONDING BASE PAGE ID */
+ /* THAT IS USED TO CALCULATE REAL PAGE ID FROM A FRAGMENT PAGE ID AND A TABLE */
+ /* REFERENCE. */
+ /* THE PAGE RANGES ARE ORGANISED IN A B-TREE FASHION WHERE THE VARIABLE TYPE */
+ /* SPECIFIES IF A LEAF NODE HAS BEEN REACHED. IF A LEAF NODE HAS BEEN REACHED */
+ /* THEN BASE_PAGE_ID IS THE BASE_PAGE_ID OF THE SET OF PAGES THAT WAS */
+ /* ALLOCATED IN THAT RANGE. OTHERWISE BASE_PAGE_ID IS THE POINTER TO THE NEXT */
+ /* PAGE_RANGE RECORD. */
+ /* *************************************************************************** */
+struct PageRange {
+ Uint32 startRange[4]; /* START OF RANGE */
+ Uint32 endRange[4]; /* END OF THIS RANGE */
+ Uint32 basePageId[4]; /* BASE PAGE ID. */
+/*---- VARIABLE BASE_PAGE_ID2 (4) 8 DS NEEDED WHEN SUPPORTING 40 BIT PAGE ID -------*/
+ Uint8 type[4]; /* TYPE OF BASE PAGE ID */
+ Uint32 nextFree; /* NEXT FREE PAGE RANGE RECORD */
+ Uint32 parentPtr; /* THE PARENT TO THE PAGE RANGE REC IN THE B-TREE */
+ Uint8 currentIndexPos;
+};
+typedef Ptr<PageRange> PageRangePtr;
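+
+/* Hedged sketch of the leaf-node translation described above: within a leaf
+   entry i of a PageRange record, the real page id follows from the fragment
+   page id and the stored base page id. 'pr', 'i' and 'fragPageId' are
+   hypothetical illustration names, not the block's actual lookup code. */
+#if 0
+// Assuming entry i of 'pr' is a leaf (pr.type[i] marks leaf vs non-leaf) and
+// pr.startRange[i] <= fragPageId && fragPageId <= pr.endRange[i]:
+Uint32 realPageId = pr.basePageId[i] + (fragPageId - pr.startRange[i]);
+// For a non-leaf entry, basePageId[i] is instead the pointer to the next
+// PageRange record in the B-tree.
+#endif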
+
+ /* *********** PENDING UNDO WRITE INFO ********** */
+ /* THIS RECORD HOLDS INFORMATION NEEDED DURING */
+ /* A FILE OPEN OPERATION */
+ /* IF THE FILE OPEN IS A PART OF A CHECKPOINT THE */
+ /* CHECKPOINT_INFO_P WILL HOLD A POINTER TO THE */
+ /* CHECKPOINT_INFO RECORD */
+ /* IF IT IS A PART OF RESTART THE PFO_RESTART_INFO*/
+ /* ELEMENT WILL POINT TO A RESTART INFO RECORD */
+ /* ********************************************** */
+struct PendingFileOpenInfo {
+ Uint32 pfoNextRec;
+ State pfoOpenType;
+ Uint32 pfoCheckpointInfoP;
+ Uint32 pfoRestartInfoP;
+};
+typedef Ptr<PendingFileOpenInfo> PendingFileOpenInfoPtr;
+
+struct RestartInfoRecord {
+ Uint32 sriNextRec;
+ State sriState; /* BLOCKREFERENCE TO THE REQUESTING BLOCK */
+ Uint32 sriUserptr; /* USERPOINTER TO THE REQUESTING BLOCK */
+ Uint32 sriDataBufferSegmentP; /* POINTER TO A DISK BUFFER SEGMENT POINTER (DATA) */
+ Uint32 sriDataFileHandle; /* FILE HANDLE TO THE OPEN DATA FILE */
+ Uint32 sriCheckpointVersion; /* CHECKPOINT VERSION TO RESTART FROM */
+ Uint32 sriFragid; /* FRAGMENT ID */
+ Uint32 sriFragP; /* FRAGMENT POINTER */
+ Uint32 sriTableId; /* TABLE ID */
+ Uint32 sriLocalLogInfoP; /* POINTER TO A LOCAL LOG INFO RECORD */
+ Uint32 sriNumDataPages; /* NUMBER OF DATA PAGES TO READ */
+ Uint32 sriCurDataPageFromBuffer; /* THE CHECKPOINT IS COMPLETED */
+ BlockReference sriBlockref;
+};
+typedef Ptr<RestartInfoRecord> RestartInfoRecordPtr;
+
+ /* ************* TRIGGER DATA ************* */
+ /* THIS RECORD FORMS LISTS OF ACTIVE */
+ /* TRIGGERS FOR EACH TABLE. */
+ /* THE RECORDS ARE MANAGED BY A TRIGGER */
+ /* POOL WHERE A TRIGGER RECORD IS SEIZED */
+ /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */
+ /* WHEN THE TRIGGER IS DEACTIVATED. */
+ /* **************************************** */
+struct TupTriggerData {
+
+ /**
+ * Trigger id, used by DICT/TRIX to identify the trigger
+ */
+ Uint32 triggerId;
+
+ /**
+ * Index id is needed for ordered index.
+ */
+ Uint32 indexId;
+
+ /**
+ * Trigger type etc, defines what the trigger is used for
+ */
+ TriggerType::Value triggerType;
+ TriggerActionTime::Value triggerActionTime;
+ TriggerEvent::Value triggerEvent;
+ /**
+ * Receiver block
+ */
+ Uint32 m_receiverBlock;
+
+ /**
+ * Monitor all replicas, i.e. trigger will fire on all nodes where tuples
+ * are stored
+ */
+ bool monitorReplicas;
+
+ /**
+ * Monitor all attributes, the trigger monitors all changes to attributes
+ * in the table
+ */
+ bool monitorAllAttributes;
+
+ /**
+ * Send only changed attributes at trigger firing time.
+ */
+ bool sendOnlyChangedAttributes;
+
+ /**
+ * Send also before values at trigger firing time.
+ */
+ bool sendBeforeValues;
+
+ /**
+ * Attribute mask, defines what attributes are to be monitored
+ * Can be seen as a compact representation of SQL column name list
+ */
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+
+ /**
+ * Next ptr (used in pool/list)
+ */
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+
+ /**
+ * Prev pointer (used in list)
+ */
+ Uint32 prevList;
+
+ inline void print(NdbOut & s) const { s << "[TriggerData = " << triggerId << "]"; };
+};
+
+typedef Ptr<TupTriggerData> TriggerPtr;
+
+/**
+ * Pool of trigger data record
+ */
+ArrayPool<TupTriggerData> c_triggerPool;
+
+ /* ************ TABLE RECORD ************ */
+ /* THIS RECORD FORMS A LIST OF TABLE */
+ /* REFERENCE INFORMATION. ONE RECORD */
+ /* PER TABLE REFERENCE. */
+ /* ************************************** */
+struct Tablerec {
+ Tablerec(ArrayPool<TupTriggerData> & triggerPool) :
+ afterInsertTriggers(triggerPool),
+ afterDeleteTriggers(triggerPool),
+ afterUpdateTriggers(triggerPool),
+ subscriptionInsertTriggers(triggerPool),
+ subscriptionDeleteTriggers(triggerPool),
+ subscriptionUpdateTriggers(triggerPool),
+ constraintUpdateTriggers(triggerPool),
+ tuxCustomTriggers(triggerPool)
+ {}
+
+ Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;
+
+ ReadFunction* readFunctionArray;
+ UpdateFunction* updateFunctionArray;
+ CHARSET_INFO** charsetArray;
+
+ Uint32 readKeyArray;
+ Uint32 tabDescriptor;
+ Uint32 attributeGroupDescriptor;
+
+ bool GCPIndicator;
+ bool checksumIndicator;
+
+ Uint16 tupheadsize;
+ Uint16 noOfAttr;
+ Uint16 noOfKeyAttr;
+ Uint16 noOfCharsets;
+ Uint16 noOfNewAttr;
+ Uint16 noOfNullAttr;
+ Uint16 noOfAttributeGroups;
+
+ Uint8 tupChecksumIndex;
+ Uint8 tupNullIndex;
+ Uint8 tupNullWords;
+ Uint8 tupGCPIndex;
+
+ // Lists of trigger data for active triggers
+ ArrayList<TupTriggerData> afterInsertTriggers;
+ ArrayList<TupTriggerData> afterDeleteTriggers;
+ ArrayList<TupTriggerData> afterUpdateTriggers;
+ ArrayList<TupTriggerData> subscriptionInsertTriggers;
+ ArrayList<TupTriggerData> subscriptionDeleteTriggers;
+ ArrayList<TupTriggerData> subscriptionUpdateTriggers;
+ ArrayList<TupTriggerData> constraintUpdateTriggers;
+
+ // List of ordered indexes
+ ArrayList<TupTriggerData> tuxCustomTriggers;
+
+ Uint32 fragid[2 * MAX_FRAG_PER_NODE];
+ Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
+
+ struct {
+ Uint32 tabUserPtr;
+ Uint32 tabUserRef;
+ } m_dropTable;
+ State tableStatus;
+};
+
+typedef Ptr<Tablerec> TablerecPtr;
+
+struct storedProc {
+ Uint32 storedLinkFirst;
+ Uint32 storedLinkLast;
+ Uint32 storedCounter;
+ Uint32 nextPool;
+ Uint16 storedCode;
+ Uint16 storedProcLength;
+};
+
+typedef Ptr<storedProc> StoredProcPtr;
+
+ArrayPool<storedProc> c_storedProcPool;
+
+/* **************************** TABLE_DESCRIPTOR RECORD ******************************** */
+/* THIS VARIABLE IS USED TO STORE TABLE DESCRIPTIONS. A TABLE DESCRIPTION IS STORED AS A */
+/* CONTIGUOUS ARRAY IN THIS VARIABLE. WHEN A NEW TABLE IS ADDED A CHUNK IS ALLOCATED IN */
+/* THIS RECORD. WHEN ATTRIBUTES ARE ADDED TO THE TABLE, A NEW CHUNK OF PROPER SIZE IS */
+/* ALLOCATED AND ALL DATA IS COPIED TO THIS NEW CHUNK AND THEN THE OLD CHUNK IS PUT IN */
+/* THE FREE LIST. EACH TABLE IS DESCRIBED BY A NUMBER OF TABLE DESCRIPTIVE ATTRIBUTES */
+/* AND A NUMBER OF ATTRIBUTE DESCRIPTORS AS SHOWN IN FIGURE BELOW */
+/* */
+/* WHEN ALLOCATING A TABLE DESCRIPTOR THE SIZE IS ALWAYS A MULTIPLE OF 16 WORDS. */
+/* */
+/* ---------------------------------------------- */
+/* | TRAILER USED FOR ALLOC/DEALLOC | */
+/* ---------------------------------------------- */
+/* | TABLE DESCRIPTIVE ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION 1 | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION 2 | */
+/* ---------------------------------------------- */
+/* | | */
+/* | | */
+/* | | */
+/* ---------------------------------------------- */
+/* | ATTRIBUTE DESCRIPTION N | */
+/* ---------------------------------------------- */
+/* */
+/* THE TABLE DESCRIPTIVE ATTRIBUTES CONTAINS THE FOLLOWING ATTRIBUTES: */
+/* */
+/* ---------------------------------------------- */
+/* | HEADER (TYPE OF INFO) | */
+/* ---------------------------------------------- */
+/* | SIZE OF WHOLE CHUNK (INCL. TRAILER) | */
+/* ---------------------------------------------- */
+/* | TABLE IDENTITY | */
+/* ---------------------------------------------- */
+/* | FRAGMENT IDENTITY | */
+/* ---------------------------------------------- */
+/* | NUMBER OF ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | SIZE OF FIXED ATTRIBUTES | */
+/* ---------------------------------------------- */
+/* | NUMBER OF NULL FIELDS | */
+/* ---------------------------------------------- */
+/* | NOT USED | */
+/* ---------------------------------------------- */
+/* */
+/* THESE ATTRIBUTES ARE ALL ONE R-VARIABLE IN THE RECORD. */
+/* NORMALLY ONLY ONE TABLE DESCRIPTOR IS USED. DURING SCHEMA CHANGES THERE COULD */
+/* HOWEVER EXIST MORE THAN ONE TABLE DESCRIPTION SINCE THE SCHEMA CHANGES OF VARIOUS */
+/* FRAGMENTS ARE NOT SYNCHRONISED. THIS MEANS THAT ALTHOUGH THE SCHEMA HAS CHANGED */
+/* IN ALL FRAGMENTS, THE FRAGMENTS HAVE NOT REMOVED THE ATTRIBUTES WITHIN THE SAME */
+/* TIME-FRAME. THEREFORE SOME ATTRIBUTE INFORMATION MIGHT DIFFER BETWEEN FRAGMENTS. */
+/* EXAMPLES OF ATTRIBUTES THAT MIGHT DIFFER ARE SIZE OF FIXED ATTRIBUTES, NUMBER OF */
+/* ATTRIBUTES, FIELD START WORD, START BIT. */
+/* */
+/* AN ATTRIBUTE DESCRIPTION CONTAINS THE FOLLOWING ATTRIBUTES: */
+/* */
+/* ---------------------------------------------- */
+/* | Field Type, 4 bits (LSB Bits) | */
+/* ---------------------------------------------- */
+/* | Attribute Size, 4 bits | */
+/* ---------------------------------------------- */
+/* | NULL indicator 1 bit | */
+/* ---------------------------------------------- */
+/* | Indicator if TUP stores attr. 1 bit | */
+/* ---------------------------------------------- */
+/* | Not used 6 bits | */
+/* ---------------------------------------------- */
+/* | No. of elements in fixed array 16 bits | */
+/* ---------------------------------------------- */
+/* ---------------------------------------------- */
+/* | Field Start Word, 21 bits (LSB Bits) | */
+/* ---------------------------------------------- */
+/* | NULL Bit, 11 bits | */
+/* ---------------------------------------------- */
+/* */
+/* THE ATTRIBUTE SIZE CAN BE 1,2,4,8,16,32,64 AND 128 BITS. */
+/* */
+/* THE UNUSED PARTS OF THE RECORDS ARE PUT IN A LINKED LIST OF FREE PARTS. EACH OF */
+/* THOSE FREE PARTS HAS THREE RECORDS ASSIGNED AS SHOWN IN THIS STRUCTURE */
+/* ALL FREE PARTS ARE SET INTO A CHUNK LIST WHERE EACH CHUNK IS AT LEAST 16 WORDS */
+/* */
+/* ---------------------------------------------- */
+/* | HEADER = RNIL | */
+/* ---------------------------------------------- */
+/* | SIZE OF FREE AREA | */
+/* ---------------------------------------------- */
+/* | POINTER TO PREVIOUS FREE AREA | */
+/* ---------------------------------------------- */
+/* | POINTER TO NEXT FREE AREA | */
+/* ---------------------------------------------- */
+/* */
+/* IF THE POINTER TO THE NEXT AREA IS RNIL THEN THIS IS THE LAST FREE AREA. */
+/* */
+/*****************************************************************************************/
+struct TableDescriptor {
+ Uint32 tabDescr;
+};
+typedef Ptr<TableDescriptor> TableDescriptorPtr;
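+
+/* Hedged sketch of the trailer addressing described above: with a chunk of
+   'len' words (always a multiple of 16) starting at 'base' in a table
+   descriptor array 'td', the trailer positions are counted from the end of
+   the chunk. 'td', 'base' and 'len' are hypothetical illustration names. */
+#if 0
+Uint32 trailerSize = td[base + len - ZTD_TR_SIZE]; // size descriptor used for alloc/dealloc
+Uint32 trailerType = td[base + len - ZTD_TR_TYPE]; // ZTD_TYPE_FREE or ZTD_TYPE_NORMAL
+// The header word at td[base + ZTD_HEADER] carries the usage type in its LSB,
+// and td[base + ZTD_SIZE] holds the total size of the descriptor chunk.
+#endif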
+
+struct HostBuffer {
+ bool inPackedList;
+ Uint32 packetLenTA;
+ Uint32 noOfPacketsTA;
+ Uint32 packetBufferTA[30];
+};
+typedef Ptr<HostBuffer> HostBufferPtr;
+
+ /* **************** UNDO PAGE RECORD ******************* */
+ /* THIS RECORD FORMS AN UNDO PAGE CONTAINING A NUMBER OF */
+ /* DATA WORDS. CURRENTLY THERE ARE 2048 WORDS ON A PAGE */
+ /* EACH OF 32 BITS (4 BYTES) WHICH FORMS AN UNDO PAGE */
+ /* WITH A TOTAL OF 8192 BYTES */
+ /* ***************************************************** */
+struct UndoPage {
+ Uint32 undoPageWord[ZWORDS_ON_PAGE]; /* 32 KB */
+};
+typedef Ptr<UndoPage> UndoPagePtr;
+
+ /*
+ * Build index operation record.
+ */
+ struct BuildIndexRec {
+ // request cannot use signal class due to extra members
+ Uint32 m_request[BuildIndxReq::SignalLength];
+ Uint32 m_triggerPtrI; // the index trigger
+ Uint32 m_fragNo; // fragment number under Tablerec
+ Uint32 m_pageId; // logical fragment page id
+ Uint32 m_tupleNo; // tuple number on page (pageIndex >> 1)
+ BuildIndxRef::ErrorCode m_errorCode;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ typedef Ptr<BuildIndexRec> BuildIndexPtr;
+ ArrayPool<BuildIndexRec> c_buildIndexPool;
+ ArrayList<BuildIndexRec> c_buildIndexList;
+ Uint32 c_noOfBuildIndexRec;
+
+public:
+ Dbtup(const class Configuration &);
+ virtual ~Dbtup();
+
+ /*
+ * TUX uses logical tuple address when talking to ACC and LQH.
+ */
+ void tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32& tupAddr);
+
+ /*
+ * TUX index in TUP has single Uint32 array attribute which stores an
+ * index node. TUX reads and writes the node directly via pointer.
+ */
+ int tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node);
+ void tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node);
+ void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);
+
+ /*
+ * TUX reads primary table attributes for index keys. Tuple is
+ * specified by location of original tuple and version number. Input
+ * is attribute ids in AttributeHeader format. Output is attribute
+ * data with headers. Uses readAttributes with xfrm option set.
+ * Returns number of words or negative (-terrorCode) on error.
+ */
+ int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);
+
+ /*
+ * TUX reads primary key without headers into an array of words. Used
+ * for md5 summing and when returning keyinfo. Returns number of
+ * words or negative (-terrorCode) on error.
+ */
+ int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag);
+
+ /*
+ * ACC reads primary key without headers into an array of words. At
+ * this point in ACC deconstruction, ACC still uses logical references
+ * to fragment and tuple.
+ */
+ int accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag);
+
+ /*
+ * TUX checks if tuple is visible to scan.
+ */
+ bool tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId);
+
+private:
+ BLOCK_DEFINES(Dbtup);
+
+ // Transit signals
+ void execDEBUG_SIG(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execSEND_PACKED(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTUP_LCPREQ(Signal* signal);
+ void execEND_LCPREQ(Signal* signal);
+ void execSTART_RECREQ(Signal* signal);
+ void execMEMCHECKREQ(Signal* signal);
+ void execTUPSEIZEREQ(Signal* signal);
+ void execTUPRELEASEREQ(Signal* signal);
+ void execSTORED_PROCREQ(Signal* signal);
+ void execTUPFRAGREQ(Signal* signal);
+ void execTUP_ADD_ATTRREQ(Signal* signal);
+ void execTUP_COMMITREQ(Signal* signal);
+ void execTUP_ABORTREQ(Signal* signal);
+ void execTUP_SRREQ(Signal* signal);
+ void execTUP_PREPLCPREQ(Signal* signal);
+ void execFSOPENCONF(Signal* signal);
+ void execFSOPENREF(Signal* signal);
+ void execFSCLOSECONF(Signal* signal);
+ void execFSCLOSEREF(Signal* signal);
+ void execFSWRITECONF(Signal* signal);
+ void execFSWRITEREF(Signal* signal);
+ void execFSREADCONF(Signal* signal);
+ void execFSREADREF(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ void execALTER_TAB_REQ(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+ void execFSREMOVEREF(Signal* signal);
+ void execTUP_ALLOCREQ(Signal* signal);
+ void execTUP_DEALLOCREQ(Signal* signal);
+ void execTUP_WRITELOG_REQ(Signal* signal);
+
+ // Ordered index related
+ void execBUILDINDXREQ(Signal* signal);
+ void buildIndex(Signal* signal, Uint32 buildPtrI);
+ void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// Methods to handle execution of TUPKEYREQ + ATTRINFO.
+//
+// Module Execution Manager
+//
+// The TUPKEYREQ signal is central to this block. This signal is used
+// by everybody that needs to read data residing in DBTUP. The data is
+// read using an interpreter approach.
+//
+// Operations only needing to read execute a simplified version of the
+// interpreter where the only instruction is read Attribute to send.
+// Operations only needing to update the record (insert or update)
+// execute a simplified version of the interpreter where the only
+// instruction is write Attribute.
+//
+// Currently TUPKEYREQ is used in the following situations.
+// 1) Normal transaction execution. Can be any of the types described
+// below.
+// 2) Execution of fragment redo log during system restart.
+// In this situation there will only be normal updates, inserts
+// and deletes performed.
+// 3) A special type of normal transaction execution is to write the
+// records arriving from the primary replica in the node restart
+// processing. This will always be normal write operations which
+// are translated to inserts or updates before arriving to TUP.
+// 4) Scan processing. The scan processing will use normal reads or
+// interpreted reads in their execution. There will be one TUPKEYREQ
+// signal for each record processed.
+// 5) Copy fragment processing. This is a special type of scan used in the
+// primary replica at system restart. It reads the entire fragment and
+// converts the reads to writes to the starting node. In this special case
+// LQH acts as an API node and also receives the ATTRINFO sent in the
+// TRANSID_AI signals.
+//
+// Signal Diagram:
+//
+// In Signals:
+// -----------
+//
+// Logically there is one request, TUPKEYREQ, which requests to read/write data
+// of one tuple in the database. Since the definition of what to read and write
+// can be bigger than the maximum signal size, we segment it. The definition of
+// what to read/write, and any interpreted program, is sent before the TUPKEYREQ
+// signal.
+//
+// ---> ATTRINFO
+// ...
+// ---> ATTRINFO
+// ---> TUPKEYREQ
+// The number of ATTRINFO signals can be anything from 0 upwards.
+// The total size of the ATTRINFO is not allowed to be more than 16384 words.
+// There is always one and only one TUPKEYREQ.
+//
+// Response Signals (successful case):
+//
+// Simple/Dirty Read Operation
+// ---------------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- READCONF (to API)
+// <---- TUPKEYCONF (to LQH)
+// There is always exactly one READCONF sent last. The number of
+// TRANSID_AI signals depends on how much was read. The maximum size
+// of the ATTRINFO sent back is 16384 words. The signals are sent
+// directly to the application with an address provided by the
+// TUPKEYREQ signal.
+// A positive response signal is also sent to LQH.
+//
+// Normal Read Operation
+// ---------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- TUPKEYCONF (to LQH)
+// The number of TRANSID_AI signals depends on how much was read.
+// The maximum size of the ATTRINFO sent back is 16384 words. The
+// signals are sent directly to the application with an address
+// provided by the TUPKEYREQ signal.
+// A positive response signal is also sent to LQH.
+//
+// Normal update/insert/delete operation
+// -------------------------------------
+//
+// <---- TUPKEYCONF
+// After successful updating of the tuple LQH is informed of this.
+//
+// Delete with read
+// ----------------
+//
+// Will behave as a normal read although it also prepares the
+// deletion of the tuple.
+//
+// Interpreted Update
+// ------------------
+//
+// <---- TRANSID_AI (to API)
+// ...
+// <---- TRANSID_AI (to API)
+// <---- TUP_ATTRINFO (to LQH)
+// ...
+// <---- TUP_ATTRINFO (to LQH)
+// <---- TUPKEYCONF (to LQH)
+//
+// The interpreted Update contains five sections:
+// The first section performs read Attribute operations
+// that send results back to the API.
+//
+// The second section executes the interpreted program
+// where data from attributes can be updated and it
+// can also read attribute values into the registers.
+//
+// The third section performs unconditional updates of
+// attributes.
+//
+// The fourth section can read the attributes to be sent to the
+// API after updating the record.
+//
+// The fifth section contains subroutines used by the interpreter
+// in the second section.
+//
+// All types of interpreted programs contain the same five sections.
+// The only difference is that only interpreted updates can update
+// attributes. Interpreted inserts are not allowed.
+//
+// Interpreted Updates have to send back the information about the
+// attributes they have updated. This information will be shipped to
+// the log and also to any other replicas. Thus interpreted updates
+// are only performed in the primary replica. The fragment redo log
+// in LQH will contain information so that normal update/inserts/deletes
+// can be performed using TUPKEYREQ.
+//
+// Interpreted Read
+// ----------------
+//
+// From a signalling point of view the Interpreted Read behaves as
+// a Normal Read. The Interpreted Read is often used by scans.
+//
+// Interpreted Delete
+// ------------------
+//
+// <---- TUPKEYCONF
+// After successful preparation to delete the tuple LQH is informed
+// of this.
+//
+// Interpreted Delete with Read
+// ----------------------------
+//
+// From a signalling point of view an interpreted delete with read
+// behaves as a normal read.
+//
+// Continuation after successful case:
+//
+// After a read of any kind the operation record is ready to be used
+// again by a new operation.
+//
+// Any updates, inserts or deletes wait for either of two messages.
+// A commit specifying that the operation is to be performed for real
+// or an abort specifying that the operation is to be rolled back and
+// the record to be restored in its original format.
+//
+// This is handled by the module Transaction Manager.
+//
+// Response Signals (unsuccessful case):
+//
+// <---- TUPKEYREF (to LQH)
+// A signal is sent back to LQH informing about the unsuccessful
+// operation. In this case TUP waits for an abort signal to arrive
+// before the operation record is ready for the next operation.
+// This is handled by the Transaction Manager.
+//------------------------------------------------------------------
+//------------------------------------------------------------------
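+
+// Hedged sketch of the request ordering described above: the full attribute /
+// interpreted-program definition is segmented into ATTRINFO signals and only
+// then the single TUPKEYREQ follows. 'maxAttrInfoWordsPerSignal',
+// 'sendAttrinfoChunk' and 'sendTupkeyreq' are hypothetical helper names used
+// only for illustration; this is not LQH's actual sender code.
+#if 0
+void sendDefinitionThenTupkeyreq(const Uint32* def, Uint32 totalWords) {
+  // totalWords <= 16384 per the rule above
+  Uint32 wordsLeft = totalWords;
+  while (wordsLeft > 0) {
+    Uint32 chunk = (wordsLeft < maxAttrInfoWordsPerSignal) ? wordsLeft
+                                                           : maxAttrInfoWordsPerSignal;
+    sendAttrinfoChunk(def, chunk);   // ---> ATTRINFO
+    def += chunk;
+    wordsLeft -= chunk;
+  }
+  sendTupkeyreq();                   // ---> TUPKEYREQ (exactly one per request)
+}
+#endif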
+
+// *****************************************************************
+// Signal Reception methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execTUPKEYREQ(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execATTRINFO(Signal* signal);
+
+// Trigger signals
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execCREATE_TRIG_REQ(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void execDROP_TRIG_REQ(Signal* signal);
+
+// *****************************************************************
+// Support methods for ATTRINFO.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void handleATTRINFOforTUPKEYREQ(Signal* signal,
+ Uint32 length,
+ Operationrec * const regOperPtr);
+
+// *****************************************************************
+// Setting up the environment for reads, inserts, updates and deletes.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleReadReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleUpdateReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleInsertReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int handleDeleteReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int updateStartLab(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr);
+
+// *****************************************************************
+// Interpreter Handling methods.
+// *****************************************************************
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int interpreterStartLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int interpreterNextLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* logMemory,
+ Uint32* mainProgram,
+ Uint32 TmainProgLen,
+ Uint32* subroutineProg,
+ Uint32 TsubroutineLen,
+ Uint32 * tmpArea,
+ Uint32 tmpAreaSz);
+
+// *****************************************************************
+// Signal Sending methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendReadAttrinfo(Signal* signal,
+ Uint32 TnoOfData,
+ const Operationrec * const regOperPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendLogAttrinfo(Signal* signal,
+ Uint32 TlogSize,
+ Operationrec * const regOperPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void sendTUPKEYCONF(Signal* signal, Operationrec *
+ const regOperPtr,
+ Uint32 TlogSize);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// *****************************************************************
+// The methods that perform the actual read and update of attributes
+// in the tuple.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int readAttributes(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ const Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32 TmaxRead,
+ bool xfrmFlag);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int readAttributesWithoutHeader(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32* attrBuffer,
+ Uint32 TmaxRead);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int updateAttributes(Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHOneWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHTwoWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVariableSizedAttr(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVariableSizedAttr(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readVarSizeUnlimitedNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readBigVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateBigVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readBigVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateBigVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readSmallVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateSmallVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readSmallVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateSmallVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynFixedSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynFixedSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynVarSizeUnlimited(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynVarSizeUnlimited(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynBigVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynBigVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool readDynSmallVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool updateDynSmallVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+
+ bool readBitsNULLable(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNULLable(Uint32* inBuffer, Uint32, Uint32);
+ bool readBitsNotNULL(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNotNULL(Uint32* inBuffer, Uint32, Uint32);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ bool nullFlagCheck(Uint32 attrDes2);
+ Uint32 read_psuedo(Uint32 attrId, Uint32* outBuffer);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void setUpQueryRoutines(Tablerec* const regTabPtr);
+
+// *****************************************************************
+// Service methods.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void copyAttrinfo(Signal* signal, Operationrec * const regOperPtr, Uint32* inBuffer);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void initOpConnection(Operationrec* regOperPtr, Fragrecord*);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void initOperationrec(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int initStoredOperationrec(Operationrec* const regOperPtr,
+ Uint32 storedId);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void insertActiveOpList(Signal* signal,
+ OperationrecPtr regOperPtr,
+ Page * const pagePtr,
+ Uint32 pageOffset);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void linkOpIntoFragList(OperationrecPtr regOperPtr,
+ Fragrecord* const regFragPtr);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32 Tlen);
+
+//------------------------------------------------------------------
+// Trigger handling routines
+//------------------------------------------------------------------
+ ArrayList<TupTriggerData>* findTriggerList(Tablerec* table,
+ TriggerType::Value ttype,
+ TriggerActionTime::Value ttime,
+ TriggerEvent::Value tevent);
+
+ bool createTrigger(Tablerec* table, const CreateTrigReq* req);
+
+ Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
+
+ void checkImmediateTriggersAfterInsert(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+ void checkImmediateTriggersAfterUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+ void checkImmediateTriggersAfterDelete(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const tablePtr);
+
+#if 0
+ void checkDeferredTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr);
+#endif
+ void checkDetachedTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr);
+
+ void fireImmediateTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void fireDeferredTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void fireDetachedTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void executeTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr);
+
+ void executeTrigger(Signal* signal,
+ TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr);
+
+ bool readTriggerInfo(TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr,
+ Uint32* const keyBuffer,
+ Uint32& noPrimKey,
+ Uint32* const mainBuffer,
+ Uint32& noMainWords,
+ Uint32* const copyBuffer,
+ Uint32& noCopyWords);
+
+ void sendTrigAttrInfo(Signal* signal,
+ Uint32* data,
+ Uint32 dataLen,
+ bool executeDirect,
+ BlockReference receiverReference);
+
+ Uint32 setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
+ Uint32 noOfAttributes,
+ Uint32* inBuffer);
+
+ void sendFireTrigOrd(Signal* signal,
+ Operationrec * const regOperPtr,
+ TupTriggerData* const trigPtr,
+ Uint32 noPrimKeySignals,
+ Uint32 noBeforeSignals,
+ Uint32 noAfterSignals);
+
+ bool primaryKey(Tablerec* const, Uint32);
+
+ // these set terrorCode and return non-zero on error
+
+ int executeTuxInsertTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ int executeTuxUpdateTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ int executeTuxDeleteTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr);
+
+ // these crash the node on error
+
+ void executeTuxCommitTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr);
+
+ void executeTuxAbortTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr);
+
+// *****************************************************************
+// Error Handling routines.
+// *****************************************************************
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ int TUPKEY_abort(Signal* signal, int error_type);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+ void tupkeyErrorLab(Signal* signal);
+
+//------------------------------------------------------------------
+//------------------------------------------------------------------
+// Methods to handle execution of TUP_COMMITREQ + TUP_ABORTREQ.
+//
+// Module Transaction Manager
+//
+// The Transaction Manager module is responsible for the commit
+// and abort of operations started by the Execution Manager.
+//
+// Commit Operation:
+// ----------------
+//
+// Failures in commit processing are not allowed since that would
+// leave the database in an unreliable state. Thus the only way
+// to handle failures in commit processing is to crash the node.
+//
+// TUP_COMMITREQ can only be received in the wait state after a
+// successful TUPKEYREQ which was not a read operation.
+//
+// Commit of Delete:
+// -----------------
+//
+// This will actually perform the deletion of the record unless
+// other operations also are connected to the record. In this case
+// we will set the delete state on the record that becomes the owner
+// of the record.
+//
+// Commit of Update:
+// ----------------
+//
+// We will release the copy record where the original record was kept.
+// Also here we will take special care if more operations are updating
+// the record simultaneously.
+//
+// Commit of Insert:
+// -----------------
+//
+// Will simply reset the state of the operation record.
+//
+// Signal Diagram:
+// ---> TUP_COMMITREQ (from LQH)
+// <---- TUP_COMMITCONF (to LQH)
+//
+//
+// Abort Operation:
+// ----------------
+//
+// Signal Diagram:
+// ---> TUP_ABORTREQ (from LQH)
+// <---- TUP_ABORTCONF (to LQH)
+//
+// Failures in abort processing are not allowed since that would
+// leave the database in an unreliable state. Thus the only way
+// to handle failures in abort processing is to crash the node.
+//
+// Abort messages can arrive at any time. One can arrive even before
+// anything at all of the operation has arrived. It can arrive after
+// receiving a number of ATTRINFO signals but before TUPKEYREQ has been
+// received. It must arrive after we have sent TUPKEYREF in response to
+// TUPKEYREQ, and finally it can arrive after successfully performing the
+// TUPKEYREQ in all cases, including the read case.
+//------------------------------------------------------------------
+//------------------------------------------------------------------
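+
+// Hedged sketch of the commit handshake described above: TUP_COMMITREQ is only
+// legal after a successful, non-read TUPKEYREQ, and a failure while committing
+// can only be handled by crashing the node. 'commitOperation', 'crashTheNode'
+// and 'sendTupCommitConf' are hypothetical helper names, for illustration only.
+#if 0
+void onTupCommitReq(Operationrec* regOperPtr) {
+  // Precondition per the rules above: the operation completed a successful
+  // TUPKEYREQ and was not a read operation.
+  if (!commitOperation(regOperPtr)) {  // hypothetical commit step
+    crashTheNode();                    // commit failures cannot be tolerated
+  }
+  sendTupCommitConf(regOperPtr);       // <---- TUP_COMMITCONF (to LQH)
+}
+#endif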
+
+#if 0
+ void checkPages(Fragrecord* const regFragPtr);
+#endif
+ void printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit);
+
+ bool checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr);
+
+ void setNullBits(Page* const regPage, Tablerec* const regTabPtr, Uint32 pageOffset);
+ bool checkNullAttributes(Operationrec* const, Tablerec* const);
+ bool getPage(PagePtr& pagePtr,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ bool getPageLastCommitted(Operationrec* const regOperPtr,
+ Operationrec* const leaderOpPtr);
+
+ bool getPageThroughSavePoint(Operationrec* const regOperPtr,
+ Operationrec* const leaderOpPtr);
+
+ Uint32 calculateChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
+ void setChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize);
+
+ void commitSimple(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void commitRecord(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void setTupleStatesSetOpType(Operationrec* const regOperPtr,
+ Page* const pagePtr,
+ Uint32& opType,
+ OperationrecPtr& firstOpPtr);
+
+ void findBeforeValueOperation(OperationrecPtr& befOpPtr,
+ OperationrecPtr firstOpPtr);
+
+ void calculateChangeMask(Page* const pagePtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageOffset,
+ Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask);
+
+ void updateGcpId(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void abortUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+ void commitUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr);
+
+ void setTupleStateOnPreviousOps(Uint32 prevOpIndex);
+ void copyMem(Signal* signal, Uint32 sourceIndex, Uint32 destIndex);
+
+ void freeAllAttrBuffers(Operationrec* const regOperPtr);
+ void freeAttrinbufrec(Uint32 anAttrBufRec);
+ void removeActiveOpList(Operationrec* const regOperPtr);
+
+ void updatePackedList(Signal* signal, Uint16 ahostIndex);
+
+ void setUpDescriptorReferences(Uint32 descriptorReference,
+ Tablerec* const regTabPtr,
+ const Uint32* offset);
+ void setUpKeyArray(Tablerec* const regTabPtr);
+ bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
+ void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
+ void abortAddFragOp(Signal* signal);
+ void releaseTabDescr(Tablerec* const regTabPtr);
+ void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);
+
+ void initialiseRecordsLab(Signal* signal, Uint32 switchData, Uint32, Uint32);
+ void initializeAttrbufrec();
+ void initializeCheckpointInfoRec();
+ void initializeDiskBufferSegmentRecord();
+ void initializeFragoperrec();
+ void initializeFragrecord();
+ void initializeHostBuffer();
+ void initializeLocalLogInfo();
+ void initializeOperationrec();
+ void initializePendingFileOpenInfoRecord();
+ void initializeRestartInfoRec();
+ void initializeTablerec();
+ void initializeTabDescr();
+ void initializeUndoPage();
+
+ void initTab(Tablerec* const regTabPtr);
+
+ void startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2);
+
+ void fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr);
+ void fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr);
+ void fragrefuse2Lab(Signal* signal, FragoperrecPtr fragOperPtr, FragrecordPtr regFragPtr);
+ void fragrefuse3Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+ void fragrefuse4Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+ void addattrrefuseLab(Signal* signal,
+ FragrecordPtr regFragPtr,
+ FragoperrecPtr fragOperPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId);
+
+
+ void checkLcpActiveBufferPage(Uint32 minPageNotWrittenInCheckpoint, DiskBufferSegmentInfoPtr dbsiPtr);
+ void lcpWriteListDataPageSegment(Signal* signal,
+ DiskBufferSegmentInfoPtr dbsiPtr,
+ CheckpointInfoPtr ciPtr,
+ bool flushFlag);
+ void lcpFlushLogLab(Signal* signal, CheckpointInfoPtr ciPtr);
+ void lcpClosedDataFileLab(Signal* signal, CheckpointInfoPtr ciPtr);
+ void lcpEndconfLab(Signal* signal);
+ void lcpSaveDataPageLab(Signal* signal, Uint32 ciIndex);
+ void lcpCompletedLab(Signal* signal, Uint32 ciIndex);
+ void lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex);
+ void lcpSaveCopyListLab(Signal* signal, CheckpointInfoPtr ciPtr);
+
+ void sendFSREMOVEREQ(Signal* signal, TablerecPtr tabPtr);
+ void releaseFragment(Signal* signal, Uint32 tableId);
+
+ void allocDataBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr);
+ void allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr, LocalLogInfoPtr lliPtr);
+ void freeDiskBufferSegmentRecord(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+ void freeUndoBufferPages(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+
+ void releaseCheckpointInfoRecord(CheckpointInfoPtr ciPtr);
+ void releaseDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr dbsiPtr);
+ void releaseFragoperrec(FragoperrecPtr fragOperPtr);
+ void releaseFragrec(FragrecordPtr regFragPtr);
+ void releasePendingFileOpenInfoRecord(PendingFileOpenInfoPtr pfoPtr);
+ void releaseRestartInfoRecord(RestartInfoRecordPtr riPtr);
+
+ void seizeDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr& dbsiPtr);
+ void seizeCheckpointInfoRecord(CheckpointInfoPtr& ciPtr);
+ void seizeFragoperrec(FragoperrecPtr& fragOperPtr);
+ void seizeFragrecord(FragrecordPtr& regFragPtr);
+ void seizeOpRec(OperationrecPtr& regOperPtr);
+ void seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr);
+ void seizeRestartInfoRecord(RestartInfoRecordPtr& riPtr);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ void rfrClosedDataFileLab(Signal* signal, Uint32 restartIndex);
+ void rfrCompletedLab(Signal* signal, RestartInfoRecordPtr riPtr);
+ void rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrLoadDataPagesLab(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrReadFirstUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void rfrReadNextDataSegment(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr);
+ void rfrReadNextUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr);
+ void rfrReadSecondUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr);
+
+ void startExecUndoLogLab(Signal* signal, Uint32 lliIndex);
+ void readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void closeExecUndoLogLab(Signal* signal, LocalLogInfoPtr lliPtr);
+ void endExecUndoLogLab(Signal* signal, Uint32 lliIndex);
+
+ struct XlcStruct {
+ Uint32 PageId;
+ Uint32 PageIndex;
+ Uint32 LogRecordType;
+ Uint32 FragId;
+ FragrecordPtr FragPtr;
+ LocalLogInfoPtr LliPtr;
+ DiskBufferSegmentInfoPtr DbsiPtr;
+ UndoPagePtr UPPtr;
+ TablerecPtr TabPtr;
+ };
+
+ void xlcGetNextRecordLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr);
+ void xlcRestartCompletedLab(Signal* signal);
+
+ void xlcCopyData(XlcStruct& xlcStruct, Uint32 pageOffset, Uint32 noOfWords, PagePtr pagePtr);
+ void xlcGetLogHeader(XlcStruct& xlcStruct);
+ Uint32 xlcGetLogWord(XlcStruct& xlcStruct);
+
+ void xlcAbortInsert(Signal* signal, XlcStruct& xlcStruct);
+ void xlcAbortUpdate(Signal* signal, XlcStruct& xlcStruct);
+ void xlcDeleteTh(XlcStruct& xlcStruct);
+ void xlcIndicateNoOpActive(XlcStruct& xlcStruct);
+ void xlcInsertTh(XlcStruct& xlcStruct);
+ void xlcTableDescriptor(XlcStruct& xlcStruct);
+ void xlcUndoLogPageHeader(XlcStruct& xlcStruct);
+ void xlcUpdateTh(XlcStruct& xlcStruct);
+ void xlcUpdateGCI(XlcStruct& xlcStruct);
+
+
+ void cprAddData(Signal* signal,
+ Fragrecord* const regFragPtr,
+ Uint32 pageIndex,
+ Uint32 noOfWords,
+ Uint32 startOffset);
+ void cprAddGCIUpdate(Signal* signal,
+ Uint32 prevGCI,
+ Fragrecord* const regFragPtr);
+ void cprAddLogHeader(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Uint32 recordType,
+ Uint32 tableId,
+ Uint32 fragId);
+ void cprAddUndoLogPageHeader(Signal* signal,
+ Page* const regPagePtr,
+ Fragrecord* const regFragPtr);
+ void cprAddUndoLogRecord(Signal* signal,
+ Uint32 recordType,
+ Uint32 pageId,
+ Uint32 pageIndex,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 localLogIndex);
+ void cprAddAbortUpdate(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Operationrec* const regOperPtr);
+ void cprAddUndoLogWord(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Uint32 undoWord);
+ bool isUndoLoggingNeeded(Fragrecord* const regFragPtr, Uint32 pageId);
+ bool isUndoLoggingActive(Fragrecord* const regFragPtr);
+ bool isUndoLoggingBlocked(Fragrecord* const regFragPtr);
+ bool isPageUndoLogged(Fragrecord* const regFragPtr, Uint32 pageId);
+
+ void seizeUndoBufferSegment(Signal* signal, UndoPagePtr& regUndoPagePtr);
+ void lcpWriteUndoSegment(Signal* signal, LocalLogInfo* const lliPtr, bool flushFlag);
+
+
+ void deleteScanProcedure(Signal* signal, Operationrec* regOperPtr);
+ void copyProcedure(Signal* signal,
+ TablerecPtr regTabPtr,
+ Operationrec* regOperPtr);
+ void scanProcedure(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 lenAttrInfo);
+ void storedSeizeAttrinbufrecErrorLab(Signal* signal,
+ Operationrec* regOperPtr);
+ bool storedProcedureAttrInfo(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 length,
+ Uint32 firstWord,
+ bool copyProc);
+
+//-----------------------------------------------------------------------------
+// Table Descriptor Memory Manager
+//-----------------------------------------------------------------------------
+
+// Public methods
+ Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
+ Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
+ void freeTabDescr(Uint32 retRef, Uint32 retNo);
+ Uint32 getTabDescrWord(Uint32 index);
+ void setTabDescrWord(Uint32 index, Uint32 word);
+
+// Private methods
+ Uint32 sizeOfReadFunction();
+ void removeTdArea(Uint32 tabDesRef, Uint32 list);
+ void insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list);
+ Uint32 itdaMergeTabDescr(Uint32 retRef, Uint32 retNo);
+
+//------------------------------------------------------------------------------------------------------
+// Page Memory Manager
+//------------------------------------------------------------------------------------------------------
+
+// Public methods
+ void allocConsPages(Uint32 noOfPagesToAllocate,
+ Uint32& noOfPagesAllocated,
+ Uint32& allocPageRef);
+ void returnCommonArea(Uint32 retPageRef, Uint32 retNo);
+ void initializePage();
+
+// Private methods
+ void removeCommonArea(Uint32 remPageRef, Uint32 list);
+ void insertCommonArea(Uint32 insPageRef, Uint32 list);
+ void findFreeLeftNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
+ void findFreeRightNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate);
+ Uint32 nextHigherTwoLog(Uint32 input);
+
+// Private data
+ Uint32 cfreepageList[16];
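+
+// Illustrative note (an assumption, not taken from this hunk): cfreepageList
+// appears to hold free chunks of contiguous pages bucketed by size, and
+// nextHigherTwoLog() by its name returns the smallest n with 2^n >= input.
+// A minimal sketch of such a helper could be:
+//
+//   Uint32 log = 0;
+//   while ((Uint32(1) << log) < input) {
+//     log++;
+//   }
+//   return log;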
+
+//------------------------------------------------------------------------------------------------------
+// Page Mapper, converts logical page ids to physical page ids.
+// The page mapper also handles the pages allocated to the fragment.
+//------------------------------------------------------------------------------------------------------
+//
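+// For example, execTUP_DEALLOCREQ in DbtupCommit.cpp (later in this
+// changeset) uses getRealpid() to translate the fragment page id received
+// from ACC into an index into the global page array before freeing the
+// tuple header.
+//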
+// Public methods
+ Uint32 getRealpid(Fragrecord* const regFragPtr, Uint32 logicalPageId);
+ Uint32 getNoOfPages(Fragrecord* const regFragPtr);
+ void initPageRangeSize(Uint32 size);
+ bool insertPageRangeTab(Fragrecord* const regFragPtr,
+ Uint32 startPageId,
+ Uint32 noPages);
+ void releaseFragPages(Fragrecord* const regFragPtr);
+ void initFragRange(Fragrecord* const regFragPtr);
+ void initializePageRange();
+ Uint32 getEmptyPage(Fragrecord* const regFragPtr);
+ Uint32 allocFragPages(Fragrecord* const regFragPtr, Uint32 noOfPagesAllocated);
+
+// Private methods
+ Uint32 leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr);
+ void releasePagerange(PageRangePtr regPRPtr);
+ void seizePagerange(PageRangePtr& regPageRangePtr);
+ void errorHandler(Uint32 errorCode);
+ void allocMoreFragPages(Fragrecord* const regFragPtr);
+
+// Private data
+ Uint32 cfirstfreerange;
+ PageRange *pageRange;
+ Uint32 c_noOfFreePageRanges;
+ Uint32 cnoOfPageRangeRec;
+
+//------------------------------------------------------------------------------------------------------
+// Fixed Allocator
+// Allocates and deallocates tuples of fixed size on a fragment.
+//------------------------------------------------------------------------------------------------------
+//
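+// freeTh()/allocTh() operate on tuple headers of tupheadsize words; the
+// commitUpdate() and abortUpdate() routines later in this changeset use
+// freeTh() to release an operation's copy tuple.
+//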
+// Public methods
+ bool allocTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageType,
+ Signal* signal,
+ Uint32& pageOffset,
+ PagePtr& pagePtr);
+
+ void freeThSr(Tablerec* const regTabPtr,
+ Page* const regPagePtr,
+ Uint32 freePageOffset);
+
+ void freeTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Signal* signal,
+ Page* const regPagePtr,
+ Uint32 freePageOffset);
+
+ void getThAtPageSr(Page* const regPagePtr,
+ Uint32& pageOffset);
+
+// Private methods
+ void convertThPage(Uint32 Tupheadsize,
+ Page* const regPagePtr);
+
+ void getThAtPage(Fragrecord* const regFragPtr,
+ Page* const regPagePtr,
+ Signal* signal,
+ Uint32& pageOffset);
+
+ void getEmptyPageThCopy(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr);
+
+ void getEmptyPageTh(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr);
+
+//------------------------------------------------------------------------------------------------------
+// Temporary variables used to hold commonly used values in certain modules
+//------------------------------------------------------------------------------------------------------
+
+ FragrecordPtr fragptr;
+ OperationrecPtr operPtr;
+ TablerecPtr tabptr;
+
+// readAttributes and updateAttributes module
+ Uint32 tCheckOffset;
+ Uint32 tMaxRead;
+ Uint32 tOutBufIndex;
+ Uint32* tTupleHeader;
+ bool tXfrmFlag;
+
+// updateAttributes module
+ Uint32 tInBufIndex;
+ Uint32 tInBufLen;
+
+ Uint32 terrorCode;
+
+//------------------------------------------------------------------------------------------------------
+// Common stored variables. Variables that always have a valid value.
+//------------------------------------------------------------------------------------------------------
+ Uint32 cnoOfLcpRec;
+ Uint32 cnoOfParallellUndoFiles;
+ Uint32 cnoOfUndoPage;
+
+ Attrbufrec *attrbufrec;
+ Uint32 cfirstfreeAttrbufrec;
+ Uint32 cnoOfAttrbufrec;
+ Uint32 cnoFreeAttrbufrec;
+
+ CheckpointInfo *checkpointInfo;
+ Uint32 cfirstfreeLcp;
+
+ DiskBufferSegmentInfo *diskBufferSegmentInfo;
+ Uint32 cfirstfreePdx;
+ Uint32 cnoOfConcurrentWriteOp;
+
+ Fragoperrec *fragoperrec;
+ Uint32 cfirstfreeFragopr;
+ Uint32 cnoOfFragoprec;
+
+ Fragrecord *fragrecord;
+ Uint32 cfirstfreefrag;
+ Uint32 cnoOfFragrec;
+
+ HostBuffer *hostBuffer;
+
+ LocalLogInfo *localLogInfo;
+ Uint32 cnoOfLocalLogInfo;
+
+ Uint32 cfirstfreeOprec;
+ Operationrec *operationrec;
+ Uint32 cnoOfOprec;
+
+ Page *page;
+ Uint32 cnoOfPage;
+ Uint32 cnoOfAllocatedPages;
+
+ PendingFileOpenInfo *pendingFileOpenInfo;
+ Uint32 cfirstfreePfo;
+ Uint32 cnoOfConcurrentOpenOp;
+
+ RestartInfoRecord *restartInfoRecord;
+ Uint32 cfirstfreeSri;
+ Uint32 cnoOfRestartInfoRec;
+
+ Tablerec *tablerec;
+ Uint32 cnoOfTablerec;
+
+ TableDescriptor *tableDescriptor;
+ Uint32 cnoOfTabDescrRec;
+
+ UndoPage *undoPage;
+ Uint32 cfirstfreeUndoSeg;
+ Int32 cnoFreeUndoSeg;
+
+
+
+ Uint32 cnoOfDataPagesToDiskWithoutSynch;
+
+ Uint32 cdata[32];
+ Uint32 cdataPages[16];
+ Uint32 cpackedListIndex;
+ Uint32 cpackedList[MAX_NODES];
+ Uint32 cfreeTdList[16];
+ Uint32 clastBitMask;
+ Uint32 clblPageCounter;
+ Uint32 clblPagesPerTick;
+ Uint32 clblPagesPerTickAfterSr;
+ BlockReference clqhBlockref;
+ Uint32 clqhUserpointer;
+ Uint32 cminusOne;
+ BlockReference cndbcntrRef;
+ Uint32 cundoFileVersion;
+ BlockReference cownref;
+ Uint32 cownNodeId;
+ Uint32 czero;
+
+ // A little bit bigger to cover overwrites in copy algorithms (16384 real size).
+#define ZATTR_BUFFER_SIZE 16384
+ Uint32 clogMemBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 coutBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 cinBuffer[ZATTR_BUFFER_SIZE + 16];
+ Uint32 totNoOfPagesAllocated;
+
+ // Trigger variables
+ Uint32 c_maxTriggersPerTable;
+
+ // Counters for num UNDO log records executed
+ Uint32 cSrUndoRecords[9];
+
+ STATIC_CONST(MAX_PARALLELL_TUP_SRREQ = 2);
+ Uint32 c_sr_free_page_0;
+
+ Uint32 c_errorInsert4000TableId;
+
+ void initGlobalTemporaryVars();
+ void reportMemoryUsage(Signal* signal, int incDec);
+
+
+#ifdef VM_TRACE
+ struct Th {
+ Uint32 data[1];
+ };
+ friend NdbOut& operator<<(NdbOut&, const Operationrec&);
+ friend NdbOut& operator<<(NdbOut&, const Th&);
+#endif
+};
+
+inline
+bool Dbtup::isUndoLoggingNeeded(Fragrecord* const regFragPtr,
+ Uint32 pageId)
+{
+ if ((regFragPtr->checkpointVersion != RNIL) &&
+ (pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
+ (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingNeeded()
+
+inline
+bool Dbtup::isUndoLoggingActive(Fragrecord* const regFragPtr)
+{
+ if (regFragPtr->checkpointVersion != RNIL) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingActive()
+
+inline
+bool Dbtup::isUndoLoggingBlocked(Fragrecord* const regFragPtr)
+{
+ if ((regFragPtr->checkpointVersion != RNIL) &&
+ (cnoFreeUndoSeg < ZMIN_PAGE_LIMIT_TUPKEYREQ)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isUndoLoggingBlocked()
+
+inline
+bool Dbtup::isPageUndoLogged(Fragrecord* const regFragPtr,
+ Uint32 pageId)
+{
+ if ((pageId >= regFragPtr->minPageNotWrittenInCheckpoint) &&
+ (pageId < regFragPtr->maxPageWrittenInCheckpoint)) {
+ return true;
+ }//if
+ return false;
+}//Dbtup::isPageUndoLogged()
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
new file mode 100644
index 00000000000..e9043a8b52d
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -0,0 +1,473 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(9000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(9000 + __LINE__); }
+
+void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr)
+{
+ if (regOperPtr->storedProcedureId == ZNIL) {
+ ljam();
+ freeAttrinbufrec(regOperPtr->firstAttrinbufrec);
+ } else {
+ StoredProcPtr storedPtr;
+ c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId);
+ ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
+ ljam();
+ storedPtr.p->storedCounter--;
+ regOperPtr->storedProcedureId = ZNIL;
+ }//if
+ regOperPtr->firstAttrinbufrec = RNIL;
+ regOperPtr->lastAttrinbufrec = RNIL;
+}//Dbtup::freeAllAttrBuffers()
+
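+/* ----------------------------------------------------------------- */
+/* Walk the chain of attribute-info buffers starting at anAttrBuf and */
+/* link each buffer back onto the free list, updating the count of    */
+/* free attribute buffers once the whole chain has been released.     */
+/* ----------------------------------------------------------------- */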
+void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
+{
+ Uint32 Ttemp;
+ AttrbufrecPtr localAttrBufPtr;
+ Uint32 RnoFree = cnoFreeAttrbufrec;
+ localAttrBufPtr.i = anAttrBuf;
+ while (localAttrBufPtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec);
+ Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT];
+ localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec;
+ cfirstfreeAttrbufrec = localAttrBufPtr.i;
+ localAttrBufPtr.i = Ttemp;
+ RnoFree++;
+ }//while
+ cnoFreeAttrbufrec = RnoFree;
+}//Dbtup::freeAttrinbufrec()
+
+/* ----------------------------------------------------------------- */
+/* ----------- ABORT THIS PART OF THE TRANSACTION ------------------ */
+/* ----------------------------------------------------------------- */
+void Dbtup::execTUP_ABORTREQ(Signal* signal)
+{
+ OperationrecPtr regOperPtr;
+ FragrecordPtr regFragPtr;
+ TablerecPtr regTabPtr;
+
+ ljamEntry();
+ regOperPtr.i = signal->theData[0];
+ ptrCheckGuard(regOperPtr, cnoOfOprec, operationrec);
+ ndbrequire((regOperPtr.p->transstate == STARTED) ||
+ (regOperPtr.p->transstate == TOO_MUCH_AI) ||
+ (regOperPtr.p->transstate == ERROR_WAIT_TUPKEYREQ) ||
+ (regOperPtr.p->transstate == IDLE));
+ if (regOperPtr.p->optype == ZREAD) {
+ ljam();
+ freeAllAttrBuffers(regOperPtr.p);
+ initOpConnection(regOperPtr.p, 0);
+ return;
+ }//if
+
+ regTabPtr.i = regOperPtr.p->tableRef;
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+
+ regFragPtr.i = regOperPtr.p->fragmentPtr;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+
+ // XXX should be integrated into the code that comes after
+ if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
+ regOperPtr.p->tupleState == NO_OTHER_OP) {
+ ljam();
+ executeTuxAbortTriggers(signal,
+ regOperPtr.p,
+ regTabPtr.p);
+ OperationrecPtr loopOpPtr;
+ loopOpPtr.i = regOperPtr.p->prevActiveOp;
+ while (loopOpPtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ if (loopOpPtr.p->tupleState != ALREADY_ABORTED) {
+ ljam();
+ executeTuxAbortTriggers(signal,
+ loopOpPtr.p,
+ regTabPtr.p);
+ }
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ }
+ }
+
+ Uint32 prevActiveOp = regOperPtr.p->prevActiveOp;
+ removeActiveOpList(regOperPtr.p);
+ if (regOperPtr.p->tupleState == NO_OTHER_OP) {
+ if (prevActiveOp == RNIL) {
+ ljam();
+ abortUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+ } else { //prevActiveOp != RNIL
+ setTupleStateOnPreviousOps(prevActiveOp);
+ if (regOperPtr.p->optype == ZDELETE) {
+ ljam();
+ OperationrecPtr prevOpPtr;
+ prevOpPtr.i = prevActiveOp;
+ ptrCheckGuard(prevOpPtr, cnoOfOprec, operationrec);
+ ndbrequire(prevOpPtr.p->realPageIdC != RNIL);
+ ndbrequire(prevOpPtr.p->optype == ZINSERT);
+ abortUpdate(signal, prevOpPtr.p, regFragPtr.p, regTabPtr.p);
+ } else {
+ ljam();
+ abortUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+ }//if
+ }//if
+ } else {
+ ndbrequire(regOperPtr.p->tupleState == ALREADY_ABORTED);
+ commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+ }//if
+ initOpConnection(regOperPtr.p, regFragPtr.p);
+}//execTUP_ABORTREQ()
+
+void Dbtup::setTupleStateOnPreviousOps(Uint32 prevOpIndex)
+{
+ OperationrecPtr loopOpPtr;
+ loopOpPtr.i = prevOpIndex;
+ do {
+ ljam();
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ loopOpPtr.p->tupleState = ALREADY_ABORTED;
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ } while (loopOpPtr.i != RNIL);
+}//Dbtup::setTupleStateOnPreviousOps()
+
+/* ---------------------------------------------------------------- */
+/* ------------ PERFORM AN ABORT OF AN UPDATE OPERATION ----------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::abortUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+ /* RESTORE THE ORIGINAL DATA */
+ /* THE OPER_PTR ALREADY CONTAINS BOTH THE PAGE AND THE COPY PAGE */
+ if (regOperPtr->realPageIdC != RNIL) {
+ ljam();
+ /***********************/
+ /* CHECKPOINT SPECIFIC */
+ /***********************/
+ if (isUndoLoggingNeeded(regFragPtr, regOperPtr->fragPageIdC)) {
+ if (regOperPtr->undoLogged) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THE UPDATE WAS MADE AFTER THE LOCAL CHECKPOINT STARTED. */
+/* THUS THE ORIGINAL TUPLE WILL BE RESTORED BY A LOG RECORD */
+/* CREATED WHEN UPDATING. THUS IT IS ENOUGH TO LOG THE UNDO */
+/* OF THE COPY RELEASE == INSERT THE COPY TUPLE HEADER WITH */
+/* NO DATA. */
+/* ---------------------------------------------------------------- */
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_INSERT_TH_NO_DATA,
+ regOperPtr->fragPageIdC,
+ regOperPtr->pageIndexC,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ } else {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THE UPDATE WAS MADE BEFORE THE LOCAL CHECKPOINT STARTED. */
+/* THE TUPLE WILL THUS BE RESTORED BY COPYING FROM THE COPY. */
+/* THUS WE DO NOT NEED TO RESTORE THE DATA IN THE ORIGINAL. */
+/* WE DO HOWEVER NEED TO ENSURE THAT THE COPY CONTAINS THE */
+/* CORRECT DATA. */
+/* ---------------------------------------------------------------- */
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_INSERT_TH,
+ regOperPtr->fragPageIdC,
+ regOperPtr->pageIndexC,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ cprAddData(signal,
+ regFragPtr,
+ regOperPtr->realPageIdC,
+ regTabPtr->tupheadsize,
+ regOperPtr->pageOffsetC);
+ }//if
+ }//if
+ Uint32 rpid = regOperPtr->realPageId;
+ Uint32 rpid_copy = regOperPtr->realPageIdC;
+ Uint32 offset = regOperPtr->pageOffset;
+ Uint32 offset_copy = regOperPtr->pageOffsetC;
+ Uint32 tuple_size = regTabPtr->tupheadsize;
+ Uint32 end = offset + tuple_size;
+ Uint32 end_copy = offset_copy + tuple_size;
+ ndbrequire(rpid < cnoOfPage &&
+ rpid_copy < cnoOfPage &&
+ end <= ZWORDS_ON_PAGE &&
+ end_copy <= ZWORDS_ON_PAGE);
+ void* Tdestination = (void*)&page[rpid].pageWord[offset + 1];
+ const void* Tsource = (void*)&page[rpid_copy].pageWord[offset_copy + 1];
+ MEMCOPY_NO_WORDS(Tdestination, Tsource, (tuple_size - 1));
+ {
+ PagePtr pagePtr;
+
+ pagePtr.i = rpid_copy;
+ ptrAss(pagePtr, page);
+ freeTh(regFragPtr,
+ regTabPtr,
+ signal,
+ pagePtr.p,
+ offset_copy);
+ }
+ regOperPtr->realPageIdC = RNIL;
+ regOperPtr->fragPageIdC = RNIL;
+ regOperPtr->pageOffsetC = ZNIL;
+ regOperPtr->pageIndexC = ZNIL;
+ }//if
+}//Dbtup::abortUpdate()
+
+/* **************************************************************** */
+/* ********************** TRANSACTION ERROR MODULE **************** */
+/* **************************************************************** */
+int Dbtup::TUPKEY_abort(Signal* signal, int error_type)
+{
+ switch(error_type) {
+ case 0:
+ ndbrequire(false);
+ break;
+// Not used currently
+
+ case 1:
+//tmupdate_alloc_error:
+ ljam();
+ break;
+
+ case 2:
+ ndbrequire(false);
+ break;
+// Not used currently
+
+ case 3:
+//tmupdate_alloc_error:
+ ljam();
+ break;
+
+ case 4:
+//Trying to read non-existing attribute identity
+ ljam();
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ break;
+
+ case 6:
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ break;
+
+ case 7:
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ break;
+
+ case 8:
+ ljam();
+ terrorCode = ZATTR_INTERPRETER_ERROR;
+ break;
+
+ case 9:
+//Trying to read non-existing attribute identity
+ ljam();
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ break;
+
+ case 11:
+ ljam();
+ terrorCode = ZATTR_INTERPRETER_ERROR;
+ break;
+
+ case 12:
+ ljam();
+ ndbrequire(false);
+ break;
+
+ case 13:
+ ljam();
+ ndbrequire(false);
+ break;
+
+ case 14:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 15:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 16:
+ ljam();
+ terrorCode = ZTRY_TO_UPDATE_ERROR;
+ break;
+
+ case 17:
+ ljam();
+ terrorCode = ZNO_ILLEGAL_NULL_ATTR;
+ break;
+
+ case 18:
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ break;
+
+ case 19:
+ ljam();
+ terrorCode = ZTRY_TO_UPDATE_ERROR;
+ break;
+
+ case 20:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 21:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 22:
+ ljam();
+ terrorCode = ZTOTAL_LEN_ERROR;
+ break;
+
+ case 23:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 24:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 25:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 26:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 27:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 28:
+ ljam();
+ terrorCode = ZREGISTER_INIT_ERROR;
+ break;
+
+ case 29:
+ ljam();
+ break;
+
+ case 30:
+ ljam();
+ terrorCode = ZCALL_ERROR;
+ break;
+
+ case 31:
+ ljam();
+ terrorCode = ZSTACK_OVERFLOW_ERROR;
+ break;
+
+ case 32:
+ ljam();
+ terrorCode = ZSTACK_UNDERFLOW_ERROR;
+ break;
+
+ case 33:
+ ljam();
+ terrorCode = ZNO_INSTRUCTION_ERROR;
+ break;
+
+ case 34:
+ ljam();
+ terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR;
+ break;
+
+ case 35:
+ ljam();
+ terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR;
+ break;
+
+ case 36:
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ break;
+
+ case 37:
+ ljam();
+ terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
+ break;
+
+ case 38:
+ ljam();
+ terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
+ break;
+
+ case 39:
+ ljam();
+ if (operPtr.p->transstate == TOO_MUCH_AI) {
+ ljam();
+ terrorCode = ZTOO_MUCH_ATTRINFO_ERROR;
+ } else if (operPtr.p->transstate == ERROR_WAIT_TUPKEYREQ) {
+ ljam();
+ terrorCode = ZSEIZE_ATTRINBUFREC_ERROR;
+ } else {
+ ndbrequire(false);
+ }//if
+ break;
+
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ tupkeyErrorLab(signal);
+ return -1;
+}//Dbtup::TUPKEY_abort()
+
+void Dbtup::tupkeyErrorLab(Signal* signal)
+{
+ Operationrec * const regOperPtr = operPtr.p;
+
+ freeAllAttrBuffers(regOperPtr);
+ abortUpdate(signal, regOperPtr, fragptr.p, tabptr.p);
+ removeActiveOpList(regOperPtr);
+ initOpConnection(regOperPtr, fragptr.p);
+ regOperPtr->transstate = IDLE;
+ regOperPtr->tupleState = NO_OTHER_OP;
+ TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend();
+
+ tupKeyRef->userRef = regOperPtr->userpointer;
+ tupKeyRef->errorCode = terrorCode;
+ sendSignal(regOperPtr->userblockref, GSN_TUPKEYREF, signal,
+ TupKeyRef::SignalLength, JBB);
+ return;
+}//Dbtup::tupkeyErrorLab()
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
new file mode 100644
index 00000000000..6527864135b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -0,0 +1,273 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/TransIdAI.hpp>
+
+#define ljam() { jamLine(2000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(2000 + __LINE__); }
+
+void Dbtup::execSEND_PACKED(Signal* signal)
+{
+ Uint16 hostId;
+ Uint32 i;
+ Uint32 TpackedListIndex = cpackedListIndex;
+ ljamEntry();
+ for (i = 0; i < TpackedListIndex; i++) {
+ ljam();
+ hostId = cpackedList[i];
+ ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero
+ Uint32 TpacketTA = hostBuffer[hostId].noOfPacketsTA;
+ if (TpacketTA != 0) {
+ ljam();
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+ Uint32 TpacketLen = hostBuffer[hostId].packetLenTA;
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &hostBuffer[hostId].packetBufferTA[0],
+ TpacketLen);
+ sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB);
+ hostBuffer[hostId].noOfPacketsTA = 0;
+ hostBuffer[hostId].packetLenTA = 0;
+ }//if
+ hostBuffer[hostId].inPackedList = false;
+ }//for
+ cpackedListIndex = 0;
+}//Dbtup::execSEND_PACKED()
+
+void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
+ Uint32 Tlen)
+{
+ if(Tlen == 3)
+ return;
+
+ Uint32 hostId = refToNode(aRef);
+ Uint32 Theader = ((refToBlock(aRef) << 16)+(Tlen-3));
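+ // Each packed entry consists of this header word (block number in the upper
+ // 16 bits, attribute data length Tlen - 3 in the lower 16 bits), the three
+ // TRANSID_AI header words (connect pointer and the two transaction id words,
+ // sig0..sig2) and the attribute data copied from theData[25..] further below.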
+
+ ndbrequire(hostId < MAX_NODES);
+ Uint32 TpacketLen = hostBuffer[hostId].packetLenTA;
+ Uint32 TnoOfPackets = hostBuffer[hostId].noOfPacketsTA;
+ Uint32 sig0 = signal->theData[0];
+ Uint32 sig1 = signal->theData[1];
+ Uint32 sig2 = signal->theData[2];
+
+ BlockReference TBref = numberToRef(API_PACKED, hostId);
+
+ if ((Tlen + TpacketLen + 1) <= 25) {
+// ----------------------------------------------------------------
+// There is still space in the buffer. We will copy it into the
+// buffer.
+// ----------------------------------------------------------------
+ ljam();
+ updatePackedList(signal, hostId);
+ } else if (false && TnoOfPackets == 1) {
+// ----------------------------------------------------------------
+// The buffer is full and there was only one packet buffered. We
+// will send this as a normal signal.
+// ----------------------------------------------------------------
+ Uint32 TnewRef = numberToRef((hostBuffer[hostId].packetBufferTA[0] >> 16),
+ hostId);
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &hostBuffer[hostId].packetBufferTA[1],
+ TpacketLen - 1);
+ sendSignal(TnewRef, GSN_TRANSID_AI, signal, (TpacketLen - 1), JBB);
+ TpacketLen = 0;
+ TnoOfPackets = 0;
+ } else {
+// ----------------------------------------------------------------
+// The buffer is full and holds at least two packets. Send those in
+// packed form.
+// ----------------------------------------------------------------
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &hostBuffer[hostId].packetBufferTA[0],
+ TpacketLen);
+ sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB);
+ TpacketLen = 0;
+ TnoOfPackets = 0;
+ }//if
+// ----------------------------------------------------------------
+// Copy the signal into the buffer
+// ----------------------------------------------------------------
+ hostBuffer[hostId].packetBufferTA[TpacketLen + 0] = Theader;
+ hostBuffer[hostId].packetBufferTA[TpacketLen + 1] = sig0;
+ hostBuffer[hostId].packetBufferTA[TpacketLen + 2] = sig1;
+ hostBuffer[hostId].packetBufferTA[TpacketLen + 3] = sig2;
+ hostBuffer[hostId].noOfPacketsTA = TnoOfPackets + 1;
+ hostBuffer[hostId].packetLenTA = Tlen + TpacketLen + 1;
+ MEMCOPY_NO_WORDS(&hostBuffer[hostId].packetBufferTA[TpacketLen + 4],
+ &signal->theData[25],
+ Tlen - 3);
+}//Dbtup::bufferTRANSID_AI()
+
+void Dbtup::updatePackedList(Signal* signal, Uint16 hostId)
+{
+ if (hostBuffer[hostId].inPackedList == false) {
+ Uint32 TpackedListIndex = cpackedListIndex;
+ ljam();
+ hostBuffer[hostId].inPackedList = true;
+ cpackedList[TpackedListIndex] = hostId;
+ cpackedListIndex = TpackedListIndex + 1;
+ }//if
+}//Dbtup::updatePackedList()
+
+/* ---------------------------------------------------------------- */
+/* ----------------------- SEND READ ATTRINFO --------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::sendReadAttrinfo(Signal* signal,
+ Uint32 ToutBufIndex,
+ const Operationrec * const regOperPtr)
+{
+ const BlockReference recBlockref = regOperPtr->recBlockref;
+ const Uint32 sig0 = regOperPtr->tcOperationPtr;
+ const Uint32 sig1 = regOperPtr->transid1;
+ const Uint32 sig2 = regOperPtr->transid2;
+
+ const Uint32 block = refToBlock(recBlockref);
+ const Uint32 nodeId = refToNode(recBlockref);
+
+ bool connectedToNode = getNodeInfo(nodeId).m_connected;
+ const Uint32 type = getNodeInfo(nodeId).m_type;
+ bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
+ const Uint32 TpacketTA = hostBuffer[nodeId].noOfPacketsTA;
+ const Uint32 TpacketLen = hostBuffer[nodeId].packetLenTA;
+
+ if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){
+ // Use error insert to turn routing on
+ ljam();
+ connectedToNode = false;
+ }
+
+ TransIdAI * transIdAI = (TransIdAI *)signal->getDataPtrSend();
+ transIdAI->connectPtr = sig0;
+ transIdAI->transId[0] = sig1;
+ transIdAI->transId[1] = sig2;
+
+ if (connectedToNode){
+ /**
+ * Own node -> execute direct
+ */
+ if(nodeId != getOwnNodeId()){
+ ljam();
+
+ /**
+ * Send long sig
+ */
+ if(ToutBufIndex >= 22 && is_api && !old_dest) {
+ ljam();
+ /**
+ * Flush buffer so that order is maintained
+ */
+ if (TpacketTA != 0) {
+ ljam();
+ BlockReference TBref = numberToRef(API_PACKED, nodeId);
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &hostBuffer[nodeId].packetBufferTA[0],
+ TpacketLen);
+ sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB);
+ hostBuffer[nodeId].noOfPacketsTA = 0;
+ hostBuffer[nodeId].packetLenTA = 0;
+ transIdAI->connectPtr = sig0;
+ transIdAI->transId[0] = sig1;
+ transIdAI->transId[1] = sig2;
+ }//if
+ LinearSectionPtr ptr[3];
+ ptr[0].p = &signal->theData[25];
+ ptr[0].sz = ToutBufIndex;
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3, JBB, ptr, 1);
+ return;
+ }
+
+ /**
+ * short sig + api -> buffer
+ */
+#ifndef NDB_NO_DROPPED_SIGNAL
+ if (ToutBufIndex < 22 && is_api){
+ ljam();
+ bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex);
+ return;
+ }//if
+#endif
+
+ /**
+ * rest -> old send sig
+ */
+ Uint32 * src = signal->theData+25;
+ if(ToutBufIndex >= 22){
+ do {
+ ljam();
+ MEMCOPY_NO_WORDS(&signal->theData[3], src, 22);
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB);
+ ToutBufIndex -= 22;
+ src += 22;
+ } while(ToutBufIndex >= 22);
+ }
+
+ if(ToutBufIndex > 0){
+ ljam();
+ MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex);
+ sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB);
+ }
+ return;
+ }
+ EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex);
+ ljamEntry();
+ return;
+ }
+
+ /**
+ * If this node does not have a direct connection
+ * to the receiving node we want to send the signals
+ * routed via the node that controls this read
+ */
+ Uint32 routeBlockref = regOperPtr->coordinatorTC;
+
+ if(true){ // TODO is_api && !old_dest){
+ ljam();
+ transIdAI->attrData[0] = recBlockref;
+ LinearSectionPtr ptr[3];
+ ptr[0].p = &signal->theData[25];
+ ptr[0].sz = ToutBufIndex;
+ sendSignal(routeBlockref, GSN_TRANSID_AI_R, signal, 4, JBB, ptr, 1);
+ return;
+ }
+
+ /**
+ * Fill in a TRANSID_AI signal, use last word to store
+ * final destination and send it to route node
+ * as signal TRANSID_AI_R (R as in Routed)
+ */
+ Uint32 tot = ToutBufIndex;
+ Uint32 sent = 0;
+ Uint32 maxLen = TransIdAI::DataLength - 1;
+ while (sent < tot) {
+ ljam();
+ Uint32 dataLen = (tot - sent > maxLen) ? maxLen : tot - sent;
+ Uint32 sigLen = dataLen + TransIdAI::HeaderLength + 1;
+ MEMCOPY_NO_WORDS(&transIdAI->attrData,
+ &signal->theData[25+sent],
+ dataLen);
+ // Set final destination in last word
+ transIdAI->attrData[dataLen] = recBlockref;
+
+ sendSignal(routeBlockref, GSN_TRANSID_AI_R,
+ signal, sigLen, JBB);
+ sent += dataLen;
+ }
+}//Dbtup::sendReadAttrinfo()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
new file mode 100644
index 00000000000..cbd56c3281f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -0,0 +1,586 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/TupCommit.hpp>
+
+#define ljam() { jamLine(5000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(5000 + __LINE__); }
+
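+/* ---------------------------------------------------------------- */
+/* Walk to the last operation in this tuple's active operation list  */
+/* and then, for each operation back to the first one, execute        */
+/* LQH_WRITELOG_REQ directly in the requesting block, passing the     */
+/* operation's user pointer and the GCI.                              */
+/* ---------------------------------------------------------------- */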
+void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
+{
+ jamEntry();
+ OperationrecPtr loopOpPtr;
+ loopOpPtr.i = signal->theData[0];
+ Uint32 gci = signal->theData[1];
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ while (loopOpPtr.p->nextActiveOp != RNIL) {
+ ljam();
+ loopOpPtr.i = loopOpPtr.p->nextActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ }//while
+ do {
+ Uint32 blockNo = refToBlock(loopOpPtr.p->userblockref);
+ ndbrequire(loopOpPtr.p->transstate == STARTED);
+ signal->theData[0] = loopOpPtr.p->userpointer;
+ signal->theData[1] = gci;
+ if (loopOpPtr.p->prevActiveOp == RNIL) {
+ ljam();
+ EXECUTE_DIRECT(blockNo, GSN_LQH_WRITELOG_REQ, signal, 2);
+ return;
+ }//if
+ ljam();
+ EXECUTE_DIRECT(blockNo, GSN_LQH_WRITELOG_REQ, signal, 2);
+ jamEntry();
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ } while (true);
+}//Dbtup::execTUP_WRITELOG_REQ()
+
+void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
+{
+ TablerecPtr regTabPtr;
+ FragrecordPtr regFragPtr;
+
+ jamEntry();
+
+ Uint32 fragId = signal->theData[0];
+ regTabPtr.i = signal->theData[1];
+ Uint32 fragPageId = signal->theData[2];
+ Uint32 pageIndex = signal->theData[3];
+
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+ getFragmentrec(regFragPtr, fragId, regTabPtr.p);
+ ndbrequire(regFragPtr.p != NULL);
+
+ PagePtr pagePtr;
+ pagePtr.i = getRealpid(regFragPtr.p, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageIndexScaled = pageIndex >> 1;
+ ndbrequire((pageIndex & 1) == 0);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE +
+ (regTabPtr.p->tupheadsize * pageIndexScaled);
+//---------------------------------------------------
+/* --- Deallocate a tuple as requested by ACC --- */
+//---------------------------------------------------
+ if (isUndoLoggingNeeded(regFragPtr.p, fragPageId)) {
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_INSERT_TH,
+ fragPageId,
+ pageIndex,
+ regTabPtr.i,
+ fragId,
+ regFragPtr.p->checkpointVersion);
+ cprAddData(signal,
+ regFragPtr.p,
+ pagePtr.i,
+ regTabPtr.p->tupheadsize,
+ pageOffset);
+ }//if
+ {
+ freeTh(regFragPtr.p,
+ regTabPtr.p,
+ signal,
+ pagePtr.p,
+ pageOffset);
+ }
+}
+
+/* ---------------------------------------------------------------- */
+/* ------------ PERFORM A COMMIT ON AN UPDATE OPERATION ---------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::commitUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+ if (regOperPtr->realPageIdC != RNIL) {
+ if (isUndoLoggingNeeded(regFragPtr, regOperPtr->fragPageIdC)) {
+/* ------------------------------------------------------------------------ */
+/* IF THE COPY WAS CREATED WITHIN THIS CHECKPOINT WE ONLY HAVE */
+/* TO LOG THE CREATION OF THE COPY. IF HOWEVER IT WAS CREATED BEFORE */
+/* THE START OF THIS CHECKPOINT, WE HAVE TO LOG THE DATA AS WELL. */
+/* ------------------------------------------------------------------------ */
+ if (regOperPtr->undoLogged) {
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_INSERT_TH_NO_DATA,
+ regOperPtr->fragPageIdC,
+ regOperPtr->pageIndexC,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ } else {
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_INSERT_TH,
+ regOperPtr->fragPageIdC,
+ regOperPtr->pageIndexC,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ cprAddData(signal,
+ regFragPtr,
+ regOperPtr->realPageIdC,
+ regTabPtr->tupheadsize,
+ regOperPtr->pageOffsetC);
+ }//if
+ }//if
+
+ PagePtr copyPagePtr;
+ copyPagePtr.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(copyPagePtr, cnoOfPage, page);
+ freeTh(regFragPtr,
+ regTabPtr,
+ signal,
+ copyPagePtr.p,
+ (Uint32)regOperPtr->pageOffsetC);
+ regOperPtr->realPageIdC = RNIL;
+ regOperPtr->fragPageIdC = RNIL;
+ regOperPtr->pageOffsetC = ZNIL;
+ regOperPtr->pageIndexC = ZNIL;
+ }//if
+}//Dbtup::commitUpdate()
+
+void
+Dbtup::commitSimple(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+ operPtr.p = regOperPtr;
+ fragptr.p = regFragPtr;
+ tabptr.p = regTabPtr;
+
+ // Checking detached triggers
+ checkDetachedTriggers(signal,
+ regOperPtr,
+ regTabPtr);
+
+ removeActiveOpList(regOperPtr);
+ if (regOperPtr->optype == ZUPDATE) {
+ ljam();
+ commitUpdate(signal, regOperPtr, regFragPtr, regTabPtr);
+ if (regTabPtr->GCPIndicator) {
+ updateGcpId(signal, regOperPtr, regFragPtr, regTabPtr);
+ }//if
+ } else if (regOperPtr->optype == ZINSERT) {
+ ljam();
+ if (regTabPtr->GCPIndicator) {
+ updateGcpId(signal, regOperPtr, regFragPtr, regTabPtr);
+ }//if
+ } else {
+ ndbrequire(regOperPtr->optype == ZDELETE);
+ }//if
+}//Dbtup::commitSimple()
+
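+/* ----------------------------------------------------------------- */
+/* Unlink the operation from the per-tuple list of active operations. */
+/* When it is the first in the list, the list head stored in the      */
+/* tuple header word at the operation's page offset is updated.       */
+/* ----------------------------------------------------------------- */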
+void Dbtup::removeActiveOpList(Operationrec* const regOperPtr)
+{
+ if (regOperPtr->inActiveOpList == ZTRUE) {
+ OperationrecPtr raoOperPtr;
+ regOperPtr->inActiveOpList = ZFALSE;
+ if (regOperPtr->prevActiveOp != RNIL) {
+ ljam();
+ raoOperPtr.i = regOperPtr->prevActiveOp;
+ ptrCheckGuard(raoOperPtr, cnoOfOprec, operationrec);
+ raoOperPtr.p->nextActiveOp = regOperPtr->nextActiveOp;
+ } else {
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ ndbrequire(regOperPtr->pageOffset < ZWORDS_ON_PAGE);
+ pagePtr.p->pageWord[regOperPtr->pageOffset] = regOperPtr->nextActiveOp;
+ }//if
+ if (regOperPtr->nextActiveOp != RNIL) {
+ ljam();
+ raoOperPtr.i = regOperPtr->nextActiveOp;
+ ptrCheckGuard(raoOperPtr, cnoOfOprec, operationrec);
+ raoOperPtr.p->prevActiveOp = regOperPtr->prevActiveOp;
+ }//if
+ regOperPtr->prevActiveOp = RNIL;
+ regOperPtr->nextActiveOp = RNIL;
+ }//if
+}//Dbtup::removeActiveOpList()
+
+/* ---------------------------------------------------------------- */
+/* INITIALIZATION OF ONE CONNECTION RECORD TO PREPARE FOR NEXT OP. */
+/* ---------------------------------------------------------------- */
+void Dbtup::initOpConnection(Operationrec* regOperPtr,
+ Fragrecord * fragPtrP)
+{
+ Uint32 RinFragList = regOperPtr->inFragList;
+ regOperPtr->transstate = IDLE;
+ regOperPtr->currentAttrinbufLen = 0;
+ regOperPtr->optype = ZREAD;
+ if (RinFragList == ZTRUE) {
+ OperationrecPtr tropNextLinkPtr;
+ OperationrecPtr tropPrevLinkPtr;
+/*----------------------------------------------------------------- */
+/* TO ENSURE THAT WE HAVE SUCCESSFUL ABORTS OF FOLLOWING */
+/* OPERATIONS WHICH NEVER STARTED WE SET THE OPTYPE TO READ. */
+/*----------------------------------------------------------------- */
+/* REMOVE IT FROM THE DOUBLY LINKED LIST ON THE FRAGMENT */
+/*----------------------------------------------------------------- */
+ tropPrevLinkPtr.i = regOperPtr->prevOprecInList;
+ tropNextLinkPtr.i = regOperPtr->nextOprecInList;
+ regOperPtr->inFragList = ZFALSE;
+ if (tropPrevLinkPtr.i == RNIL) {
+ ljam();
+ fragPtrP->firstusedOprec = tropNextLinkPtr.i;
+ } else {
+ ljam();
+ ptrCheckGuard(tropPrevLinkPtr, cnoOfOprec, operationrec);
+ tropPrevLinkPtr.p->nextOprecInList = tropNextLinkPtr.i;
+ }//if
+ if (tropNextLinkPtr.i == RNIL) {
+ fragPtrP->lastusedOprec = tropPrevLinkPtr.i;
+ } else {
+ ptrCheckGuard(tropNextLinkPtr, cnoOfOprec, operationrec);
+ tropNextLinkPtr.p->prevOprecInList = tropPrevLinkPtr.i;
+ }
+ regOperPtr->prevOprecInList = RNIL;
+ regOperPtr->nextOprecInList = RNIL;
+ }//if
+}//Dbtup::initOpConnection()
+
+/* ----------------------------------------------------------------- */
+/* --------------- COMMIT THIS PART OF A TRANSACTION --------------- */
+/* ----------------------------------------------------------------- */
+void Dbtup::execTUP_COMMITREQ(Signal* signal)
+{
+ FragrecordPtr regFragPtr;
+ OperationrecPtr regOperPtr;
+ TablerecPtr regTabPtr;
+
+ TupCommitReq * const tupCommitReq = (TupCommitReq *)signal->getDataPtr();
+
+ ljamEntry();
+ regOperPtr.i = tupCommitReq->opPtr;
+ ptrCheckGuard(regOperPtr, cnoOfOprec, operationrec);
+
+ ndbrequire(regOperPtr.p->transstate == STARTED);
+ regOperPtr.p->gci = tupCommitReq->gci;
+ regOperPtr.p->hashValue = tupCommitReq->hashValue;
+
+ regFragPtr.i = regOperPtr.p->fragmentPtr;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+
+ regTabPtr.i = regOperPtr.p->tableRef;
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+
+ if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) {
+ ljam();
+ executeTuxCommitTriggers(signal,
+ regOperPtr.p,
+ regTabPtr.p);
+ }
+
+ if (regOperPtr.p->tupleState == NO_OTHER_OP) {
+ if ((regOperPtr.p->prevActiveOp == RNIL) &&
+ (regOperPtr.p->nextActiveOp == RNIL)) {
+ ljam();
+/* ---------------------------------------------------------- */
+// We handle the simple case separately as an optimisation
+/* ---------------------------------------------------------- */
+ commitSimple(signal,
+ regOperPtr.p,
+ regFragPtr.p,
+ regTabPtr.p);
+ } else {
+/* ---------------------------------------------------------- */
+// This is the first commit message of this record in this
+// transaction. We will commit this record completely for this
+// transaction. If there are other operations they will be
+// responsible for releasing their own resources. Also commit of
+// a delete is postponed until the last operation is committed
+// on the tuple.
+//
+// As part of this commitRecord we will also handle detached
+// triggers and release of resources for this operation.
+/* ---------------------------------------------------------- */
+ ljam();
+ commitRecord(signal,
+ regOperPtr.p,
+ regFragPtr.p,
+ regTabPtr.p);
+ removeActiveOpList(regOperPtr.p);
+ }//if
+ } else {
+ ljam();
+/* ---------------------------------------------------------- */
+// Release any copy tuples
+/* ---------------------------------------------------------- */
+ ndbrequire(regOperPtr.p->tupleState == TO_BE_COMMITTED);
+ commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
+ removeActiveOpList(regOperPtr.p);
+ }//if
+ initOpConnection(regOperPtr.p, regFragPtr.p);
+}//execTUP_COMMITREQ()
+
+void
+Dbtup::updateGcpId(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+ PagePtr pagePtr;
+ ljam();
+//--------------------------------------------------------------------
+// Is this code safe for UNDO logging. Not sure currently. RONM
+//--------------------------------------------------------------------
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 temp = regOperPtr->pageOffset + regTabPtr->tupGCPIndex;
+ ndbrequire((temp < ZWORDS_ON_PAGE) &&
+ (regTabPtr->tupGCPIndex < regTabPtr->tupheadsize));
+ if (isUndoLoggingNeeded(regFragPtr, regOperPtr->fragPageId)) {
+ Uint32 prevGCI = pagePtr.p->pageWord[temp];
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_UPDATE_GCI,
+ regOperPtr->fragPageId,
+ regOperPtr->pageIndex,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ cprAddGCIUpdate(signal,
+ prevGCI,
+ regFragPtr);
+ }//if
+ pagePtr.p->pageWord[temp] = regOperPtr->gci;
+ if (regTabPtr->checksumIndicator) {
+ ljam();
+ setChecksum(pagePtr.p, regOperPtr->pageOffset, regTabPtr->tupheadsize);
+ }//if
+}//Dbtup::updateGcpId()
+
+void
+Dbtup::commitRecord(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+ Uint32 opType;
+ OperationrecPtr firstOpPtr;
+ PagePtr pagePtr;
+
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+
+ setTupleStatesSetOpType(regOperPtr, pagePtr.p, opType, firstOpPtr);
+
+ fragptr.p = regFragPtr;
+ tabptr.p = regTabPtr;
+
+ if (opType == ZINSERT_DELETE) {
+ ljam();
+//--------------------------------------------------------------------
+// We started by inserting the tuple and ended by deleting it. Seen from
+// the transaction's point of view no changes were made.
+//--------------------------------------------------------------------
+ commitUpdate(signal, regOperPtr, regFragPtr, regTabPtr);
+ return;
+ } else if (opType == ZINSERT) {
+ ljam();
+//--------------------------------------------------------------------
+// We started by inserting whereafter we made several changes to the
+// tuple that could include updates, deletes and new inserts. The final
+// state of the tuple is the original tuple. This is reached from this
+// operation. We change the optype on this operation to ZINSERT to
+// ensure proper operation of the detached trigger.
+// We restore the optype after executing triggers although not really
+// needed.
+//--------------------------------------------------------------------
+ Uint32 saveOpType = regOperPtr->optype;
+ regOperPtr->optype = ZINSERT;
+ operPtr.p = regOperPtr;
+
+ checkDetachedTriggers(signal,
+ regOperPtr,
+ regTabPtr);
+
+ regOperPtr->optype = saveOpType;
+ } else if (opType == ZUPDATE) {
+ ljam();
+//--------------------------------------------------------------------
+// We want to use the first operation which contains a copy tuple
+// reference. This operation contains the before value of this record
+// for this transaction. Then this operation is used for executing
+// triggers with optype set to update.
+//--------------------------------------------------------------------
+ OperationrecPtr befOpPtr;
+ findBeforeValueOperation(befOpPtr, firstOpPtr);
+
+ Uint32 saveOpType = befOpPtr.p->optype;
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+ Bitmask<MAXNROFATTRIBUTESINWORDS> saveAttributeMask;
+
+ calculateChangeMask(pagePtr.p,
+ regTabPtr,
+ befOpPtr.p->pageOffset,
+ attributeMask);
+
+ saveAttributeMask.clear();
+ saveAttributeMask.bitOR(befOpPtr.p->changeMask);
+ befOpPtr.p->changeMask.clear();
+ befOpPtr.p->changeMask.bitOR(attributeMask);
+
+ operPtr.p = befOpPtr.p;
+ checkDetachedTriggers(signal,
+ befOpPtr.p,
+ regTabPtr);
+
+ befOpPtr.p->changeMask.clear();
+ befOpPtr.p->changeMask.bitOR(saveAttributeMask);
+
+ befOpPtr.p->optype = saveOpType;
+ } else if (opType == ZDELETE) {
+ ljam();
+//--------------------------------------------------------------------
+// We want to use the first operation which contains a copy tuple.
+// We benefit from the fact that we know that it cannot be a simple
+// delete and it cannot be an insert followed by a delete. Thus there
+// must either be an update or an insert following a delete. In both
+// cases we will find a before value in a copy tuple.
+//
+// An added complexity is that the trigger handling assumes that the
+// before value is located in the original tuple so we have to move the
+// copy tuple reference to the original tuple reference and afterwards
+// restore it again.
+//--------------------------------------------------------------------
+ OperationrecPtr befOpPtr;
+ findBeforeValueOperation(befOpPtr, firstOpPtr);
+ Uint32 saveOpType = befOpPtr.p->optype;
+
+ Uint32 realPageId = befOpPtr.p->realPageId;
+ Uint32 pageOffset = befOpPtr.p->pageOffset;
+ Uint32 fragPageId = befOpPtr.p->fragPageId;
+ Uint32 pageIndex = befOpPtr.p->pageIndex;
+
+ befOpPtr.p->realPageId = befOpPtr.p->realPageIdC;
+ befOpPtr.p->pageOffset = befOpPtr.p->pageOffsetC;
+ befOpPtr.p->fragPageId = befOpPtr.p->fragPageIdC;
+ befOpPtr.p->pageIndex = befOpPtr.p->pageIndexC;
+
+ operPtr.p = befOpPtr.p;
+ checkDetachedTriggers(signal,
+ befOpPtr.p,
+ regTabPtr);
+
+ befOpPtr.p->realPageId = realPageId;
+ befOpPtr.p->pageOffset = pageOffset;
+ befOpPtr.p->fragPageId = fragPageId;
+ befOpPtr.p->pageIndex = pageIndex;
+ befOpPtr.p->optype = saveOpType;
+ } else {
+ ndbrequire(false);
+ }//if
+
+ commitUpdate(signal, regOperPtr, regFragPtr, regTabPtr);
+ if (regTabPtr->GCPIndicator) {
+ updateGcpId(signal, regOperPtr, regFragPtr, regTabPtr);
+ }//if
+}//Dbtup::commitRecord()
+
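+/* ----------------------------------------------------------------- */
+/* Scan the active operation list anchored in the tuple header word,  */
+/* mark every operation TO_BE_COMMITTED, return the first (oldest)    */
+/* operation and classify the net operation type as ZUPDATE, ZDELETE, */
+/* ZINSERT or ZINSERT_DELETE.                                         */
+/* ----------------------------------------------------------------- */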
+void
+Dbtup::setTupleStatesSetOpType(Operationrec* const regOperPtr,
+ Page* const pagePtr,
+ Uint32& opType,
+ OperationrecPtr& firstOpPtr)
+{
+ OperationrecPtr loopOpPtr;
+ OperationrecPtr lastOpPtr;
+
+ ndbrequire(regOperPtr->pageOffset < ZWORDS_ON_PAGE);
+ loopOpPtr.i = pagePtr->pageWord[regOperPtr->pageOffset];
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ lastOpPtr = loopOpPtr;
+ if (loopOpPtr.p->optype == ZDELETE) {
+ ljam();
+ opType = ZDELETE;
+ } else {
+ ljam();
+ opType = ZUPDATE;
+ }//if
+ do {
+ ljam();
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ firstOpPtr = loopOpPtr;
+ loopOpPtr.p->tupleState = TO_BE_COMMITTED;
+ loopOpPtr.i = loopOpPtr.p->nextActiveOp;
+ } while (loopOpPtr.i != RNIL);
+ if (opType == ZDELETE) {
+ ljam();
+ if (firstOpPtr.p->optype == ZINSERT) {
+ ljam();
+ opType = ZINSERT_DELETE;
+ }//if
+ } else {
+ ljam();
+ if (firstOpPtr.p->optype == ZINSERT) {
+ ljam();
+ opType = ZINSERT;
+ }//if
+ }//if
+}//Dbtup::setTupleStatesSetOpType()
+
+void Dbtup::findBeforeValueOperation(OperationrecPtr& befOpPtr,
+ OperationrecPtr firstOpPtr)
+{
+ befOpPtr = firstOpPtr;
+ if (befOpPtr.p->realPageIdC != RNIL) {
+ ljam();
+ return;
+ } else {
+ ljam();
+ befOpPtr.i = befOpPtr.p->prevActiveOp;
+ ptrCheckGuard(befOpPtr, cnoOfOprec, operationrec);
+ ndbrequire(befOpPtr.p->realPageIdC != RNIL);
+ }//if
+}//Dbtup::findBeforeValueOperation()
+
+void
+Dbtup::calculateChangeMask(Page* const pagePtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageOffset,
+ Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask)
+{
+ OperationrecPtr loopOpPtr;
+
+ attributeMask.clear();
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ loopOpPtr.i = pagePtr->pageWord[pageOffset];
+ do {
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ if (loopOpPtr.p->optype == ZUPDATE) {
+ ljam();
+ attributeMask.bitOR(loopOpPtr.p->changeMask);
+ } else if (loopOpPtr.p->optype == ZINSERT) {
+ ljam();
+ attributeMask.set();
+ return;
+ } else {
+ ndbrequire(loopOpPtr.p->optype == ZDELETE);
+ }//if
+ loopOpPtr.i = loopOpPtr.p->nextActiveOp;
+ } while (loopOpPtr.i != RNIL);
+}//Dbtup::calculateChangeMask()
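
The mask accumulation above can be summarized in a standalone sketch. This is illustrative only, not NDB code: the simplified Op struct, the accumulateChangeMask name, and the use of std::bitset in place of the kernel's Bitmask class are all invented for the example.

#include <bitset>
#include <iostream>
#include <vector>

// Simplified operation record: only the fields the sketch needs.
enum OpType { INSERT_OP, UPDATE_OP, DELETE_OP };

struct Op {
  OpType type;
  std::bitset<64> changeMask;   // attributes touched by this operation
};

// Accumulate which attributes changed across a chain of operations on one
// tuple: updates OR in their masks, an insert means every attribute changed,
// and deletes contribute nothing, mirroring the shape of calculateChangeMask().
std::bitset<64> accumulateChangeMask(const std::vector<Op>& ops)
{
  std::bitset<64> mask;
  for (const Op& op : ops) {
    if (op.type == UPDATE_OP) {
      mask |= op.changeMask;
    } else if (op.type == INSERT_OP) {
      mask.set();               // all attributes considered changed
      return mask;
    }
  }
  return mask;
}

int main()
{
  std::vector<Op> ops = {
    { UPDATE_OP, std::bitset<64>("0011") },   // changed attributes 0 and 1
    { UPDATE_OP, std::bitset<64>("0100") }    // changed attribute 2
  };
  std::cout << accumulateChangeMask(ops).to_ulong() << "\n";   // prints 7
}
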
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
new file mode 100644
index 00000000000..8c43de52a75
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -0,0 +1,411 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/EventReport.hpp>
+#include <Vector.hpp>
+
+#define ljam() { jamLine(30000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(30000 + __LINE__); }
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ------------------------ DEBUG MODULE -------------------------- */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+void Dbtup::execDEBUG_SIG(Signal* signal)
+{
+ PagePtr regPagePtr;
+ ljamEntry();
+ regPagePtr.i = signal->theData[0];
+ ptrCheckGuard(regPagePtr, cnoOfPage, page);
+}//Dbtup::execDEBUG_SIG()
+
+#ifdef TEST_MR
+#include <time.h>
+
+void startTimer(struct timespec *tp)
+{
+ clock_gettime(CLOCK_REALTIME, tp);
+}//startTimer()
+
+int stopTimer(struct timespec *tp)
+{
+ double timer_count;
+ struct timespec theStopTime;
+ clock_gettime(CLOCK_REALTIME, &theStopTime);
+ timer_count = (double)(1000000*((double)theStopTime.tv_sec - (double)tp->tv_sec)) +
+ (double)((double)((double)theStopTime.tv_nsec - (double)tp->tv_nsec)/(double)1000);
+ return (int)timer_count;
+}//stopTimer()
+
+#endif // end TEST_MR
+
+struct Chunk {
+ Uint32 pageId;
+ Uint32 pageCount;
+};
+
+void
+Dbtup::reportMemoryUsage(Signal* signal, int incDec){
+ signal->theData[0] = NDB_LE_MemoryUsage;
+ signal->theData[1] = incDec;
+ signal->theData[2] = sizeof(Page);
+ signal->theData[3] = cnoOfAllocatedPages;
+ signal->theData[4] = cnoOfPage;
+ signal->theData[5] = DBTUP;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
+}
+
+void
+Dbtup::execDUMP_STATE_ORD(Signal* signal)
+{
+ Uint32 type = signal->theData[0];
+ if(type == DumpStateOrd::DumpPageMemory){
+ reportMemoryUsage(signal, 0);
+ return;
+ }
+ DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0];
+
+#if 0
+ if (type == 100) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 101) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 102) {
+ RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_REL_TABMEMREQ, signal,
+ RelTabMemReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 103) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 2;
+ req->secondaryTableId = RNIL;
+ req->userPtr = 2;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 104) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 4;
+ req->secondaryTableId = 5;
+ req->userPtr = 4;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+ if (type == 105) {
+ DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend();
+ req->primaryTableId = 6;
+ req->secondaryTableId = 8;
+ req->userPtr = 6;
+ req->userRef = DBDICT_REF;
+ sendSignal(cownref, GSN_DROP_TABFILEREQ, signal,
+ DropTabFileReq::SignalLength, JBB);
+ return;
+ }//if
+#endif
+#ifdef ERROR_INSERT
+ if (type == DumpStateOrd::EnableUndoDelayDataWrite) {
+ ndbout << "Dbtup:: delay write of datapages for table = "
+ << dumpState->args[1]<< endl;
+ c_errorInsert4000TableId = dumpState->args[1];
+ SET_ERROR_INSERT_VALUE(4000);
+ return;
+ }//if
+#endif
+#ifdef VM_TRACE
+ if (type == 1211){
+ ndbout_c("Startar modul test av Page Manager");
+
+ Vector<Chunk> chunks;
+ const Uint32 LOOPS = 1000;
+ for(Uint32 i = 0; i<LOOPS; i++){
+
+ // Case
+ Uint32 c = (rand() % 3);
+ const Uint32 free = cnoOfPage - cnoOfAllocatedPages;
+
+ Uint32 alloc = 0;
+ if(free <= 1){
+ c = 0;
+ alloc = 1;
+ } else
+ alloc = 1 + (rand() % (free - 1));
+
+ if(chunks.size() == 0 && c == 0){
+ c = 1 + rand() % 2;
+ }
+
+ ndbout_c("loop=%d case=%d free=%d alloc=%d", i, c, free, alloc);
+ switch(c){
+ case 0:{ // Release
+ const int ch = rand() % chunks.size();
+ Chunk chunk = chunks[ch];
+ chunks.erase(ch);
+ returnCommonArea(chunk.pageId, chunk.pageCount);
+ }
+ break;
+ case 2: { // Seize(n) - fail
+ alloc += free;
+ // Fall through
+ }
+ case 1: { // Seize(n) (success)
+
+ Chunk chunk;
+ allocConsPages(alloc, chunk.pageCount, chunk.pageId);
+ ndbrequire(chunk.pageCount <= alloc);
+ if(chunk.pageCount != 0){
+ chunks.push_back(chunk);
+ if(chunk.pageCount != alloc) {
+ ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
+ alloc, chunk.pageCount, free);
+ }
+ } else {
+ ndbout_c(" Failed to alloc %d pages with %d pages free",
+ alloc, free);
+ }
+
+ for(Uint32 i = 0; i<chunk.pageCount; i++){
+ PagePtr pagePtr;
+ pagePtr.i = chunk.pageId + i;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
+ }
+
+ if(alloc == 1 && free > 0)
+ ndbrequire(chunk.pageCount == alloc);
+ }
+ break;
+ }
+ }
+ while(chunks.size() > 0){
+ Chunk chunk = chunks.back();
+ returnCommonArea(chunk.pageId, chunk.pageCount);
+ chunks.erase(chunks.size() - 1);
+ }
+ }
+#endif
+}//Dbtup::execDUMP_STATE_ORD()
+
+/* ---------------------------------------------------------------- */
+/* --------- MEMORY CHECK ----------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execMEMCHECKREQ(Signal* signal)
+{
+ PagePtr regPagePtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ CheckpointInfoPtr ciPtr;
+ UndoPagePtr regUndoPagePtr;
+ Uint32* data = &signal->theData[0];
+
+ ljamEntry();
+ BlockReference blockref = signal->theData[0];
+ Uint32 i;
+ for (i = 0; i < 25; i++) {
+ ljam();
+ data[i] = 0;
+ }//for
+ for (i = 0; i < 16; i++) {
+ regPagePtr.i = cfreepageList[i];
+ ljam();
+ while (regPagePtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(regPagePtr, cnoOfPage, page);
+ regPagePtr.i = regPagePtr.p->pageWord[ZPAGE_NEXT_POS];
+ data[0]++;
+ }//while
+ }//for
+ regUndoPagePtr.i = cfirstfreeUndoSeg;
+ while (regUndoPagePtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+ regUndoPagePtr.i = regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
+ data[1] += ZUB_SEGMENT_SIZE;
+ }//while
+ ciPtr.i = cfirstfreeLcp;
+ while (ciPtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ ciPtr.i = ciPtr.p->lcpNextRec;
+ data[2]++;
+ }//while
+ dbsiPtr.i = cfirstfreePdx;
+ while (dbsiPtr.i != ZNIL) {
+ ljam();
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ dbsiPtr.i = dbsiPtr.p->pdxNextRec;
+ data[3]++;
+ }//while
+ sendSignal(blockref, GSN_MEMCHECKCONF, signal, 25, JBB);
+}//Dbtup::execMEMCHECKREQ()
+
+// ------------------------------------------------------------------------
+// Help function to be used when debugging. Prints out a tuple page.
+// printLimit is the number of words that are printed out from the page. A
+// page is of size 32768 bytes as of March 2003.
+// ------------------------------------------------------------------------
+void Dbtup::printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit)
+{
+ PagePtr tmpPageP;
+ FragrecordPtr tmpFragP;
+ TablerecPtr tmpTableP;
+ Uint32 tmpTupleSize;
+
+ tmpPageP.i = pageid;
+ ptrCheckGuard(tmpPageP, cnoOfPage, page);
+
+ tmpFragP.i = fragid;
+ ptrCheckGuard(tmpFragP, cnoOfFragrec, fragrecord);
+
+ tmpTableP.i = tmpFragP.p->fragTableId;
+ ptrCheckGuard(tmpTableP, cnoOfTablerec, tablerec);
+
+ tmpTupleSize = tmpTableP.p->tupheadsize;
+
+ ndbout << "Fragid: " << fragid << " Pageid: " << pageid << endl
+ << "----------------------------------------" << endl;
+
+ ndbout << "PageHead : ";
+ for (Uint32 i1 = 0; i1 < ZPAGE_HEADER_SIZE; i1++) {
+ if (i1 == 3)
+ ndbout << (tmpPageP.p->pageWord[i1] >> 16) << "," << (tmpPageP.p->pageWord[i1] & 0xffff) << " ";
+ else if (tmpPageP.p->pageWord[i1] == 4059165169u)
+ ndbout << "F1F1F1F1 ";
+ else if (tmpPageP.p->pageWord[i1] == 268435455u)
+ ndbout << "RNIL ";
+ else
+ ndbout << tmpPageP.p->pageWord[i1] << " ";
+ }//for
+ ndbout << endl;
+ for (Uint32 i = ZPAGE_HEADER_SIZE; i < printLimit; i += tmpTupleSize) {
+ ndbout << "pagepos " << i << " : ";
+
+ for (Uint32 j = i; j < i + tmpTupleSize; j++) {
+ if (tmpPageP.p->pageWord[j] == 4059165169u)
+ ndbout << "F1F1F1F1 ";
+ else if (tmpPageP.p->pageWord[j] == 268435455u)
+ ndbout << "RNIL ";
+ else
+ ndbout << tmpPageP.p->pageWord[j] << " ";
+ }//for
+ ndbout << endl;
+ }//for
+}//Dbtup::printoutTuplePage
+
+#ifdef VM_TRACE
+NdbOut&
+operator<<(NdbOut& out, const Dbtup::Operationrec& op)
+{
+ out << "[Operationrec " << hex << &op;
+ // table
+ out << " [tableRef " << dec << op.tableRef << "]";
+ out << " [fragId " << dec << op.fragId << "]";
+ out << " [fragmentPtr " << hex << op.fragmentPtr << "]";
+ // type
+ out << " [optype " << dec << op.optype << "]";
+ out << " [deleteInsertFlag " << dec << op.deleteInsertFlag << "]";
+ out << " [dirtyOp " << dec << op.dirtyOp << "]";
+ out << " [interpretedExec " << dec << op.interpretedExec << "]";
+ out << " [opSimple " << dec << op.opSimple << "]";
+ // state
+ out << " [tupleState " << dec << (Uint32) op.tupleState << "]";
+ out << " [transstate " << dec << (Uint32) op.transstate << "]";
+ out << " [inFragList " << dec << op.inFragList << "]";
+ out << " [inActiveOpList " << dec << op.inActiveOpList << "]";
+ out << " [undoLogged " << dec << (Uint32) op.undoLogged << "]";
+ // links
+ out << " [prevActiveOp " << hex << op.prevActiveOp << "]";
+ out << " [nextActiveOp " << hex << op.nextActiveOp << "]";
+ // tuples
+ out << " [tupVersion " << hex << op.tupVersion << "]";
+ out << " [fragPageId " << dec << op.fragPageId << "]";
+ out << " [pageIndex " << dec << op.pageIndex << "]";
+ out << " [realPageId " << hex << op.realPageId << "]";
+ out << " [pageOffset " << dec << op.pageOffset << "]";
+ out << " [fragPageIdC " << dec << op.fragPageIdC << "]";
+ out << " [pageIndexC " << dec << op.pageIndexC << "]";
+ out << " [realPageIdC " << hex << op.realPageIdC << "]";
+ out << " [pageOffsetC " << dec << op.pageOffsetC << "]";
+ // trans
+ out << " [transid1 " << hex << op.transid1 << "]";
+ out << " [transid2 " << hex << op.transid2 << "]";
+ out << "]";
+ return out;
+}
+
+// uses global tabptr
+NdbOut&
+operator<<(NdbOut& out, const Dbtup::Th& th)
+{
+ // ugly
+ Dbtup* tup = (Dbtup*)globalData.getBlock(DBTUP);
+ const Dbtup::Tablerec& tab = *tup->tabptr.p;
+ unsigned i = 0;
+ out << "[Th " << hex << &th;
+ out << " [op " << hex << th.data[i++] << "]";
+ out << " [version " << hex << (Uint16)th.data[i++] << "]";
+ if (tab.checksumIndicator)
+ out << " [checksum " << hex << th.data[i++] << "]";
+ out << " [nullbits";
+ for (unsigned j = 0; j < tab.tupNullWords; j++)
+ out << " " << hex << th.data[i++];
+ out << "]";
+ if (tab.GCPIndicator)
+ out << " [gcp " << dec << th.data[i++] << "]";
+ out << " [data";
+ while (i < tab.tupheadsize)
+ out << " " << hex << th.data[i++];
+ out << "]";
+ out << "]";
+ return out;
+}
+#endif
+
+#ifdef VM_TRACE
+template class Vector<Chunk>;
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
new file mode 100644
index 00000000000..761f959acdc
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -0,0 +1,2052 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+#include <Interpreter.hpp>
+#include <signaldata/TupCommit.hpp>
+#include <signaldata/TupKey.hpp>
+#include <NdbSqlUtil.hpp>
+
+/* ----------------------------------------------------------------- */
+/* ----------- INIT_STORED_OPERATIONREC -------------- */
+/* ----------------------------------------------------------------- */
+int Dbtup::initStoredOperationrec(Operationrec* const regOperPtr,
+ Uint32 storedId)
+{
+ jam();
+ StoredProcPtr storedPtr;
+ c_storedProcPool.getPtr(storedPtr, storedId);
+ if (storedPtr.i != RNIL) {
+ if (storedPtr.p->storedCode == ZSCAN_PROCEDURE) {
+ storedPtr.p->storedCounter++;
+ regOperPtr->firstAttrinbufrec = storedPtr.p->storedLinkFirst;
+ regOperPtr->lastAttrinbufrec = storedPtr.p->storedLinkLast;
+ regOperPtr->attrinbufLen = storedPtr.p->storedProcLength;
+ regOperPtr->currentAttrinbufLen = storedPtr.p->storedProcLength;
+ return ZOK;
+ }//if
+ }//if
+ terrorCode = ZSTORED_PROC_ID_ERROR;
+ return terrorCode;
+}//Dbtup::initStoredOperationrec()
+
+void Dbtup::copyAttrinfo(Signal* signal,
+ Operationrec * const regOperPtr,
+ Uint32* inBuffer)
+{
+ AttrbufrecPtr copyAttrBufPtr;
+ Uint32 RnoOfAttrBufrec = cnoOfAttrbufrec;
+ int RbufLen;
+ Uint32 RinBufIndex = 0;
+ Uint32 Rnext;
+ Uint32 Rfirst;
+ Uint32 TstoredProcedure = (regOperPtr->storedProcedureId != ZNIL);
+ Uint32 RnoFree = cnoFreeAttrbufrec;
+
+//-------------------------------------------------------------------------
+// As a prelude to the execution of the TUPKEYREQ we will copy the program
+// into the inBuffer to enable easy execution without any complex jumping
+// between the buffers. In particular this will make the interpreter less
+// complex. Hopefully it also improves performance.
+//-------------------------------------------------------------------------
+ copyAttrBufPtr.i = regOperPtr->firstAttrinbufrec;
+ while (copyAttrBufPtr.i != RNIL) {
+ jam();
+ ndbrequire(copyAttrBufPtr.i < RnoOfAttrBufrec);
+ ptrAss(copyAttrBufPtr, attrbufrec);
+ RbufLen = copyAttrBufPtr.p->attrbuf[ZBUF_DATA_LEN];
+ Rnext = copyAttrBufPtr.p->attrbuf[ZBUF_NEXT];
+ Rfirst = cfirstfreeAttrbufrec;
+ MEMCOPY_NO_WORDS(&inBuffer[RinBufIndex],
+ &copyAttrBufPtr.p->attrbuf[0],
+ RbufLen);
+ RinBufIndex += RbufLen;
+ if (!TstoredProcedure) {
+ copyAttrBufPtr.p->attrbuf[ZBUF_NEXT] = Rfirst;
+ cfirstfreeAttrbufrec = copyAttrBufPtr.i;
+ RnoFree++;
+ }//if
+ copyAttrBufPtr.i = Rnext;
+ }//while
+ cnoFreeAttrbufrec = RnoFree;
+ if (TstoredProcedure) {
+ jam();
+ StoredProcPtr storedPtr;
+ c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId);
+ ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
+ storedPtr.p->storedCounter--;
+ regOperPtr->storedProcedureId = ZNIL;
+ }//if
+ // Release the ATTRINFO buffers
+ regOperPtr->firstAttrinbufrec = RNIL;
+ regOperPtr->lastAttrinbufrec = RNIL;
+}//Dbtup::copyAttrinfo()
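
The flattening step described in the comment above amounts to walking a chain of buffer segments and concatenating their payloads into one program buffer. A minimal standalone sketch, assuming a toy Segment pool indexed by integers rather than the real attrbufrec records:

#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-in for the linked ATTRINFO buffers: each segment carries a
// payload and the index of the next segment (-1 terminates the chain).
struct Segment {
  int next;
  std::vector<uint32_t> data;
};

// Copy a chain of segments into one flat program buffer, in chain order,
// the same flattening idea copyAttrinfo() applies before execution.
std::vector<uint32_t> flatten(const std::vector<Segment>& pool, int first)
{
  std::vector<uint32_t> out;
  for (int i = first; i != -1; i = pool[i].next)
    out.insert(out.end(), pool[i].data.begin(), pool[i].data.end());
  return out;
}

int main()
{
  std::vector<Segment> pool = {
    { 2, {1, 2} },      // segment 0 -> segment 2
    { -1, {5} },        // segment 1, last in the chain
    { 1, {3, 4} }       // segment 2 -> segment 1
  };
  for (uint32_t w : flatten(pool, 0)) std::cout << w << ' ';   // 1 2 3 4 5
  std::cout << "\n";
}
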
+
+void Dbtup::handleATTRINFOforTUPKEYREQ(Signal* signal,
+ Uint32 length,
+ Operationrec * const regOperPtr)
+{
+ AttrbufrecPtr TAttrinbufptr;
+ TAttrinbufptr.i = cfirstfreeAttrbufrec;
+ if ((cfirstfreeAttrbufrec < cnoOfAttrbufrec) &&
+ (cnoFreeAttrbufrec > MIN_ATTRBUF)) {
+ ptrAss(TAttrinbufptr, attrbufrec);
+ MEMCOPY_NO_WORDS(&TAttrinbufptr.p->attrbuf[0],
+ &signal->theData[3],
+ length);
+ Uint32 RnoFree = cnoFreeAttrbufrec;
+ Uint32 Rnext = TAttrinbufptr.p->attrbuf[ZBUF_NEXT];
+ TAttrinbufptr.p->attrbuf[ZBUF_DATA_LEN] = length;
+ TAttrinbufptr.p->attrbuf[ZBUF_NEXT] = RNIL;
+
+ AttrbufrecPtr locAttrinbufptr;
+ Uint32 RnewLen = regOperPtr->currentAttrinbufLen;
+
+ locAttrinbufptr.i = regOperPtr->lastAttrinbufrec;
+ cfirstfreeAttrbufrec = Rnext;
+ cnoFreeAttrbufrec = RnoFree - 1;
+ RnewLen += length;
+ regOperPtr->lastAttrinbufrec = TAttrinbufptr.i;
+ regOperPtr->currentAttrinbufLen = RnewLen;
+ if (locAttrinbufptr.i == RNIL) {
+ regOperPtr->firstAttrinbufrec = TAttrinbufptr.i;
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(locAttrinbufptr, cnoOfAttrbufrec, attrbufrec);
+ locAttrinbufptr.p->attrbuf[ZBUF_NEXT] = TAttrinbufptr.i;
+ }//if
+ if (RnewLen < ZATTR_BUFFER_SIZE) {
+ return;
+ } else {
+ jam();
+ regOperPtr->transstate = TOO_MUCH_AI;
+ return;
+ }//if
+ } else if (cnoFreeAttrbufrec <= MIN_ATTRBUF) {
+ jam();
+ regOperPtr->transstate = ERROR_WAIT_TUPKEYREQ;
+ } else {
+ ndbrequire(false);
+ }//if
+}//Dbtup::handleATTRINFOforTUPKEYREQ()
+
+void Dbtup::execATTRINFO(Signal* signal)
+{
+ OperationrecPtr regOpPtr;
+ Uint32 Rsig0 = signal->theData[0];
+ Uint32 Rlen = signal->length();
+ regOpPtr.i = Rsig0;
+
+ jamEntry();
+
+ ptrCheckGuard(regOpPtr, cnoOfOprec, operationrec);
+ if (regOpPtr.p->transstate == IDLE) {
+ handleATTRINFOforTUPKEYREQ(signal, Rlen - 3, regOpPtr.p);
+ return;
+ } else if (regOpPtr.p->transstate == WAIT_STORED_PROCEDURE_ATTR_INFO) {
+ storedProcedureAttrInfo(signal, regOpPtr.p, Rlen - 3, 3, false);
+ return;
+ }//if
+ switch (regOpPtr.p->transstate) {
+ case ERROR_WAIT_STORED_PROCREQ:
+ jam();
+ case TOO_MUCH_AI:
+ jam();
+ case ERROR_WAIT_TUPKEYREQ:
+ jam();
+ return; /* IGNORE ATTRINFO IN THOSE STATES, WAITING FOR ABORT SIGNAL */
+ break;
+ case DISCONNECTED:
+ jam();
+ case STARTED:
+ jam();
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dbtup::execATTRINFO()
+
+void Dbtup::execTUP_ALLOCREQ(Signal* signal)
+{
+ OperationrecPtr regOperPtr;
+ TablerecPtr regTabPtr;
+ FragrecordPtr regFragPtr;
+
+ jamEntry();
+
+ regOperPtr.i = signal->theData[0];
+ regFragPtr.i = signal->theData[1];
+ regTabPtr.i = signal->theData[2];
+
+ if (!((regOperPtr.i < cnoOfOprec) &&
+ (regFragPtr.i < cnoOfFragrec) &&
+ (regTabPtr.i < cnoOfTablerec))) {
+ ndbrequire(false);
+ }//if
+ ptrAss(regOperPtr, operationrec);
+ ptrAss(regFragPtr, fragrecord);
+ ptrAss(regTabPtr, tablerec);
+
+//---------------------------------------------------
+/* --- Allocate a tuple as requested by ACC --- */
+//---------------------------------------------------
+ PagePtr pagePtr;
+ Uint32 pageOffset;
+ if (!allocTh(regFragPtr.p,
+ regTabPtr.p,
+ NORMAL_PAGE,
+ signal,
+ pageOffset,
+ pagePtr)) {
+ signal->theData[0] = terrorCode; // Indicate failure
+ return;
+ }//if
+ Uint32 fragPageId = pagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ Uint32 pageIndex = ((pageOffset - ZPAGE_HEADER_SIZE) /
+ regTabPtr.p->tupheadsize) << 1;
+ regOperPtr.p->tableRef = regTabPtr.i;
+ regOperPtr.p->fragId = regFragPtr.p->fragmentId;
+ regOperPtr.p->realPageId = pagePtr.i;
+ regOperPtr.p->fragPageId = fragPageId;
+ regOperPtr.p->pageOffset = pageOffset;
+ regOperPtr.p->pageIndex = pageIndex;
+ /* -------------------------------------------------------------- */
+ /* AN INSERT IS UNDONE BY FREEING THE DATA OCCUPIED BY THE INSERT */
+ /* THE ONLY DATA WE HAVE TO LOG EXCEPT THE TYPE, PAGE AND INDEX */
+ /* IS THE AMOUNT OF DATA TO FREE */
+ /* -------------------------------------------------------------- */
+ if (isUndoLoggingNeeded(regFragPtr.p, fragPageId)) {
+ jam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_DELETE_TH,
+ fragPageId,
+ pageIndex,
+ regTabPtr.i,
+ regFragPtr.p->fragmentId,
+ regFragPtr.p->checkpointVersion);
+ }//if
+
+ //---------------------------------------------------------------
+ // Initialise Active operation list by setting the list to empty
+ //---------------------------------------------------------------
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ pagePtr.p->pageWord[pageOffset] = RNIL;
+
+ signal->theData[0] = 0;
+ signal->theData[1] = fragPageId;
+ signal->theData[2] = pageIndex;
+}//Dbtup::execTUP_ALLOCREQ()
+
+void
+Dbtup::setChecksum(Page* const pagePtr, Uint32 tupHeadOffset, Uint32 tupHeadSize)
+{
+ // 2 == regTabPtr.p->tupChecksumIndex
+ pagePtr->pageWord[tupHeadOffset + 2] = 0;
+ Uint32 checksum = calculateChecksum(pagePtr, tupHeadOffset, tupHeadSize);
+ pagePtr->pageWord[tupHeadOffset + 2] = checksum;
+}//Dbtup::setChecksum()
+
+Uint32
+Dbtup::calculateChecksum(Page* pagePtr,
+ Uint32 tupHeadOffset,
+ Uint32 tupHeadSize)
+{
+ Uint32 checksum = 0;
+ Uint32 loopStop = tupHeadOffset + tupHeadSize;
+ ndbrequire(loopStop <= ZWORDS_ON_PAGE);
+ // includes tupVersion
+ for (Uint32 i = tupHeadOffset + 1; i < loopStop; i++) {
+ checksum ^= pagePtr->pageWord[i];
+ }//for
+ return checksum;
+}//Dbtup::calculateChecksum()
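
The checksum scheme above is a plain XOR fold over the tuple header words, skipping word 0 and with the checksum slot cleared before computing. A minimal standalone sketch (a plain std::vector stands in for a Dbtup page; the slot position at offset +2 follows setChecksum above):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// XOR-fold all header words except word 0 (the operation pointer slot).
uint32_t xorChecksum(const std::vector<uint32_t>& words,
                     std::size_t headOffset, std::size_t headSize)
{
  uint32_t checksum = 0;
  for (std::size_t i = headOffset + 1; i < headOffset + headSize; i++)
    checksum ^= words[i];
  return checksum;
}

int main()
{
  std::vector<uint32_t> page(32, 0);
  const std::size_t off = 8, size = 6;
  page[off + 1] = 0x1234;          // tuple version word
  page[off + 3] = 0xdeadbeefu;     // some attribute data
  page[off + 2] = 0;               // clear the checksum slot first
  page[off + 2] = xorChecksum(page, off, size);
  // A clean tuple XOR-folds to zero once the stored checksum is included,
  // which is the "!= 0" corruption test used before reads and updates.
  assert(xorChecksum(page, off, size) == 0);
  std::cout << "checksum verifies\n";
}
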
+
+/* ----------------------------------------------------------------- */
+/* ----------- INSERT_ACTIVE_OP_LIST -------------- */
+/* ----------------------------------------------------------------- */
+void Dbtup::insertActiveOpList(Signal* signal,
+ OperationrecPtr regOperPtr,
+ Page* const pagePtr,
+ Uint32 pageOffset)
+{
+ OperationrecPtr iaoPrevOpPtr;
+ ndbrequire(regOperPtr.p->inActiveOpList == ZFALSE);
+ regOperPtr.p->inActiveOpList = ZTRUE;
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ iaoPrevOpPtr.i = pagePtr->pageWord[pageOffset];
+ pagePtr->pageWord[pageOffset] = regOperPtr.i;
+ regOperPtr.p->prevActiveOp = RNIL;
+ regOperPtr.p->nextActiveOp = iaoPrevOpPtr.i;
+ if (iaoPrevOpPtr.i == RNIL) {
+ return;
+ } else {
+ jam();
+ ptrCheckGuard(iaoPrevOpPtr, cnoOfOprec, operationrec);
+ iaoPrevOpPtr.p->prevActiveOp = regOperPtr.i;
+ if (iaoPrevOpPtr.p->optype == ZDELETE &&
+ regOperPtr.p->optype == ZINSERT) {
+ jam();
+ // mark both
+ iaoPrevOpPtr.p->deleteInsertFlag = 1;
+ regOperPtr.p->deleteInsertFlag = 1;
+ }
+ return;
+ }//if
+}//Dbtup::insertActiveOpList()
+
+void Dbtup::linkOpIntoFragList(OperationrecPtr regOperPtr,
+ Fragrecord* const regFragPtr)
+{
+ OperationrecPtr sopTmpOperPtr;
+ Uint32 tail = regFragPtr->lastusedOprec;
+ ndbrequire(regOperPtr.p->inFragList == ZFALSE);
+ regOperPtr.p->inFragList = ZTRUE;
+ regOperPtr.p->prevOprecInList = tail;
+ regOperPtr.p->nextOprecInList = RNIL;
+ sopTmpOperPtr.i = tail;
+ if (tail == RNIL) {
+ regFragPtr->firstusedOprec = regOperPtr.i;
+ } else {
+ jam();
+ ptrCheckGuard(sopTmpOperPtr, cnoOfOprec, operationrec);
+ sopTmpOperPtr.p->nextOprecInList = regOperPtr.i;
+ }//if
+ regFragPtr->lastusedOprec = regOperPtr.i;
+}//Dbtup::linkOpIntoFragList()
+
+/*
+This routine is optimised for use from TUPKEYREQ.
+This means that a lot of input data is stored in the operation record.
+The routine expects the following data in the operation record to be
+set up properly.
+Transaction data
+1) transid1
+2) transid2
+3) savePointId
+
+Operation data
+4) optype
+5) dirtyOp
+
+Tuple address
+6) fragPageId
+7) pageIndex
+
+regFragPtr and regTabPtr are references to the fragment and table data and
+are read-only.
+
+The routine will set up the following data in the operation record if
+returned with success.
+
+Tuple address data
+1) realPageId
+2) fragPageId
+3) pageOffset
+4) pageIndex
+
+Also the pagePtr is an output variable if the routine returns with success.
+Its input value can be undefined.
+*/
+bool
+Dbtup::getPage(PagePtr& pagePtr,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr)
+{
+/* ------------------------------------------------------------------------- */
+// GET THE REFERENCE TO THE TUPLE HEADER BY TRANSLATING THE FRAGMENT PAGE ID
+// INTO A REAL PAGE ID AND BY USING THE PAGE INDEX TO DERIVE THE PROPER INDEX
+// IN THE REAL PAGE.
+/* ------------------------------------------------------------------------- */
+ pagePtr.i = getRealpid(regFragPtr, regOperPtr->fragPageId);
+ regOperPtr->realPageId = pagePtr.i;
+ Uint32 RpageIndex = regOperPtr->pageIndex;
+ Uint32 Rtupheadsize = regTabPtr->tupheadsize;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 RpageIndexScaled = RpageIndex >> 1;
+ ndbrequire((RpageIndex & 1) == 0);
+ regOperPtr->pageOffset = ZPAGE_HEADER_SIZE +
+ (Rtupheadsize * RpageIndexScaled);
+
+ OperationrecPtr leaderOpPtr;
+ ndbrequire(regOperPtr->pageOffset < ZWORDS_ON_PAGE);
+ leaderOpPtr.i = pagePtr.p->pageWord[regOperPtr->pageOffset];
+ if (leaderOpPtr.i == RNIL) {
+ return true;
+ }//if
+ ptrCheckGuard(leaderOpPtr, cnoOfOprec, operationrec);
+ bool dirtyRead = ((regOperPtr->optype == ZREAD) &&
+ (regOperPtr->dirtyOp == 1));
+ if (dirtyRead) {
+ bool sameTrans = ((regOperPtr->transid1 == leaderOpPtr.p->transid1) &&
+ (regOperPtr->transid2 == leaderOpPtr.p->transid2));
+ if (!sameTrans) {
+ if (!getPageLastCommitted(regOperPtr, leaderOpPtr.p)) {
+ return false;
+ }//if
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ return true;
+ }//if
+ }//if
+ if (regOperPtr->optype == ZREAD) {
+ /*
+ Read uses savepoint id's to find the correct tuple version.
+ */
+ if (getPageThroughSavePoint(regOperPtr, leaderOpPtr.p)) {
+ jam();
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ return true;
+ }
+ return false;
+ }
+//----------------------------------------------------------------------
+// Check that no other operation is already active on the tuple. Also
+// that abort or commit is not ongoing.
+//----------------------------------------------------------------------
+ if (leaderOpPtr.p->tupleState == NO_OTHER_OP) {
+ jam();
+ if ((leaderOpPtr.p->optype == ZDELETE) &&
+ (regOperPtr->optype != ZINSERT)) {
+ jam();
+ terrorCode = ZTUPLE_DELETED_ERROR;
+ return false;
+ }//if
+ return true;
+ } else if (leaderOpPtr.p->tupleState == ALREADY_ABORTED) {
+ jam();
+ terrorCode = ZMUST_BE_ABORTED_ERROR;
+ return false;
+ } else {
+ ndbrequire(false);
+ }//if
+ return true;
+}//Dbtup::getPage()
+
+bool
+Dbtup::getPageThroughSavePoint(Operationrec* regOperPtr,
+ Operationrec* leaderOpPtr)
+{
+ bool found = false;
+ OperationrecPtr loopOpPtr;
+ loopOpPtr.p = leaderOpPtr;
+ while(true) {
+ if (regOperPtr->savePointId > loopOpPtr.p->savePointId) {
+ jam();
+ found = true;
+ break;
+ }
+ if (loopOpPtr.p->nextActiveOp == RNIL) {
+ break;
+ }
+ loopOpPtr.i = loopOpPtr.p->nextActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ jam();
+ }
+ if (!found) {
+ return getPageLastCommitted(regOperPtr, loopOpPtr.p);
+ } else {
+ if (loopOpPtr.p->optype == ZDELETE) {
+ jam();
+ terrorCode = ZTUPLE_DELETED_ERROR;
+ return false;
+ }
+ if (loopOpPtr.p->tupleState == ALREADY_ABORTED) {
+ /*
+ Requested tuple version has already been aborted
+ */
+ jam();
+ terrorCode = ZMUST_BE_ABORTED_ERROR;
+ return false;
+ }
+ bool use_copy;
+ if (loopOpPtr.p->prevActiveOp == RNIL) {
+ jam();
+ /*
+ Use the original tuple since we are reading from the last written
+ tuple; this operation is at the head of the active operation list.
+ */
+ use_copy = false;
+ } else {
+ /*
+ Go forward in time to find a copy of the tuple which this operation
+ produced
+ */
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ if (loopOpPtr.p->optype == ZDELETE) {
+ /*
+ This operation was a delete and thus has no copy tuple attached to
+ it. We move forward to the next operation; either it does not exist,
+ in which case we return the original tuple, or it must be an insert
+ which contains a copy record.
+ */
+ if (loopOpPtr.p->prevActiveOp == RNIL) {
+ jam();
+ use_copy = false;
+ } else {
+ jam();
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ ndbrequire(loopOpPtr.p->optype == ZINSERT);
+ use_copy = true;
+ }
+ } else if (loopOpPtr.p->optype == ZUPDATE) {
+ jam();
+ /*
+ This operation, which was the next in time, has a copy which is the
+ result of the previous operation and which we want to use. Thus use
+ the copy tuple of this operation.
+ */
+ use_copy = true;
+ } else {
+ /*
+ This operation was an insert that happened after an insert or update.
+ This is not a possible case.
+ */
+ ndbrequire(false);
+ return false;
+ }
+ }
+ if (use_copy) {
+ regOperPtr->realPageId = loopOpPtr.p->realPageIdC;
+ regOperPtr->fragPageId = loopOpPtr.p->fragPageIdC;
+ regOperPtr->pageIndex = loopOpPtr.p->pageIndexC;
+ regOperPtr->pageOffset = loopOpPtr.p->pageOffsetC;
+ } else {
+ regOperPtr->realPageId = loopOpPtr.p->realPageId;
+ regOperPtr->fragPageId = loopOpPtr.p->fragPageId;
+ regOperPtr->pageIndex = loopOpPtr.p->pageIndex;
+ regOperPtr->pageOffset = loopOpPtr.p->pageOffset;
+ }
+ return true;
+ }
+}
+
+bool
+Dbtup::getPageLastCommitted(Operationrec* const regOperPtr,
+ Operationrec* const leaderOpPtr)
+{
+//----------------------------------------------------------------------
+// A dirty read wants to read the latest committed tuple. The latest
+// tuple value might not exist, or else we have to find the copy
+// tuple. Start by finding the end of the list, which is the first
+// operation on the record in the ongoing transaction.
+//----------------------------------------------------------------------
+ jam();
+ OperationrecPtr loopOpPtr;
+ loopOpPtr.p = leaderOpPtr;
+ while (loopOpPtr.p->nextActiveOp != RNIL) {
+ jam();
+ loopOpPtr.i = loopOpPtr.p->nextActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ }//while
+ if (loopOpPtr.p->optype == ZINSERT) {
+ jam();
+//----------------------------------------------------------------------
+// With an insert at the start of the list we know that the tuple did not
+// exist before this transaction was started. We don't care if the current
+// transaction is in the commit phase since the commit is not really
+// completed until the operation is gone from TUP.
+//----------------------------------------------------------------------
+ terrorCode = ZTUPLE_DELETED_ERROR;
+ return false;
+ } else {
+//----------------------------------------------------------------------
+// A successful update or delete as the first in the queue means that a tuple
+// exists in the committed world. We need to find it.
+//----------------------------------------------------------------------
+ if (loopOpPtr.p->optype == ZUPDATE) {
+ jam();
+//----------------------------------------------------------------------
+// The first operation was an update, so we set our tuple reference to the
+// copy tuple of this operation.
+//----------------------------------------------------------------------
+ regOperPtr->realPageId = loopOpPtr.p->realPageIdC;
+ regOperPtr->fragPageId = loopOpPtr.p->fragPageIdC;
+ regOperPtr->pageIndex = loopOpPtr.p->pageIndexC;
+ regOperPtr->pageOffset = loopOpPtr.p->pageOffsetC;
+ } else if ((loopOpPtr.p->optype == ZDELETE) &&
+ (loopOpPtr.p->prevActiveOp == RNIL)) {
+ jam();
+//----------------------------------------------------------------------
+// There was only a delete. The original tuple still is ok.
+//----------------------------------------------------------------------
+ } else {
+ jam();
+//----------------------------------------------------------------------
+// There was another operation after the delete; this must be an insert
+// and we have found our copy tuple there.
+//----------------------------------------------------------------------
+ loopOpPtr.i = loopOpPtr.p->prevActiveOp;
+ ptrCheckGuard(loopOpPtr, cnoOfOprec, operationrec);
+ ndbrequire(loopOpPtr.p->optype == ZINSERT);
+ regOperPtr->realPageId = loopOpPtr.p->realPageIdC;
+ regOperPtr->fragPageId = loopOpPtr.p->fragPageIdC;
+ regOperPtr->pageIndex = loopOpPtr.p->pageIndexC;
+ regOperPtr->pageOffset = loopOpPtr.p->pageOffsetC;
+ }//if
+ }//if
+ return true;
+}//Dbtup::getPageLastCommitted()
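
The decision logic above reduces to examining the oldest operation of the transaction and, when that is a delete, the operation that followed it. A standalone sketch under simplifying assumptions: a plain vector ordered newest first stands in for the nextActiveOp links, and an integer copy-tuple id stands in for the four copy reference fields.

#include <iostream>
#include <optional>
#include <vector>

enum OpType { INSERT_OP, UPDATE_OP, DELETE_OP };

// One uncommitted operation on the tuple; copyTuple is the id of its copy
// tuple, or -1 when the operation made no copy (a plain delete).
struct Op { OpType type; int copyTuple; };

// chain is ordered newest first, as when following nextActiveOp links.
// Result: std::nullopt = row did not exist before the transaction,
//         0            = read the original tuple,
//         other id     = read that copy tuple (the before-image).
std::optional<int> lastCommittedVersion(const std::vector<Op>& chain)
{
  const Op& first = chain.back();        // oldest operation of the transaction
  if (first.type == INSERT_OP) return std::nullopt;
  if (first.type == UPDATE_OP) return first.copyTuple;
  // Oldest operation is a delete: with nothing after it the original tuple
  // is still intact, otherwise the following insert carries the copy tuple.
  if (chain.size() == 1) return 0;
  return chain[chain.size() - 2].copyTuple;
}

int main()
{
  // A delete followed (in time) by an insert that re-created the row.
  std::vector<Op> chain = { { INSERT_OP, 7 }, { DELETE_OP, -1 } };
  auto v = lastCommittedVersion(chain);
  std::cout << (v ? *v : -1) << "\n";    // prints 7
}
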
+
+void Dbtup::execTUPKEYREQ(Signal* signal)
+{
+ TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtr();
+ Uint32 RoperPtr = tupKeyReq->connectPtr;
+ Uint32 Rtabptr = tupKeyReq->tableRef;
+ Uint32 RfragId = tupKeyReq->fragId;
+ Uint32 Rstoredid = tupKeyReq->storedProcedure;
+ Uint32 Rfragptr = tupKeyReq->fragPtr;
+
+ Uint32 RnoOfOprec = cnoOfOprec;
+ Uint32 RnoOfTablerec = cnoOfTablerec;
+ Uint32 RnoOfFragrec = cnoOfFragrec;
+
+ operPtr.i = RoperPtr;
+ fragptr.i = Rfragptr;
+ tabptr.i = Rtabptr;
+ jamEntry();
+
+ ndbrequire(((RoperPtr < RnoOfOprec) &&
+ (Rtabptr < RnoOfTablerec) &&
+ (Rfragptr < RnoOfFragrec)));
+ ptrAss(operPtr, operationrec);
+ Operationrec * const regOperPtr = operPtr.p;
+ ptrAss(fragptr, fragrecord);
+ Fragrecord * const regFragPtr = fragptr.p;
+ ptrAss(tabptr, tablerec);
+ Tablerec* const regTabPtr = tabptr.p;
+
+ Uint32 TrequestInfo = tupKeyReq->request;
+
+ if (regOperPtr->transstate != IDLE) {
+ TUPKEY_abort(signal, 39);
+ return;
+ }//if
+/* ----------------------------------------------------------------- */
+// Operation is ZREAD when we arrive here so no need to worry about the
+// abort process.
+/* ----------------------------------------------------------------- */
+/* ----------- INITIATE THE OPERATION RECORD -------------- */
+/* ----------------------------------------------------------------- */
+ regOperPtr->fragmentPtr = Rfragptr;
+ regOperPtr->dirtyOp = TrequestInfo & 1;
+ regOperPtr->opSimple = (TrequestInfo >> 1) & 1;
+ regOperPtr->interpretedExec = (TrequestInfo >> 10) & 1;
+ regOperPtr->optype = (TrequestInfo >> 6) & 0xf;
+
+ // Attributes needed by trigger execution
+ regOperPtr->noFiredTriggers = 0;
+ regOperPtr->tableRef = Rtabptr;
+ regOperPtr->tcOperationPtr = tupKeyReq->opRef;
+ regOperPtr->primaryReplica = tupKeyReq->primaryReplica;
+ regOperPtr->coordinatorTC = tupKeyReq->coordinatorTC;
+ regOperPtr->tcOpIndex = tupKeyReq->tcOpIndex;
+ regOperPtr->savePointId = tupKeyReq->savePointId;
+
+ regOperPtr->fragId = RfragId;
+
+ regOperPtr->fragPageId = tupKeyReq->keyRef1;
+ regOperPtr->pageIndex = tupKeyReq->keyRef2;
+ regOperPtr->attrinbufLen = regOperPtr->logSize = tupKeyReq->attrBufLen;
+ regOperPtr->recBlockref = tupKeyReq->applRef;
+
+// Schema Version in tupKeyReq->schemaVersion not used in this version
+ regOperPtr->storedProcedureId = Rstoredid;
+ regOperPtr->transid1 = tupKeyReq->transId1;
+ regOperPtr->transid2 = tupKeyReq->transId2;
+
+ regOperPtr->attroutbufLen = 0;
+/* ----------------------------------------------------------------------- */
+// INITIALISE TO DEFAULT VALUE
+// INIT THE COPY REFERENCE RECORDS TO RNIL TO ENSURE THAT THEIR VALUES
+// ARE VALID IF THEY EXIST
+// NO PENDING CHECKPOINT WHEN COPY CREATED (DEFAULT)
+// NO TUPLE HAS BEEN ALLOCATED YET
+// NO COPY HAS BEEN CREATED YET
+/* ----------------------------------------------------------------------- */
+ regOperPtr->undoLogged = false;
+ regOperPtr->realPageId = RNIL;
+ regOperPtr->realPageIdC = RNIL;
+ regOperPtr->fragPageIdC = RNIL;
+
+ regOperPtr->pageOffset = ZNIL;
+ regOperPtr->pageOffsetC = ZNIL;
+
+ regOperPtr->pageIndexC = ZNIL;
+
+ // version not yet known
+ regOperPtr->tupVersion = ZNIL;
+ regOperPtr->deleteInsertFlag = 0;
+
+ regOperPtr->tupleState = TUPLE_BLOCKED;
+ regOperPtr->changeMask.clear();
+
+ if (Rstoredid != ZNIL) {
+ ndbrequire(initStoredOperationrec(regOperPtr, Rstoredid) == ZOK);
+ }//if
+ copyAttrinfo(signal, regOperPtr, &cinBuffer[0]);
+
+ PagePtr pagePtr;
+ if (!getPage(pagePtr, regOperPtr, regFragPtr, regTabPtr)) {
+ tupkeyErrorLab(signal);
+ return;
+ }//if
+
+ Uint32 Roptype = regOperPtr->optype;
+ if (Roptype == ZREAD) {
+ jam();
+ if (handleReadReq(signal, regOperPtr, regTabPtr, pagePtr.p) != -1) {
+ sendTUPKEYCONF(signal, regOperPtr, 0);
+/* ------------------------------------------------------------------------- */
+// Read operations need not be taken out of any lists. We also do not
+// need to wait for commit since there are no changes to commit. Thus we
+// prepare the operation record already now for the next operation.
+// Write operations have set the state to STARTED above indicating that
+// they are waiting for the Commit or Abort decision.
+/* ------------------------------------------------------------------------- */
+ regOperPtr->transstate = IDLE;
+ regOperPtr->currentAttrinbufLen = 0;
+ }//if
+ return;
+ }//if
+ linkOpIntoFragList(operPtr, regFragPtr);
+ insertActiveOpList(signal,
+ operPtr,
+ pagePtr.p,
+ regOperPtr->pageOffset);
+ if (isUndoLoggingBlocked(regFragPtr)) {
+ TUPKEY_abort(signal, 38);
+ return;
+ }//if
+/* ---------------------------------------------------------------------- */
+// WE SET THE CURRENT ACTIVE OPERATION IN THE TUPLE TO POINT TO OUR
+// OPERATION RECORD. IF SEVERAL OPERATIONS WORK ON THIS TUPLE THEY ARE
+// LINKED TO OUR OPERATION RECORD. DIRTY READS CAN ACCESS THE COPY
+// TUPLE THROUGH OUR OPERATION RECORD.
+/* ---------------------------------------------------------------------- */
+ if (Roptype == ZINSERT) {
+ jam();
+ if (handleInsertReq(signal, regOperPtr,
+ regFragPtr, regTabPtr, pagePtr.p) == -1) {
+ return;
+ }//if
+ if (!regTabPtr->tuxCustomTriggers.isEmpty()) {
+ jam();
+ if (executeTuxInsertTriggers(signal, regOperPtr, regTabPtr) != 0) {
+ jam();
+ tupkeyErrorLab(signal);
+ return;
+ }
+ }
+ checkImmediateTriggersAfterInsert(signal,
+ regOperPtr,
+ regTabPtr);
+ sendTUPKEYCONF(signal, regOperPtr, regOperPtr->logSize);
+ return;
+ }//if
+ if (regTabPtr->checksumIndicator &&
+ (calculateChecksum(pagePtr.p,
+ regOperPtr->pageOffset,
+ regTabPtr->tupheadsize) != 0)) {
+ jam();
+ terrorCode = ZTUPLE_CORRUPTED_ERROR;
+ tupkeyErrorLab(signal);
+ return;
+ }//if
+ if (Roptype == ZUPDATE) {
+ jam();
+ if (handleUpdateReq(signal, regOperPtr,
+ regFragPtr, regTabPtr, pagePtr.p) == -1) {
+ return;
+ }//if
+ // If update operation is done on primary,
+ // check any after op triggers
+ terrorCode = 0;
+ if (!regTabPtr->tuxCustomTriggers.isEmpty()) {
+ jam();
+ if (executeTuxUpdateTriggers(signal, regOperPtr, regTabPtr) != 0) {
+ jam();
+ tupkeyErrorLab(signal);
+ return;
+ }
+ }
+ checkImmediateTriggersAfterUpdate(signal,
+ regOperPtr,
+ regTabPtr);
+ // XXX use terrorCode for now since all methods are void
+ if (terrorCode != 0) {
+ tupkeyErrorLab(signal);
+ return;
+ }
+ sendTUPKEYCONF(signal, regOperPtr, regOperPtr->logSize);
+ return;
+ } else if (Roptype == ZDELETE) {
+ jam();
+ if (handleDeleteReq(signal, regOperPtr,
+ regFragPtr, regTabPtr, pagePtr.p) == -1) {
+ return;
+ }//if
+ // If delete operation is done on primary,
+ // check any after op triggers
+ if (!regTabPtr->tuxCustomTriggers.isEmpty()) {
+ jam();
+ if (executeTuxDeleteTriggers(signal, regOperPtr, regTabPtr) != 0) {
+ jam();
+ tupkeyErrorLab(signal);
+ return;
+ }
+ }
+ checkImmediateTriggersAfterDelete(signal,
+ regOperPtr,
+ regTabPtr);
+ sendTUPKEYCONF(signal, regOperPtr, 0);
+ return;
+ } else {
+ ndbrequire(false);
+ }//if
+}//Dbtup::execTUPKEYREQ()
+
+/* ---------------------------------------------------------------- */
+/* ------------------------ CONFIRM REQUEST ----------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::sendTUPKEYCONF(Signal* signal,
+ Operationrec * const regOperPtr,
+ Uint32 TlogSize)
+{
+ TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtrSend();
+
+ Uint32 RuserPointer = regOperPtr->userpointer;
+ Uint32 RattroutbufLen = regOperPtr->attroutbufLen;
+ Uint32 RnoFiredTriggers = regOperPtr->noFiredTriggers;
+ BlockReference Ruserblockref = regOperPtr->userblockref;
+ Uint32 lastRow = regOperPtr->lastRow;
+
+ regOperPtr->transstate = STARTED;
+ regOperPtr->tupleState = NO_OTHER_OP;
+ tupKeyConf->userPtr = RuserPointer;
+ tupKeyConf->readLength = RattroutbufLen;
+ tupKeyConf->writeLength = TlogSize;
+ tupKeyConf->noFiredTriggers = RnoFiredTriggers;
+ tupKeyConf->lastRow = lastRow;
+
+ EXECUTE_DIRECT(refToBlock(Ruserblockref), GSN_TUPKEYCONF, signal,
+ TupKeyConf::SignalLength);
+ return;
+}//Dbtup::sendTUPKEYCONF()
+
+#define MAX_READ (sizeof(signal->theData) > MAX_MESSAGE_SIZE ? MAX_MESSAGE_SIZE : sizeof(signal->theData))
+
+/* ---------------------------------------------------------------- */
+/* ----------------------------- READ ---------------------------- */
+/* ---------------------------------------------------------------- */
+int Dbtup::handleReadReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* pagePtr)
+{
+ Uint32 Ttupheadoffset = regOperPtr->pageOffset;
+ const BlockReference sendBref = regOperPtr->recBlockref;
+ if (regTabPtr->checksumIndicator &&
+ (calculateChecksum(pagePtr, Ttupheadoffset,
+ regTabPtr->tupheadsize) != 0)) {
+ jam();
+ terrorCode = ZTUPLE_CORRUPTED_ERROR;
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+
+ Uint32 * dst = &signal->theData[25];
+ Uint32 dstLen = (MAX_READ / 4) - 25;
+ const Uint32 node = refToNode(sendBref);
+ if(node != 0 && node != getOwnNodeId()) {
+ ;
+ } else {
+ jam();
+ /**
+ * execute direct
+ */
+ dst = &signal->theData[3];
+ dstLen = (MAX_READ / 4) - 3;
+ }
+
+ if (regOperPtr->interpretedExec != 1) {
+ jam();
+ int ret = readAttributes(pagePtr,
+ Ttupheadoffset,
+ &cinBuffer[0],
+ regOperPtr->attrinbufLen,
+ dst,
+ dstLen,
+ false);
+ if (ret != -1) {
+/* ------------------------------------------------------------------------- */
+// We have read all data into coutBuffer. Now send it to the API.
+/* ------------------------------------------------------------------------- */
+ jam();
+ Uint32 TnoOfDataRead= (Uint32) ret;
+ regOperPtr->attroutbufLen = TnoOfDataRead;
+ sendReadAttrinfo(signal, TnoOfDataRead, regOperPtr);
+ return 0;
+ }//if
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ } else {
+ jam();
+ regOperPtr->lastRow = 0;
+ if (interpreterStartLab(signal, pagePtr, Ttupheadoffset) != -1) {
+ return 0;
+ }//if
+ return -1;
+ }//if
+}//Dbtup::handleReadReq()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------- UPDATE ---------------------------- */
+/* ---------------------------------------------------------------- */
+int Dbtup::handleUpdateReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr)
+{
+ PagePtr copyPagePtr;
+ Uint32 tuple_size = regTabPtr->tupheadsize;
+
+//---------------------------------------------------
+/* --- MAKE A COPY OF THIS TUPLE ON A COPY PAGE --- */
+//---------------------------------------------------
+ Uint32 RpageOffsetC;
+ if (!allocTh(regFragPtr,
+ regTabPtr,
+ COPY_PAGE,
+ signal,
+ RpageOffsetC,
+ copyPagePtr)) {
+ TUPKEY_abort(signal, 1);
+ return -1;
+ }//if
+ Uint32 RpageIdC = copyPagePtr.i;
+ Uint32 RfragPageIdC = copyPagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ Uint32 indexC = ((RpageOffsetC - ZPAGE_HEADER_SIZE) / tuple_size) << 1;
+ regOperPtr->pageIndexC = indexC;
+ regOperPtr->fragPageIdC = RfragPageIdC;
+ regOperPtr->realPageIdC = RpageIdC;
+ regOperPtr->pageOffsetC = RpageOffsetC;
+ /* -------------------------------------------------------------- */
+ /* IF WE HAVE AN ONGOING CHECKPOINT WE HAVE TO LOG THE ALLOCATION */
+ /* OF THE TUPLE HEADER TO BE ABLE TO DELETE IT UPON RESTART */
+ /* THE ONLY DATA EXCEPT THE TYPE, PAGE, INDEX IS THE SIZE TO FREE */
+ /* -------------------------------------------------------------- */
+ if (isUndoLoggingActive(regFragPtr)) {
+ if (isPageUndoLogged(regFragPtr, RfragPageIdC)) {
+ jam();
+ regOperPtr->undoLogged = true;
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_DELETE_TH,
+ RfragPageIdC,
+ indexC,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ }//if
+ if (isPageUndoLogged(regFragPtr, regOperPtr->fragPageId)) {
+ jam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_TYPE_UPDATE_TH,
+ regOperPtr->fragPageId,
+ regOperPtr->pageIndex,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ cprAddData(signal,
+ regFragPtr,
+ regOperPtr->realPageId,
+ tuple_size,
+ regOperPtr->pageOffset);
+ }//if
+ }//if
+ Uint32 RwordCount = tuple_size - 1;
+ Uint32 end_dest = RpageOffsetC + tuple_size;
+ Uint32 offset = regOperPtr->pageOffset;
+ Uint32 end_source = offset + tuple_size;
+ ndbrequire(end_dest <= ZWORDS_ON_PAGE && end_source <= ZWORDS_ON_PAGE);
+ void* Tdestination = (void*)&copyPagePtr.p->pageWord[RpageOffsetC + 1];
+ const void* Tsource = (void*)&pagePtr->pageWord[offset + 1];
+ MEMCOPY_NO_WORDS(Tdestination, Tsource, RwordCount);
+
+ Uint32 prev_tup_version;
+ // nextActiveOp is before this op in event order
+ if (regOperPtr->nextActiveOp == RNIL) {
+ jam();
+ prev_tup_version = ((const Uint32*)Tsource)[0];
+ } else {
+ OperationrecPtr prevOperPtr;
+ jam();
+ prevOperPtr.i = regOperPtr->nextActiveOp;
+ ptrCheckGuard(prevOperPtr, cnoOfOprec, operationrec);
+ prev_tup_version = prevOperPtr.p->tupVersion;
+ }//if
+ regOperPtr->tupVersion = (prev_tup_version + 1) &
+ ((1 << ZTUP_VERSION_BITS) - 1);
+ // global variable alert
+ ndbassert(operationrec + operPtr.i == regOperPtr);
+ copyPagePtr.p->pageWord[RpageOffsetC] = operPtr.i;
+
+ return updateStartLab(signal, regOperPtr, regTabPtr, pagePtr);
+}//Dbtup::handleUpdateReq()
+
+/* ---------------------------------------------------------------- */
+/* ----------------------------- INSERT --------------------------- */
+/* ---------------------------------------------------------------- */
+int Dbtup::handleInsertReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr)
+{
+ Uint32 ret_value;
+
+ if (regOperPtr->nextActiveOp != RNIL) {
+ jam();
+ OperationrecPtr prevExecOpPtr;
+ prevExecOpPtr.i = regOperPtr->nextActiveOp;
+ ptrCheckGuard(prevExecOpPtr, cnoOfOprec, operationrec);
+ if (prevExecOpPtr.p->optype != ZDELETE) {
+ terrorCode = ZINSERT_ERROR;
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ ret_value = handleUpdateReq(signal, regOperPtr,
+ regFragPtr, regTabPtr, pagePtr);
+ } else {
+ jam();
+ regOperPtr->tupVersion = 0;
+ ret_value = updateStartLab(signal, regOperPtr, regTabPtr, pagePtr);
+ }//if
+ if (ret_value != (Uint32)-1) {
+ if (checkNullAttributes(regOperPtr, regTabPtr)) {
+ jam();
+ return 0;
+ }//if
+ TUPKEY_abort(signal, 17);
+ }//if
+ return -1;
+}//Dbtup::handleInsertReq()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------- DELETE ---------------------------- */
+/* ---------------------------------------------------------------- */
+int Dbtup::handleDeleteReq(Signal* signal,
+ Operationrec* const regOperPtr,
+ Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr)
+{
+ // delete must set but not increment tupVersion
+ if (regOperPtr->nextActiveOp != RNIL) {
+ OperationrecPtr prevExecOpPtr;
+ prevExecOpPtr.i = regOperPtr->nextActiveOp;
+ ptrCheckGuard(prevExecOpPtr, cnoOfOprec, operationrec);
+ regOperPtr->tupVersion = prevExecOpPtr.p->tupVersion;
+ } else {
+ jam();
+ regOperPtr->tupVersion = pagePtr->pageWord[regOperPtr->pageOffset + 1];
+ }
+ if (isUndoLoggingNeeded(regFragPtr, regOperPtr->fragPageId)) {
+ jam();
+ cprAddUndoLogRecord(signal,
+ ZINDICATE_NO_OP_ACTIVE,
+ regOperPtr->fragPageId,
+ regOperPtr->pageIndex,
+ regOperPtr->tableRef,
+ regOperPtr->fragId,
+ regFragPtr->checkpointVersion);
+ }//if
+ if (regOperPtr->attrinbufLen == 0) {
+ return 0;
+ }//if
+/* ------------------------------------------------------------------------ */
+/* THE APPLICATION WANTS TO READ THE TUPLE BEFORE IT IS DELETED. */
+/* ------------------------------------------------------------------------ */
+ return handleReadReq(signal, regOperPtr, regTabPtr, pagePtr);
+}//Dbtup::handleDeleteReq()
+
+int
+Dbtup::updateStartLab(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr,
+ Page* const pagePtr)
+{
+ int retValue;
+ if (regOperPtr->optype == ZINSERT) {
+ jam();
+ setNullBits(pagePtr, regTabPtr, regOperPtr->pageOffset);
+ }
+ if (regOperPtr->interpretedExec != 1) {
+ jam();
+ retValue = updateAttributes(pagePtr,
+ regOperPtr->pageOffset,
+ &cinBuffer[0],
+ regOperPtr->attrinbufLen);
+ if (retValue == -1) {
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ } else {
+ jam();
+ retValue = interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset);
+ }//if
+ ndbrequire(regOperPtr->tupVersion != ZNIL);
+ pagePtr->pageWord[regOperPtr->pageOffset + 1] = regOperPtr->tupVersion;
+ if (regTabPtr->checksumIndicator) {
+ jam();
+ setChecksum(pagePtr, regOperPtr->pageOffset, regTabPtr->tupheadsize);
+ }//if
+ return retValue;
+}//Dbtup::updateStartLab()
+
+void
+Dbtup::setNullBits(Page* const regPage, Tablerec* const regTabPtr, Uint32 pageOffset)
+{
+ Uint32 noOfExtraNullWords = regTabPtr->tupNullWords;
+ Uint32 nullOffsetStart = regTabPtr->tupNullIndex + pageOffset;
+ ndbrequire((noOfExtraNullWords + nullOffsetStart) < ZWORDS_ON_PAGE);
+ for (Uint32 i = 0; i < noOfExtraNullWords; i++) {
+ regPage->pageWord[nullOffsetStart + i] = 0xFFFFFFFF;
+ }//for
+}//Dbtup::setNullBits()
+
+bool
+Dbtup::checkNullAttributes(Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+// Check here that an insert updates all not-null attributes.
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+ /*
+ * The idea here is maybe that changeMask is not-null attributes
+ * and must contain notNullAttributeMask. But:
+ *
+ * 1. changeMask has all bits set on insert
+ * 2. not-null is checked in each UpdateFunction
+ * 3. the code below does not work except trivially due to 1.
+ *
+ * XXX remove or fix
+ */
+ attributeMask.clear();
+ attributeMask.bitOR(regOperPtr->changeMask);
+ attributeMask.bitAND(regTabPtr->notNullAttributeMask);
+ attributeMask.bitXOR(regTabPtr->notNullAttributeMask);
+ if (!attributeMask.isclear()) {
+ return false;
+ }//if
+ return true;
+}//Dbtup::checkNullAttributes()
+
+/* ---------------------------------------------------------------- */
+/* THIS IS THE START OF THE INTERPRETED EXECUTION OF UPDATES. WE */
+/* START BY LINKING ALL ATTRINFO'S IN A DOUBLY LINKED LIST (THEY ARE*/
+/* ALREADY IN A LINKED LIST). WE ALLOCATE A REGISTER MEMORY (EQUAL */
+/* TO AN ATTRINFO RECORD). THE INTERPRETER GOES THROUGH FOUR PHASES*/
+/* DURING THE FIRST PHASE IT IS ONLY ALLOWED TO READ ATTRIBUTES THAT*/
+/* ARE SENT TO THE CLIENT APPLICATION. DURING THE SECOND PHASE IT IS*/
+/* ALLOWED TO READ FROM ATTRIBUTES INTO REGISTERS, TO UPDATE */
+/* ATTRIBUTES BASED ON EITHER A CONSTANT VALUE OR A REGISTER VALUE, */
+/* A DIVERSE SET OF OPERATIONS ON REGISTERS ARE AVAILABLE AS WELL. */
+/* IT IS ALSO POSSIBLE TO PERFORM JUMPS WITHIN THE INSTRUCTIONS THAT*/
+/* BELONGS TO THE SECOND PHASE. ALSO SUBROUTINES CAN BE CALLED IN */
+/* THIS PHASE. THE THIRD PHASE IS TO AGAIN READ ATTRIBUTES AND */
+/* FINALLY THE FOURTH PHASE READS SELECTED REGISTERS AND SEND THEM */
+/* TO THE CLIENT APPLICATION. */
+/* THERE IS A FIFTH REGION WHICH CONTAINS SUBROUTINES CALLABLE FROM */
+/* THE INTERPRETER EXECUTION REGION. */
+/* THE FIRST FIVE WORDS WILL GIVE THE LENGTH OF THE FIVE REGIONS */
+/* */
+/* THIS MEANS THAT FROM THE APPLICATION'S POINT OF VIEW THE DATABASE */
+/* CAN HANDLE SUBROUTINE CALLS WHERE THE CODE IS SENT IN THE REQUEST*/
+/* THE RETURN PARAMETERS ARE FIXED AND CAN EITHER BE GENERATED */
+/* BEFORE THE EXECUTION OF THE ROUTINE OR AFTER. */
+/* */
+/* IN LATER VERSIONS WE WILL ADD MORE THINGS LIKE THE POSSIBILITY */
+/* TO ALLOCATE MEMORY AND USE THIS AS LOCAL STORAGE. IT IS ALSO */
+/* IMAGINABLE TO HAVE SPECIAL ROUTINES THAT CAN PERFORM CERTAIN */
+/* OPERATIONS ON BLOB'S DEPENDENT ON WHAT THE BLOB REPRESENTS. */
+/* */
+/* */
+/* ----------------------------------------- */
+/* + INITIAL READ REGION + */
+/* ----------------------------------------- */
+/* + INTERPRETED EXECUTE REGION + */
+/* ----------------------------------------- */
+/* + FINAL UPDATE REGION + */
+/* ----------------------------------------- */
+/* + FINAL READ REGION + */
+/* ----------------------------------------- */
+/* + SUBROUTINE REGION + */
+/* ----------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* ----------------- INTERPRETED EXECUTION ----------------------- */
+/* ---------------------------------------------------------------- */
+int Dbtup::interpreterStartLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset)
+{
+ Operationrec * const regOperPtr = operPtr.p;
+ Uint32 RtotalLen;
+ int TnoDataRW;
+
+ Uint32 RinitReadLen = cinBuffer[0];
+ Uint32 RexecRegionLen = cinBuffer[1];
+ Uint32 RfinalUpdateLen = cinBuffer[2];
+ Uint32 RfinalRLen = cinBuffer[3];
+ Uint32 RsubLen = cinBuffer[4];
+
+ Uint32 RattrinbufLen = regOperPtr->attrinbufLen;
+ const BlockReference sendBref = regOperPtr->recBlockref;
+
+ Uint32 * dst = &signal->theData[25];
+ Uint32 dstLen = (MAX_READ / 4) - 25;
+ const Uint32 node = refToNode(sendBref);
+ if(node != 0 && node != getOwnNodeId()) {
+ ;
+ } else {
+ jam();
+ /**
+ * execute direct
+ */
+ dst = &signal->theData[3];
+ dstLen = (MAX_READ / 4) - 3;
+ }
+
+ RtotalLen = RinitReadLen;
+ RtotalLen += RexecRegionLen;
+ RtotalLen += RfinalUpdateLen;
+ RtotalLen += RfinalRLen;
+ RtotalLen += RsubLen;
+
+ Uint32 RattroutCounter = 0;
+ Uint32 RinstructionCounter = 5;
+ Uint32 RlogSize = 0;
+
+ if (((RtotalLen + 5) == RattrinbufLen) &&
+ (RattrinbufLen >= 5) &&
+ (RattrinbufLen < ZATTR_BUFFER_SIZE)) {
+ /* ---------------------------------------------------------------- */
+ // We start by checking consistency. We must have the first five
+ // words of the ATTRINFO to give us the length of the regions. The
+ // size of these regions must be the same as the total ATTRINFO
+ // length and finally the total length must be within the limits.
+ /* ---------------------------------------------------------------- */
+
+ if (RinitReadLen > 0) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ // The first step that can be taken in the interpreter is to read
+ // data of the tuple before any updates have been applied.
+ /* ---------------------------------------------------------------- */
+ TnoDataRW = readAttributes(pagePtr,
+ TupHeadOffset,
+ &cinBuffer[5],
+ RinitReadLen,
+ &dst[0],
+ dstLen,
+ false);
+ if (TnoDataRW != -1) {
+ RattroutCounter = TnoDataRW;
+ RinstructionCounter += RinitReadLen;
+ } else {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ }//if
+ if (RexecRegionLen > 0) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ // The next step is the actual interpreted execution. This executes
+ // a register-based virtual machine which can read and write attributes
+ // to and from registers.
+ /* ---------------------------------------------------------------- */
+ Uint32 RsubPC = RinstructionCounter + RfinalUpdateLen + RfinalRLen;
+ TnoDataRW = interpreterNextLab(signal,
+ pagePtr,
+ TupHeadOffset,
+ &clogMemBuffer[0],
+ &cinBuffer[RinstructionCounter],
+ RexecRegionLen,
+ &cinBuffer[RsubPC],
+ RsubLen,
+ &coutBuffer[0],
+ sizeof(coutBuffer) / 4);
+ if (TnoDataRW != -1) {
+ RinstructionCounter += RexecRegionLen;
+ RlogSize = TnoDataRW;
+ } else {
+ jam();
+ return -1;
+ }//if
+ }//if
+ if (RfinalUpdateLen > 0) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ // We can also apply a set of updates without any conditions as part
+ // of the interpreted execution.
+ /* ---------------------------------------------------------------- */
+ if (regOperPtr->optype == ZUPDATE) {
+ TnoDataRW = updateAttributes(pagePtr,
+ TupHeadOffset,
+ &cinBuffer[RinstructionCounter],
+ RfinalUpdateLen);
+ if (TnoDataRW != -1) {
+ MEMCOPY_NO_WORDS(&clogMemBuffer[RlogSize],
+ &cinBuffer[RinstructionCounter],
+ RfinalUpdateLen);
+ RinstructionCounter += RfinalUpdateLen;
+ RlogSize += RfinalUpdateLen;
+ } else {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 19);
+ }//if
+ }//if
+ if (RfinalRLen > 0) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ // The final action is that we can also read the tuple after it has
+ // been updated.
+ /* ---------------------------------------------------------------- */
+ TnoDataRW = readAttributes(pagePtr,
+ TupHeadOffset,
+ &cinBuffer[RinstructionCounter],
+ RfinalRLen,
+ &dst[RattroutCounter],
+ (dstLen - RattroutCounter),
+ false);
+ if (TnoDataRW != -1) {
+ RattroutCounter += TnoDataRW;
+ } else {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ }//if
+ regOperPtr->logSize = RlogSize;
+ regOperPtr->attroutbufLen = RattroutCounter;
+ sendReadAttrinfo(signal, RattroutCounter, regOperPtr);
+ if (RlogSize > 0) {
+ sendLogAttrinfo(signal, RlogSize, regOperPtr);
+ }//if
+ return 0;
+ } else {
+ return TUPKEY_abort(signal, 22);
+ }//if
+}//Dbtup::interpreterStartLab()
+
+/* ---------------------------------------------------------------- */
+/* WHEN EXECUTION IS INTERPRETED WE NEED TO SEND SOME ATTRINFO*/
+/* BACK TO LQH FOR LOGGING AND SENDING TO BACKUP AND STANDBY */
+/* NODES. */
+/* INPUT: LOG_ATTRINFOPTR WHERE TO FETCH DATA FROM */
+/* TLOG_START FIRST INDEX TO LOG */
+/* TLOG_END LAST INDEX + 1 TO LOG */
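+/* */
+/* AS IMPLEMENTED BELOW, THE DATA IS TAKEN FROM clogMemBuffer AND */
+/* SENT AS TUP_ATTRINFO SIGNALS: theData[0] CARRIES THE USER */
+/* POINTER, THE LOG WORDS ARE COPIED IN FROM theData[3] ONWARDS AND */
+/* THE SIGNAL LENGTH IS 3 + DATA WORDS, SO FULL CHUNKS OF 22 LOG */
+/* WORDS GIVE 25 WORD SIGNALS. */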
+/* ---------------------------------------------------------------- */
+void Dbtup::sendLogAttrinfo(Signal* signal,
+ Uint32 TlogSize,
+ Operationrec * const regOperPtr)
+
+{
+ Uint32 TbufferIndex = 0;
+ signal->theData[0] = regOperPtr->userpointer;
+ while (TlogSize > 22) {
+ MEMCOPY_NO_WORDS(&signal->theData[3],
+ &clogMemBuffer[TbufferIndex],
+ 22);
+ EXECUTE_DIRECT(refToBlock(regOperPtr->userblockref),
+ GSN_TUP_ATTRINFO, signal, 25);
+ TbufferIndex += 22;
+ TlogSize -= 22;
+ }//while
+ MEMCOPY_NO_WORDS(&signal->theData[3],
+ &clogMemBuffer[TbufferIndex],
+ TlogSize);
+ EXECUTE_DIRECT(refToBlock(regOperPtr->userblockref),
+ GSN_TUP_ATTRINFO, signal, 3 + TlogSize);
+}//Dbtup::sendLogAttrinfo()
+
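+/* ---------------------------------------------------------------- */
+/* ILLUSTRATION OF THE BRANCH WORD DECODED BY brancher() BELOW: */
+/* BIT 31 GIVES THE DIRECTION (1 = BACKWARDS), BITS 16-30 GIVE THE */
+/* BRANCH LENGTH IN WORDS. THE PROGRAM COUNTER HAS ALREADY BEEN */
+/* STEPPED PAST THE BRANCH INSTRUCTION, SO IT IS FIRST STEPPED BACK */
+/* ONE WORD. EXAMPLE (ILLUSTRATIVE VALUES): INSTRUCTION 0x00030000 */
+/* WITH PROGRAM COUNTER 5 GIVES 5 - 1 + 3 = 7. */
+/* ---------------------------------------------------------------- */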
+inline
+Uint32
+brancher(Uint32 TheInstruction, Uint32 TprogramCounter)
+{
+ Uint32 TbranchDirection = TheInstruction >> 31;
+ Uint32 TbranchLength = (TheInstruction >> 16) & 0x7fff;
+ TprogramCounter--;
+ if (TbranchDirection == 1) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ /* WE JUMP BACKWARDS. */
+ /* ---------------------------------------------------------------- */
+ return (TprogramCounter - TbranchLength);
+ } else {
+ jam();
+ /* ---------------------------------------------------------------- */
+ /* WE JUMP FORWARD. */
+ /* ---------------------------------------------------------------- */
+ return (TprogramCounter + TbranchLength);
+ }//if
+}//brancher()
+
+int Dbtup::interpreterNextLab(Signal* signal,
+ Page* const pagePtr,
+ Uint32 TupHeadOffset,
+ Uint32* logMemory,
+ Uint32* mainProgram,
+ Uint32 TmainProgLen,
+ Uint32* subroutineProg,
+ Uint32 TsubroutineLen,
+ Uint32 * tmpArea,
+ Uint32 tmpAreaSz)
+{
+ register Uint32* TcurrentProgram = mainProgram;
+ register Uint32 TcurrentSize = TmainProgLen;
+ register Uint32 RnoOfInstructions = 0;
+ register Uint32 TprogramCounter = 0;
+ register Uint32 theInstruction;
+ register Uint32 theRegister;
+ Uint32 TdataWritten = 0;
+ Uint32 RstackPtr = 0;
+ union {
+ Uint32 TregMemBuffer[32];
+ Uint64 Tdummy[16];
+ };
+ Uint32 TstackMemBuffer[32];
+
+ /* ---------------------------------------------------------------- */
+ // Initialise all 8 registers to contain the NULL value.
+ // In this version we can handle 32 and 64 bit unsigned integers.
+ // They are handled as 64 bit values. Thus the 32 most significant
+ // bits are zeroed for 32 bit values.
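+ // As used by the code in this function, each register occupies four
+ // words of TregMemBuffer: word 0 holds a type tag (0 = NULL,
+ // 0x50 = 32 bit value, 0x60 = 64 bit value) and words 2-3 hold the
+ // value itself as a 64 bit quantity.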
+ /* ---------------------------------------------------------------- */
+ TregMemBuffer[0] = 0;
+ TregMemBuffer[4] = 0;
+ TregMemBuffer[8] = 0;
+ TregMemBuffer[12] = 0;
+ TregMemBuffer[16] = 0;
+ TregMemBuffer[20] = 0;
+ TregMemBuffer[24] = 0;
+ TregMemBuffer[28] = 0;
+ Uint32 tmpHabitant = ~0;
+
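+ /* ---------------------------------------------------------------- */
+ // The loop below executes at most 8000 interpreter instructions. If
+ // the program has not exited by then, TUPKEY_abort(signal, 35) at the
+ // end of this function aborts the operation, presumably as a guard
+ // against non-terminating interpreted programs.
+ /* ---------------------------------------------------------------- */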
+ while (RnoOfInstructions < 8000) {
+ /* ---------------------------------------------------------------- */
+ /* EXECUTE THE NEXT INTERPRETER INSTRUCTION. */
+ /* ---------------------------------------------------------------- */
+ RnoOfInstructions++;
+ theInstruction = TcurrentProgram[TprogramCounter];
+ theRegister = Interpreter::getReg1(theInstruction) << 2;
+ if (TprogramCounter < TcurrentSize) {
+ TprogramCounter++;
+ switch (Interpreter::getOpCode(theInstruction)) {
+ case Interpreter::READ_ATTR_INTO_REG:
+ jam();
+ /* ---------------------------------------------------------------- */
+ // Read an attribute from the tuple into a register.
+ // While reading an attribute we allow the attribute to be an array
+ // as long as it fits in the 64 bits of the register.
+ /* ---------------------------------------------------------------- */
+ {
+ Uint32 theAttrinfo = theInstruction;
+ int TnoDataRW= readAttributes(pagePtr,
+ TupHeadOffset,
+ &theAttrinfo,
+ (Uint32)1,
+ &TregMemBuffer[theRegister],
+ (Uint32)3,
+ false);
+ if (TnoDataRW == 2) {
+ /* ------------------------------------------------------------- */
+ // Two words read means that we get the instruction plus one 32 bit
+ // word read. Thus we set the register to be a 32 bit register.
+ /* ------------------------------------------------------------- */
+ TregMemBuffer[theRegister] = 0x50;
+ * (Int64*)(TregMemBuffer+theRegister+2) = TregMemBuffer[theRegister+1];
+ } else if (TnoDataRW == 3) {
+ /* ------------------------------------------------------------- */
+ // Three words read means that we get the instruction plus two 32 bit
+ // words read. Thus we set the register to be a 64 bit register.
+ /* ------------------------------------------------------------- */
+ TregMemBuffer[theRegister] = 0x60;
+ TregMemBuffer[theRegister+3] = TregMemBuffer[theRegister+2];
+ TregMemBuffer[theRegister+2] = TregMemBuffer[theRegister+1];
+ } else if (TnoDataRW == 1) {
+ /* ------------------------------------------------------------- */
+ // One word read means that we must have read a NULL value. We set
+ // the register to indicate a NULL value.
+ /* ------------------------------------------------------------- */
+ TregMemBuffer[theRegister] = 0;
+ TregMemBuffer[theRegister + 2] = 0;
+ TregMemBuffer[theRegister + 3] = 0;
+ } else if (TnoDataRW == -1) {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ } else {
+ /* ------------------------------------------------------------- */
+ // Any other return value from the read attribute here is not
+ // allowed and will lead to a system crash.
+ /* ------------------------------------------------------------- */
+ ndbrequire(false);
+ }//if
+ break;
+ }
+
+ case Interpreter::WRITE_ATTR_FROM_REG:
+ jam();
+ {
+ Uint32 TattrId = theInstruction >> 16;
+ Uint32 TattrDescrIndex = tabptr.p->tabDescriptor +
+ (TattrId << ZAD_LOG_SIZE);
+ Uint32 TattrDesc1 = tableDescriptor[TattrDescrIndex].tabDescr;
+ Uint32 TregType = TregMemBuffer[theRegister];
+
+ /* --------------------------------------------------------------- */
+ // Calculate the number of words of this attribute.
+ // We allow writes into arrays as long as they fit into the 64 bit
+ // register size.
+ /* --------------------------------------------------------------- */
+ Uint32 TattrNoOfWords = AttributeDescriptor::getSizeInWords(TattrDesc1);
+ Uint32 Toptype = operPtr.p->optype;
+
+ Uint32 TdataForUpdate[3];
+ Uint32 Tlen;
+
+ AttributeHeader& ah = AttributeHeader::init(&TdataForUpdate[0],
+ TattrId, TattrNoOfWords);
+ TdataForUpdate[1] = TregMemBuffer[theRegister + 2];
+ TdataForUpdate[2] = TregMemBuffer[theRegister + 3];
+ Tlen = TattrNoOfWords + 1;
+ if (Toptype == ZUPDATE) {
+ if (TattrNoOfWords <= 2) {
+ if (TregType == 0) {
+ /* --------------------------------------------------------- */
+ // Write a NULL value into the attribute
+ /* --------------------------------------------------------- */
+ ah.setNULL();
+ Tlen = 1;
+ }//if
+ int TnoDataRW= updateAttributes(pagePtr,
+ TupHeadOffset,
+ &TdataForUpdate[0],
+ Tlen);
+ if (TnoDataRW != -1) {
+ /* --------------------------------------------------------- */
+ // Write the written data also into the log buffer so that it
+ // will be logged.
+ /* --------------------------------------------------------- */
+ logMemory[TdataWritten + 0] = TdataForUpdate[0];
+ logMemory[TdataWritten + 1] = TdataForUpdate[1];
+ logMemory[TdataWritten + 2] = TdataForUpdate[2];
+ TdataWritten += Tlen;
+ } else {
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 15);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 16);
+ }//if
+ break;
+ }
+
+ case Interpreter::LOAD_CONST_NULL:
+ jam();
+ TregMemBuffer[theRegister] = 0; /* NULL INDICATOR */
+ break;
+
+ case Interpreter::LOAD_CONST16:
+ jam();
+ TregMemBuffer[theRegister] = 0x50; /* 32 BIT UNSIGNED CONSTANT */
+ * (Int64*)(TregMemBuffer+theRegister+2) = theInstruction >> 16;
+ break;
+
+ case Interpreter::LOAD_CONST32:
+ jam();
+ TregMemBuffer[theRegister] = 0x50; /* 32 BIT UNSIGNED CONSTANT */
+ * (Int64*)(TregMemBuffer+theRegister+2) = *
+ (TcurrentProgram+TprogramCounter);
+ TprogramCounter++;
+ break;
+
+ case Interpreter::LOAD_CONST64:
+ jam();
+ TregMemBuffer[theRegister] = 0x60; /* 64 BIT UNSIGNED CONSTANT */
+ TregMemBuffer[theRegister + 2 ] = * (TcurrentProgram + TprogramCounter++);
+ TregMemBuffer[theRegister + 3 ] = * (TcurrentProgram + TprogramCounter++);
+ break;
+
+ case Interpreter::ADD_REG_REG:
+ jam();
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TdestRegister = Interpreter::getReg3(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+ if ((TleftType | TrightType) != 0) {
+ Uint64 Tdest0 = Tleft0 + Tright0;
+ * (Int64*)(TregMemBuffer+TdestRegister+2) = Tdest0;
+ TregMemBuffer[TdestRegister] = 0x60;
+ } else {
+ return TUPKEY_abort(signal, 20);
+ }
+ break;
+ }
+
+ case Interpreter::SUB_REG_REG:
+ jam();
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+ Uint32 TdestRegister = Interpreter::getReg3(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+ if ((TleftType | TrightType) != 0) {
+ Int64 Tdest0 = Tleft0 - Tright0;
+ * (Int64*)(TregMemBuffer+TdestRegister+2) = Tdest0;
+ TregMemBuffer[TdestRegister] = 0x60;
+ } else {
+ return TUPKEY_abort(signal, 20);
+ }
+ break;
+ }
+
+ case Interpreter::BRANCH:
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ break;
+
+ case Interpreter::BRANCH_REG_EQ_NULL:
+ if (TregMemBuffer[theRegister] != 0) {
+ jam();
+ continue;
+ } else {
+ jam();
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ break;
+
+ case Interpreter::BRANCH_REG_NE_NULL:
+ if (TregMemBuffer[theRegister] == 0) {
+ jam();
+ continue;
+ } else {
+ jam();
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ break;
+
+
+ case Interpreter::BRANCH_EQ_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Uint32 Tleft0 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft1 = TregMemBuffer[theRegister + 3];
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Uint32 Tright0 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright1 = TregMemBuffer[TrightRegister + 3];
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if ((Tleft0 == Tright0) && (Tleft1 == Tright1)) {
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 23);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_NE_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Uint32 Tleft0 = TregMemBuffer[theRegister + 2];
+ Uint32 Tleft1 = TregMemBuffer[theRegister + 3];
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Uint32 Tright0 = TregMemBuffer[TrightRegister + 2];
+ Uint32 Tright1 = TregMemBuffer[TrightRegister + 3];
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if ((Tleft0 != Tright0) || (Tleft1 != Tright1)) {
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 24);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_LT_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if (Tleft0 < Tright0) {
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 24);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_LE_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if (Tleft0 <= Tright0) {
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 26);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_GT_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if (Tleft0 > Tright0){
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 27);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_GE_REG_REG:
+ {
+ Uint32 TrightRegister = Interpreter::getReg2(theInstruction) << 2;
+
+ Uint32 TrightType = TregMemBuffer[TrightRegister];
+ Int64 Tright0 = * (Int64*)(TregMemBuffer + TrightRegister + 2);
+
+ Uint32 TleftType = TregMemBuffer[theRegister];
+ Int64 Tleft0 = * (Int64*)(TregMemBuffer + theRegister + 2);
+
+
+ if ((TrightType | TleftType) != 0) {
+ jam();
+ if (Tleft0 >= Tright0){
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 28);
+ }//if
+ break;
+ }
+
+ case Interpreter::BRANCH_ATTR_OP_ARG:{
+ jam();
+ Uint32 cond = Interpreter::getBinaryCondition(theInstruction);
+ Uint32 ins2 = TcurrentProgram[TprogramCounter];
+ Uint32 attrId = Interpreter::getBranchCol_AttrId(ins2) << 16;
+ Uint32 argLen = Interpreter::getBranchCol_Len(ins2);
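+ // Layout of this instruction as used below (derived from the code,
+ // not from a separate spec): theInstruction carries the condition and
+ // branch offset, ins2 carries the attribute id and the argument length
+ // in bytes, and the argument value itself follows inline starting at
+ // TcurrentProgram[TprogramCounter + 1]. When the branch is not taken
+ // the program counter is advanced past the argument further below.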
+
+ if(tmpHabitant != attrId){
+ Int32 TnoDataR = readAttributes(pagePtr,
+ TupHeadOffset,
+ &attrId, 1,
+ tmpArea, tmpAreaSz,
+ false);
+
+ if (TnoDataR == -1) {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }
+ tmpHabitant = attrId;
+ }
+
+ // get type
+ attrId >>= 16;
+ Uint32 TattrDescrIndex = tabptr.p->tabDescriptor +
+ (attrId << ZAD_LOG_SIZE);
+ Uint32 TattrDesc1 = tableDescriptor[TattrDescrIndex].tabDescr;
+ Uint32 TattrDesc2 = tableDescriptor[TattrDescrIndex+1].tabDescr;
+ Uint32 typeId = AttributeDescriptor::getType(TattrDesc1);
+ void * cs = 0;
+ if(AttributeOffset::getCharsetFlag(TattrDesc2))
+ {
+ Uint32 pos = AttributeOffset::getCharsetPos(TattrDesc2);
+ cs = tabptr.p->charsetArray[pos];
+ }
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(typeId);
+
+ // get data
+ AttributeHeader ah(tmpArea[0]);
+ const char* s1 = (char*)&tmpArea[1];
+ const char* s2 = (char*)&TcurrentProgram[TprogramCounter+1];
+ // fixed length in 5.0
+ Uint32 attrLen = AttributeDescriptor::getSizeInBytes(TattrDesc1);
+
+ bool r1_null = ah.isNULL();
+ bool r2_null = argLen == 0;
+ int res1;
+ if (cond != Interpreter::LIKE &&
+ cond != Interpreter::NOT_LIKE) {
+ if (r1_null || r2_null) {
+ // NULL==NULL and NULL<not-NULL
+ res1 = r1_null && r2_null ? 0 : r1_null ? -1 : 1;
+ } else {
+ res1 = (*sqlType.m_cmp)(cs, s1, attrLen, s2, argLen, true);
+ }
+ } else {
+ if (r1_null || r2_null) {
+ // NULL like NULL is true (has no practical use)
+ res1 = r1_null && r2_null ? 0 : -1;
+ } else {
+ res1 = (*sqlType.m_like)(cs, s1, attrLen, s2, argLen);
+ }
+ }
+
+ int res = 0;
+ switch ((Interpreter::BinaryCondition)cond) {
+ case Interpreter::EQ:
+ res = (res1 == 0);
+ break;
+ case Interpreter::NE:
+ res = (res1 != 0);
+ break;
+ // note the condition is backwards
+ case Interpreter::LT:
+ res = (res1 > 0);
+ break;
+ case Interpreter::LE:
+ res = (res1 >= 0);
+ break;
+ case Interpreter::GT:
+ res = (res1 < 0);
+ break;
+ case Interpreter::GE:
+ res = (res1 <= 0);
+ break;
+ case Interpreter::LIKE:
+ res = (res1 == 0);
+ break;
+ case Interpreter::NOT_LIKE:
+ res = (res1 == 1);
+ break;
+ // XXX handle invalid value
+ }
+#ifdef TRACE_INTERPRETER
+ ndbout_c("cond=%u attr(%d)='%.*s'(%d) str='%.*s'(%d) res1=%d res=%d",
+ cond, attrId >> 16,
+ attrLen, s1, attrLen, argLen, s2, argLen, res1, res);
+#endif
+ if (res)
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ else
+ {
+ Uint32 tmp = ((argLen + 3) >> 2) + 1;
+ TprogramCounter += tmp;
+ }
+ break;
+ }
+
+ case Interpreter::BRANCH_ATTR_EQ_NULL:{
+ jam();
+ Uint32 ins2 = TcurrentProgram[TprogramCounter];
+ Uint32 attrId = Interpreter::getBranchCol_AttrId(ins2) << 16;
+
+ if(tmpHabitant != attrId){
+ Int32 TnoDataR = readAttributes(pagePtr,
+ TupHeadOffset,
+ &attrId, 1,
+ tmpArea, tmpAreaSz,
+ false);
+
+ if (TnoDataR == -1) {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }
+ tmpHabitant = attrId;
+ }
+
+ AttributeHeader ah(tmpArea[0]);
+ if(ah.isNULL()){
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ } else {
+ TprogramCounter ++;
+ }
+ break;
+ }
+
+ case Interpreter::BRANCH_ATTR_NE_NULL:{
+ jam();
+ Uint32 ins2 = TcurrentProgram[TprogramCounter];
+ Uint32 attrId = Interpreter::getBranchCol_AttrId(ins2) << 16;
+
+ if(tmpHabitant != attrId){
+ Int32 TnoDataR = readAttributes(pagePtr,
+ TupHeadOffset,
+ &attrId, 1,
+ tmpArea, tmpAreaSz,
+ false);
+
+ if (TnoDataR == -1) {
+ jam();
+ tupkeyErrorLab(signal);
+ return -1;
+ }
+ tmpHabitant = attrId;
+ }
+
+ AttributeHeader ah(tmpArea[0]);
+ if(ah.isNULL()){
+ TprogramCounter ++;
+ } else {
+ TprogramCounter = brancher(theInstruction, TprogramCounter);
+ }
+ break;
+ }
+
+ case Interpreter::EXIT_OK:
+ jam();
+#ifdef TRACE_INTERPRETER
+ ndbout_c(" - exit_ok");
+#endif
+ return TdataWritten;
+
+ case Interpreter::EXIT_OK_LAST:
+ jam();
+#ifdef TRACE_INTERPRETER
+ ndbout_c(" - exit_ok_last");
+#endif
+ operPtr.p->lastRow = 1;
+ return TdataWritten;
+
+ case Interpreter::EXIT_REFUSE:
+ jam();
+#ifdef TRACE_INTERPRETER
+ ndbout_c(" - exit_nok");
+#endif
+ terrorCode = theInstruction >> 16;
+ return TUPKEY_abort(signal, 29);
+
+ case Interpreter::CALL:
+ jam();
+ RstackPtr++;
+ if (RstackPtr < 32) {
+ TstackMemBuffer[RstackPtr] = TprogramCounter + 1;
+ TprogramCounter = theInstruction >> 16;
+ if (TprogramCounter < TsubroutineLen) {
+ TcurrentProgram = subroutineProg;
+ TcurrentSize = TsubroutineLen;
+ } else {
+ return TUPKEY_abort(signal, 30);
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 31);
+ }//if
+ break;
+
+ case Interpreter::RETURN:
+ jam();
+ if (RstackPtr > 0) {
+ TprogramCounter = TstackMemBuffer[RstackPtr];
+ RstackPtr--;
+ if (RstackPtr == 0) {
+ jam();
+ /* ------------------------------------------------------------- */
+ // We are back to the main program.
+ /* ------------------------------------------------------------- */
+ TcurrentProgram = mainProgram;
+ TcurrentSize = TmainProgLen;
+ }//if
+ } else {
+ return TUPKEY_abort(signal, 32);
+ }//if
+ break;
+
+ default:
+ return TUPKEY_abort(signal, 33);
+ }//switch
+ } else {
+ return TUPKEY_abort(signal, 34);
+ }//if
+ }//while
+ return TUPKEY_abort(signal, 35);
+}//Dbtup::interpreterNextLab()
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
new file mode 100644
index 00000000000..cdd54ba2337
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
@@ -0,0 +1,384 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(6000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(6000 + __LINE__); }
+
+//
+// Fixed Allocator
+// This module is used to allocate and free fixed size tuples from the
+// set of pages attached to a fragment. The fixed size is preset per
+// fragment and there can only be one such value per fragment in the
+// current implementation.
+//
+// Public methods
+// bool allocTh(Fragrecord* const regFragPtr, # In
+// Tablerec* const regTabPtr, # In
+// Uint32 pageType, # In
+// Signal* signal, # In
+// Uint32& pageOffset, # Out
+// PagePtr& pagePtr) # In/Out
+// This method allocates a fixed size tuple header; pagePtr is a reference
+// to the page and pageOffset is the offset of the tuple in the page.
+//
+// freeTh()
+// This method is used to free a tuple header in normal transaction
+// handling.
+//
+// freeThSr()
+// This method is used to free a tuple as part of executing the undo
+// log records.
+//
+// getThAtPageSr()
+// This method is used to allocate a tuple on a set page as part of
+// undo log execution.
+//
+//
+// Private methods
+// getThAtPage()
+// This method gets a tuple from a page with free tuples.
+//
+// convertThPage()
+// Convert an empty page into a page of free tuples in a linked list.
+//
+// getEmptyPageThCopy()
+// A page recently taken from the set of empty pages on the fragment is
+// made part of the copy pages.
+//
+// getEmptyPageTh()
+// A page recently taken from the set of empty pages on the fragment
+// is made part of the set of free pages with fixed size tuples in the
+// fragment.
+//
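+// A typical call from the allocation path might look as follows (a
+// sketch only; the surrounding bookkeeping is omitted and the local
+// variable names are illustrative, not taken from the callers):
+//
+// Uint32 pageOffset;
+// PagePtr pagePtr;
+// if (!allocTh(fragPtr.p, tabPtr.p, NORMAL_PAGE, signal,
+// pageOffset, pagePtr)) {
+// // allocation failed, terrorCode is set to ZMEM_NOMEM_ERROR
+// }
+//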
+bool Dbtup::allocTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 pageType,
+ Signal* signal,
+ Uint32& pageOffset,
+ PagePtr& pagePtr)
+{
+ if (pageType == SAME_PAGE) {
+ ljam();
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ if (pagePtr.p->pageWord[ZPAGE_STATE_POS] == ZTH_MM_FREE) {
+ ljam();
+ getThAtPage(regFragPtr, pagePtr.p, signal, pageOffset);
+ return true;
+ }//if
+ pageType = NORMAL_PAGE;
+ }//if
+ if (pageType == COPY_PAGE) {
+/* ---------------------------------------------------------------- */
+// Allocate a tuple header for the copy of the tuple header
+/* ---------------------------------------------------------------- */
+ if (regFragPtr->thFreeCopyFirst == RNIL) {
+/* ---------------------------------------------------------------- */
+// No page in list with free tuple header exists
+/* ---------------------------------------------------------------- */
+ if (regFragPtr->noCopyPagesAlloc < ZMAX_NO_COPY_PAGES) {
+ ljam();
+/* ---------------------------------------------------------------- */
+// We have not yet allocated the maximum number of copy pages for
+// this fragment.
+/* ---------------------------------------------------------------- */
+ pagePtr.i = getEmptyPage(regFragPtr);
+ if (pagePtr.i != RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+// We have empty pages already allocated to this fragment. Allocate
+// one of those as copy page.
+/* ---------------------------------------------------------------- */
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ getEmptyPageThCopy(regFragPtr, signal, pagePtr.p);
+/* ---------------------------------------------------------------- */
+// Convert page into a tuple header page.
+/* ---------------------------------------------------------------- */
+ convertThPage(regTabPtr->tupheadsize, pagePtr.p);
+ getThAtPage(regFragPtr, pagePtr.p, signal, pageOffset);
+ return true;
+ }//if
+ }//if
+ } else {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* NORMAL PATH WHEN COPY PAGE REQUESTED, GET PAGE POINTER */
+/* AND THEN GOTO COMMON HANDLING OF GET TUPLE HEADER AT PAGE. */
+/* ---------------------------------------------------------------- */
+ pagePtr.i = getRealpid(regFragPtr, regFragPtr->thFreeCopyFirst);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ getThAtPage(regFragPtr, pagePtr.p, signal, pageOffset);
+ return true;
+ }//if
+ }//if
+/* ---------------------------------------------------------------- */
+/* EITHER NORMAL PAGE REQUESTED OR ALLOCATION FROM COPY PAGE */
+/* FAILED. TRY ALLOCATING FROM NORMAL PAGE. */
+/* ---------------------------------------------------------------- */
+ Uint32 fragPageId = regFragPtr->thFreeFirst;
+ if (fragPageId == RNIL) {
+/* ---------------------------------------------------------------- */
+// No prepared tuple header page with free entries exists.
+/* ---------------------------------------------------------------- */
+ pagePtr.i = getEmptyPage(regFragPtr);
+ if (pagePtr.i != RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+// We found empty pages on the fragment. Allocate an empty page and
+// convert it into a tuple header page and put it in thFreeFirst-list.
+/* ---------------------------------------------------------------- */
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ getEmptyPageTh(regFragPtr, signal, pagePtr.p);
+ convertThPage(regTabPtr->tupheadsize, pagePtr.p);
+ getThAtPage(regFragPtr, pagePtr.p, signal, pageOffset);
+ return true;
+ } else {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED. */
+/* ---------------------------------------------------------------- */
+ terrorCode = ZMEM_NOMEM_ERROR;
+ return false;
+ }//if
+ } else {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, A PAGE WITH */
+/* FREE TUPLE HEADERS EXISTED. */
+/* ---------------------------------------------------------------- */
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ getThAtPage(regFragPtr, pagePtr.p, signal, pageOffset);
+ return true;
+ }//if
+ ndbrequire(false); // Dead code
+ return false;
+}//Dbtup::allocTh()
+
+void Dbtup::convertThPage(Uint32 Tupheadsize,
+ Page* const regPagePtr)
+{
+ Uint32 ctpConstant = Tupheadsize << 16;
+ Uint32 nextTuple = ZPAGE_HEADER_SIZE + Tupheadsize;
+ Uint32 endOfList;
+ /*
+ ASSUMES AT LEAST ONE TUPLE HEADER FITS AND THEREFORE NO HANDLING
+ OF ZERO AS EXTREME CASE
+ */
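+ /*
+ As built by the loop below, each free slot holds
+ (tupheadsize << 16) | next-free-offset, with a next offset of zero
+ terminating the list, and ZFREELIST_HEADER_POS holds
+ (first-free << 16) | last-free for the whole page.
+ */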
+ do {
+ ljam();
+ endOfList = nextTuple - Tupheadsize;
+ regPagePtr->pageWord[endOfList] = ctpConstant + nextTuple;
+ nextTuple += Tupheadsize;
+ } while (nextTuple <= ZWORDS_ON_PAGE);
+ regPagePtr->pageWord[endOfList] = ctpConstant;
+ Uint32 startOfList = ZPAGE_HEADER_SIZE;
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = (startOfList << 16) + endOfList;
+}//Dbtup::convertThPage()
+
+void Dbtup::getEmptyPageTh(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr)
+{
+ if (isUndoLoggingNeeded(regFragPtr, regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS])) {
+ cprAddUndoLogPageHeader(signal,
+ regPagePtr,
+ regFragPtr);
+ }//if
+ regPagePtr->pageWord[ZPAGE_NEXT_POS] = regFragPtr->thFreeFirst;
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FREE;
+ regFragPtr->thFreeFirst = regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+
+ ndbrequire(regFragPtr->thFreeFirst != (RNIL - 1));
+}//Dbtup::getEmptyPageTh()
+
+void Dbtup::getEmptyPageThCopy(Fragrecord* const regFragPtr,
+ Signal* signal,
+ Page* const regPagePtr)
+{
+ if (isUndoLoggingNeeded(regFragPtr, regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS])) {
+ cprAddUndoLogPageHeader(signal,
+ regPagePtr,
+ regFragPtr);
+ }//if
+ regPagePtr->pageWord[ZPAGE_NEXT_POS] = regFragPtr->thFreeCopyFirst;
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FREE_COPY;
+ regFragPtr->thFreeCopyFirst = regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ regFragPtr->noCopyPagesAlloc++;
+}//Dbtup::getEmptyPageThCopy()
+
+void Dbtup::getThAtPage(Fragrecord* const regFragPtr,
+ Page* const regPagePtr,
+ Signal* signal,
+ Uint32& pageOffset)
+{
+ Uint32 freeListHeader = regPagePtr->pageWord[ZFREELIST_HEADER_POS];
+ Uint32 startTuple = freeListHeader >> 16;
+ Uint32 endTuple = freeListHeader & 0xffff;
+ pageOffset = startTuple; /* START IS THE ONE ALLOCATED */
+ if (startTuple > 0) {
+ if (startTuple != endTuple) {
+/* ---------------------------------------------------------------- */
+/* NOT THE LAST, SIMPLY RESHUFFLE POINTERS. */
+/* ---------------------------------------------------------------- */
+ ndbrequire(startTuple < ZWORDS_ON_PAGE);
+ startTuple = regPagePtr->pageWord[startTuple] & 0xffff;
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = endTuple +
+ (startTuple << 16);
+ return;
+ } else {
+/* ---------------------------------------------------------------- */
+/* THIS WAS THE LAST TUPLE HEADER IN THIS PAGE. REMOVE IT FROM*/
+/* THE TUPLE HEADER FREE LIST OR TH COPY FREE LIST. ALSO SET */
+/* A PROPER PAGE STATE. */
+/* */
+/* WE ALSO HAVE TO INSERT AN UNDO LOG ENTRY TO ENSURE THAT THE */
+/* PAGE FREE LISTS ARE MAINTAINED EVEN AFTER A SYSTEM CRASH. */
+/* ---------------------------------------------------------------- */
+ if (isUndoLoggingNeeded(regFragPtr,
+ regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS])) {
+ cprAddUndoLogPageHeader(signal,
+ regPagePtr,
+ regFragPtr);
+ }//if
+ if (regPagePtr->pageWord[ZPAGE_STATE_POS] == ZTH_MM_FREE) {
+ ljam();
+ regFragPtr->thFreeFirst = regPagePtr->pageWord[ZPAGE_NEXT_POS];
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FULL;
+ } else if (regPagePtr->pageWord[ZPAGE_STATE_POS] == ZTH_MM_FREE_COPY) {
+ ljam();
+ regFragPtr->thFreeCopyFirst = regPagePtr->pageWord[ZPAGE_NEXT_POS];
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FULL_COPY;
+ } else {
+ ndbrequire(false);
+ }//if
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = 0;
+ regPagePtr->pageWord[ZPAGE_NEXT_POS] = RNIL;
+ }//if
+ } else {
+ ndbrequire(false);
+ }//if
+ return;
+}//Dbtup::getThAtPage()
+
+void Dbtup::getThAtPageSr(Page* const regPagePtr,
+ Uint32& pageOffset)
+{
+ Uint32 freeListHeader = regPagePtr->pageWord[ZFREELIST_HEADER_POS];
+ Uint32 startTuple = freeListHeader >> 16;
+ Uint32 endTuple = freeListHeader & 0xffff;
+ ndbrequire(startTuple > 0);
+ pageOffset = startTuple; /* START IS THE ONE ALLOCATED */
+ if (startTuple == endTuple) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THIS WAS THE LAST TUPLE HEADER IN THIS PAGE. SINCE WE ARE */
+/* UNDOING PAGE UPDATES WE SHALL NOT DO ANYTHING ABOUT THE */
+/* PAGE HEADER. THIS IS DONE BY SEPARATE LOG RECORDS. */
+/* ---------------------------------------------------------------- */
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = 0;
+ } else {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* NOT THE LAST, SIMPLY RESHUFFLE POINTERS. */
+/* ---------------------------------------------------------------- */
+ ndbrequire(startTuple < ZWORDS_ON_PAGE);
+ startTuple = regPagePtr->pageWord[startTuple] & 0xffff; /* GET NEXT POINTER */
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = endTuple + (startTuple << 16);
+ }//if
+}//Dbtup::getThAtPageSr()
+
+void Dbtup::freeTh(Fragrecord* const regFragPtr,
+ Tablerec* const regTabPtr,
+ Signal* signal,
+ Page* const regPagePtr,
+ Uint32 freePageOffset)
+{
+ Uint32 startOfList = regPagePtr->pageWord[ZFREELIST_HEADER_POS] >> 16;
+ Uint32 endOfList = regPagePtr->pageWord[ZFREELIST_HEADER_POS] & 0xffff;
+/* LINK THE NOW FREE TUPLE SPACE INTO BEGINNING OF FREE LIST OF THE PAGE */
+/* SET THE SIZE OF THE NEW FREE SPACE AND LINK TO THE OLD START OF FREELIST */
+ ndbrequire(freePageOffset < ZWORDS_ON_PAGE);
+ regPagePtr->pageWord[freePageOffset] = (regTabPtr->tupheadsize << 16) +
+ startOfList;
+ if (endOfList == 0) {
+ ljam();
+ ndbrequire(startOfList == 0);
+/* ---------------------------------------------------------------- */
+/* THE PAGE WAS PREVIOUSLY FULL, NO EMPTY SPACE AT ALL. */
+/* THIS ENTRY WILL THEN BE BOTH THE START AND THE END OF THE */
+/* LIST. IT WILL ALSO BE PUT ON THE PROPER FREE LIST. */
+/* */
+/* UPDATE OF NEXT POINTER AND PAGE STATE MUST BE LOGGED TO */
+/* THE UNDO LOG TO ENSURE THAT FREE LISTS ARE OK AFTER A */
+/* SYSTEM RESTART. */
+/* ---------------------------------------------------------------- */
+ if (isUndoLoggingNeeded(regFragPtr, regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS])) {
+ cprAddUndoLogPageHeader(signal,
+ regPagePtr,
+ regFragPtr);
+ }//if
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = (freePageOffset << 16) + freePageOffset;
+ if (regPagePtr->pageWord[ZPAGE_STATE_POS] == ZTH_MM_FULL) {
+ ljam();
+ regPagePtr->pageWord[ZPAGE_NEXT_POS] = regFragPtr->thFreeFirst;
+ regFragPtr->thFreeFirst = regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FREE;
+ } else {
+ ndbrequire(regPagePtr->pageWord[ZPAGE_STATE_POS] == ZTH_MM_FULL_COPY);
+ ljam();
+ regPagePtr->pageWord[ZPAGE_NEXT_POS] = regFragPtr->thFreeCopyFirst;
+ regFragPtr->thFreeCopyFirst = regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ regPagePtr->pageWord[ZPAGE_STATE_POS] = ZTH_MM_FREE_COPY;
+ }//if
+ } else {
+ ljam();
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = (freePageOffset << 16) + endOfList;
+ }//if
+}//Dbtup::freeTh()
+
+void Dbtup::freeThSr(Tablerec* const regTabPtr,
+ Page* const regPagePtr,
+ Uint32 freePageOffset)
+{
+/* ------------------------------------------------------------------------ */
+/* LINK THE NOW FREE TUPLE SPACE INTO BEGINNING OF FREE LIST OF THE PAGE */
+/* SET THE SIZE OF THE NEW FREE SPACE AND LINK TO THE OLD START OF FREELIST */
+/* ------------------------------------------------------------------------ */
+ Uint32 startOfList = regPagePtr->pageWord[ZFREELIST_HEADER_POS] >> 16;
+ Uint32 endOfList = regPagePtr->pageWord[ZFREELIST_HEADER_POS] & 0xffff;
+ ndbrequire(freePageOffset < ZWORDS_ON_PAGE);
+ regPagePtr->pageWord[freePageOffset] = (regTabPtr->tupheadsize << 16) + startOfList;
+ if (endOfList == 0) {
+ ljam();
+ ndbrequire(startOfList == 0);
+/* ---------------------------------------------------------------- */
+/* THE PAGE WAS PREVIOUSLY FULL, NO EMPTY SPACE AT ALL. */
+/* THIS ENTRY WILL THEN BE BOTH THE START AND THE END OF THE */
+/* LIST. DURING UNDO LOG EXECUTION THE PAGE FREE LISTS ARE */
+/* RESTORED BY SEPARATE LOG RECORDS, SO ONLY THE PAGE ITSELF IS */
+/* UPDATED HERE. */
+/* ---------------------------------------------------------------- */
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = (freePageOffset << 16) + freePageOffset;
+ } else {
+ ljam();
+ regPagePtr->pageWord[ZFREELIST_HEADER_POS] = (freePageOffset << 16) + endOfList;
+ }//if
+}//Dbtup::freeThSr()
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
new file mode 100644
index 00000000000..0d7430e662d
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -0,0 +1,1343 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+#include <Interpreter.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/TupCommit.hpp>
+#include <signaldata/TupKey.hpp>
+
+#include <signaldata/DropTab.hpp>
+
+#define DEBUG(x) { ndbout << "TUP::" << x << endl; }
+
+#define ljam() { jamLine(24000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(24000 + __LINE__); }
+
+void Dbtup::initData()
+{
+ cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC;
+ cnoOfLcpRec = ZNO_OF_LCP_REC;
+ cnoOfConcurrentOpenOp = ZNO_OF_CONCURRENT_OPEN_OP;
+ cnoOfConcurrentWriteOp = ZNO_OF_CONCURRENT_WRITE_OP;
+ cnoOfFragoprec = 2 * MAX_FRAG_PER_NODE;
+ cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC;
+ cnoOfParallellUndoFiles = ZNO_OF_PARALLELL_UNDO_FILES;
+ cnoOfRestartInfoRec = ZNO_OF_RESTART_INFO_REC;
+ c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
+ c_noOfBuildIndexRec = 32;
+
+ attrbufrec = 0;
+ checkpointInfo = 0;
+ diskBufferSegmentInfo = 0;
+ fragoperrec = 0;
+ fragrecord = 0;
+ hostBuffer = 0;
+ localLogInfo = 0;
+ operationrec = 0;
+ page = 0;
+ pageRange = 0;
+ pendingFileOpenInfo = 0;
+ restartInfoRecord = 0;
+ tablerec = 0;
+ tableDescriptor = 0;
+ undoPage = 0;
+ totNoOfPagesAllocated = 0;
+ cnoOfAllocatedPages = 0;
+
+ // Records with constant sizes
+}//Dbtup::initData()
+
+Dbtup::Dbtup(const class Configuration & conf)
+ : SimulatedBlock(DBTUP, conf),
+ c_storedProcPool(),
+ c_buildIndexList(c_buildIndexPool)
+{
+ Uint32 log_page_size= 0;
+ BLOCK_CONSTRUCTOR(Dbtup);
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_DATA_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always set page size in half MBytes
+ */
+ cnoOfUndoPage= (log_page_size / sizeof(UndoPage));
+ Uint32 mega_byte_part= cnoOfUndoPage & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ cnoOfUndoPage+= (16 - mega_byte_part);
+ }
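+ /**
+ * Assuming the UndoPage size of 8192 words (32 kByte, see the BAT
+ * setup in initRecords()), rounding the page count up to a multiple
+ * of 16 gives a whole number of half MBytes as stated above.
+ */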
+
+ addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
+ addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);
+
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbtup::execDUMP_STATE_ORD);
+ addRecSignal(GSN_SEND_PACKED, &Dbtup::execSEND_PACKED);
+ addRecSignal(GSN_ATTRINFO, &Dbtup::execATTRINFO);
+ addRecSignal(GSN_STTOR, &Dbtup::execSTTOR);
+ addRecSignal(GSN_TUP_LCPREQ, &Dbtup::execTUP_LCPREQ);
+ addRecSignal(GSN_END_LCPREQ, &Dbtup::execEND_LCPREQ);
+ addRecSignal(GSN_START_RECREQ, &Dbtup::execSTART_RECREQ);
+ addRecSignal(GSN_MEMCHECKREQ, &Dbtup::execMEMCHECKREQ);
+ addRecSignal(GSN_TUPKEYREQ, &Dbtup::execTUPKEYREQ);
+ addRecSignal(GSN_TUPSEIZEREQ, &Dbtup::execTUPSEIZEREQ);
+ addRecSignal(GSN_TUPRELEASEREQ, &Dbtup::execTUPRELEASEREQ);
+ addRecSignal(GSN_STORED_PROCREQ, &Dbtup::execSTORED_PROCREQ);
+ addRecSignal(GSN_TUPFRAGREQ, &Dbtup::execTUPFRAGREQ);
+ addRecSignal(GSN_TUP_ADD_ATTRREQ, &Dbtup::execTUP_ADD_ATTRREQ);
+ addRecSignal(GSN_TUP_COMMITREQ, &Dbtup::execTUP_COMMITREQ);
+ addRecSignal(GSN_TUP_ABORTREQ, &Dbtup::execTUP_ABORTREQ);
+ addRecSignal(GSN_TUP_SRREQ, &Dbtup::execTUP_SRREQ);
+ addRecSignal(GSN_TUP_PREPLCPREQ, &Dbtup::execTUP_PREPLCPREQ);
+ addRecSignal(GSN_FSOPENCONF, &Dbtup::execFSOPENCONF);
+ addRecSignal(GSN_FSOPENREF, &Dbtup::execFSOPENREF);
+ addRecSignal(GSN_FSCLOSECONF, &Dbtup::execFSCLOSECONF);
+ addRecSignal(GSN_FSCLOSEREF, &Dbtup::execFSCLOSEREF);
+ addRecSignal(GSN_FSWRITECONF, &Dbtup::execFSWRITECONF);
+ addRecSignal(GSN_FSWRITEREF, &Dbtup::execFSWRITEREF);
+ addRecSignal(GSN_FSREADCONF, &Dbtup::execFSREADCONF);
+ addRecSignal(GSN_FSREADREF, &Dbtup::execFSREADREF);
+ addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true);
+ addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ);
+
+ // Trigger Signals
+ addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ);
+ addRecSignal(GSN_DROP_TRIG_REQ, &Dbtup::execDROP_TRIG_REQ);
+
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbtup::execDROP_TAB_REQ);
+ addRecSignal(GSN_FSREMOVEREF, &Dbtup::execFSREMOVEREF);
+ addRecSignal(GSN_FSREMOVECONF, &Dbtup::execFSREMOVECONF);
+
+ addRecSignal(GSN_TUP_ALLOCREQ, &Dbtup::execTUP_ALLOCREQ);
+ addRecSignal(GSN_TUP_DEALLOCREQ, &Dbtup::execTUP_DEALLOCREQ);
+ addRecSignal(GSN_TUP_WRITELOG_REQ, &Dbtup::execTUP_WRITELOG_REQ);
+
+ // Ordered index related
+ addRecSignal(GSN_BUILDINDXREQ, &Dbtup::execBUILDINDXREQ);
+
+ initData();
+}//Dbtup::Dbtup()
+
+Dbtup::~Dbtup()
+{
+ // Records with dynamic sizes
+ deallocRecord((void **)&attrbufrec,"Attrbufrec",
+ sizeof(Attrbufrec),
+ cnoOfAttrbufrec);
+
+ deallocRecord((void **)&checkpointInfo,"CheckpointInfo",
+ sizeof(CheckpointInfo),
+ cnoOfLcpRec);
+
+ deallocRecord((void **)&diskBufferSegmentInfo,
+ "DiskBufferSegmentInfo",
+ sizeof(DiskBufferSegmentInfo),
+ cnoOfConcurrentWriteOp);
+
+ deallocRecord((void **)&fragoperrec,"Fragoperrec",
+ sizeof(Fragoperrec),
+ cnoOfFragoprec);
+
+ deallocRecord((void **)&fragrecord,"Fragrecord",
+ sizeof(Fragrecord),
+ cnoOfFragrec);
+
+ deallocRecord((void **)&hostBuffer,"HostBuffer",
+ sizeof(HostBuffer),
+ MAX_NODES);
+
+ deallocRecord((void **)&localLogInfo,"LocalLogInfo",
+ sizeof(LocalLogInfo),
+ cnoOfParallellUndoFiles);
+
+ deallocRecord((void **)&operationrec,"Operationrec",
+ sizeof(Operationrec),
+ cnoOfOprec);
+
+ deallocRecord((void **)&page,"Page",
+ sizeof(Page),
+ cnoOfPage);
+
+ deallocRecord((void **)&pageRange,"PageRange",
+ sizeof(PageRange),
+ cnoOfPageRangeRec);
+
+ deallocRecord((void **)&pendingFileOpenInfo,
+ "PendingFileOpenInfo",
+ sizeof(PendingFileOpenInfo),
+ cnoOfConcurrentOpenOp);
+
+ deallocRecord((void **)&restartInfoRecord,
+ "RestartInfoRecord",
+ sizeof(RestartInfoRecord),
+ cnoOfRestartInfoRec);
+
+ deallocRecord((void **)&tablerec,"Tablerec",
+ sizeof(Tablerec),
+ cnoOfTablerec);
+
+ deallocRecord((void **)&tableDescriptor, "TableDescriptor",
+ sizeof(TableDescriptor),
+ cnoOfTabDescrRec);
+
+ deallocRecord((void **)&undoPage,"UndoPage",
+ sizeof(UndoPage),
+ cnoOfUndoPage);
+
+}//Dbtup::~Dbtup()
+
+BLOCK_FUNCTIONS(Dbtup)
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ----- GENERAL SIGNAL MULTIPLEXER (FS + CONTINUEB) -------------- */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+void Dbtup::execFSCLOSECONF(Signal* signal)
+{
+ PendingFileOpenInfoPtr pfoPtr;
+ ljamEntry();
+ pfoPtr.i = signal->theData[0];
+ ptrCheckGuard(pfoPtr, cnoOfConcurrentOpenOp, pendingFileOpenInfo);
+ switch (pfoPtr.p->pfoOpenType) {
+ case LCP_DATA_FILE_CLOSE:
+ {
+ CheckpointInfoPtr ciPtr;
+ ljam();
+ ciPtr.i = pfoPtr.p->pfoCheckpointInfoP;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ ciPtr.p->lcpDataFileHandle = RNIL;
+ lcpClosedDataFileLab(signal, ciPtr);
+ break;
+ }
+ case LCP_UNDO_FILE_CLOSE:
+ {
+ LocalLogInfoPtr lliPtr;
+ ljam();
+ lliPtr.i = pfoPtr.p->pfoCheckpointInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ lliPtr.p->lliUndoFileHandle = RNIL;
+ lcpEndconfLab(signal);
+ break;
+ }
+ case LCP_UNDO_FILE_READ:
+ ljam();
+ endExecUndoLogLab(signal, pfoPtr.p->pfoCheckpointInfoP);
+ break;
+ case LCP_DATA_FILE_READ:
+ ljam();
+ rfrClosedDataFileLab(signal, pfoPtr.p->pfoCheckpointInfoP);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ releasePendingFileOpenInfoRecord(pfoPtr);
+}//Dbtup::execFSCLOSECONF()
+
+void Dbtup::execFSCLOSEREF(Signal* signal)
+{
+ ljamEntry();
+ ndbrequire(false);
+}//Dbtup::execFSCLOSEREF()
+
+void Dbtup::execFSOPENCONF(Signal* signal)
+{
+ PendingFileOpenInfoPtr pfoPtr;
+
+ ljamEntry();
+ pfoPtr.i = signal->theData[0];
+ Uint32 fileHandle = signal->theData[1];
+ ptrCheckGuard(pfoPtr, cnoOfConcurrentOpenOp, pendingFileOpenInfo);
+ switch (pfoPtr.p->pfoOpenType) {
+ case LCP_DATA_FILE_READ:
+ {
+ RestartInfoRecordPtr riPtr;
+ ljam();
+ riPtr.i = pfoPtr.p->pfoRestartInfoP;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+ riPtr.p->sriDataFileHandle = fileHandle;
+ rfrReadRestartInfoLab(signal, riPtr);
+ break;
+ }
+ case LCP_UNDO_FILE_READ:
+ {
+ RestartInfoRecordPtr riPtr;
+ LocalLogInfoPtr lliPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ ljam();
+ riPtr.i = pfoPtr.p->pfoRestartInfoP;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+ lliPtr.i = riPtr.p->sriLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ lliPtr.p->lliUndoFileHandle = fileHandle;
+ dbsiPtr.i = riPtr.p->sriDataBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ rfrLoadDataPagesLab(signal, riPtr, dbsiPtr);
+ break;
+ }
+ case LCP_DATA_FILE_WRITE_WITH_UNDO:
+ {
+ CheckpointInfoPtr ciPtr;
+ LocalLogInfoPtr lliPtr;
+
+ ljam();
+ ciPtr.i = pfoPtr.p->pfoCheckpointInfoP;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ ciPtr.p->lcpDataFileHandle = fileHandle;
+ if (lliPtr.p->lliUndoFileHandle != RNIL) {
+ ljam();
+ signal->theData[0] = ciPtr.p->lcpUserptr;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(ciPtr.p->lcpBlockref, GSN_TUP_PREPLCPCONF, signal, 2, JBB);
+ }//if
+ break;
+ }
+ case LCP_DATA_FILE_WRITE:
+ {
+ CheckpointInfoPtr ciPtr;
+
+ ljam();
+ ciPtr.i = pfoPtr.p->pfoCheckpointInfoP;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ ciPtr.p->lcpDataFileHandle = fileHandle;
+ signal->theData[0] = ciPtr.p->lcpUserptr;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(ciPtr.p->lcpBlockref, GSN_TUP_PREPLCPCONF, signal, 2, JBB);
+ break;
+ }
+ case LCP_UNDO_FILE_WRITE:
+ {
+ CheckpointInfoPtr ciPtr;
+ LocalLogInfoPtr lliPtr;
+
+ ljam();
+ ciPtr.i = pfoPtr.p->pfoCheckpointInfoP;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ lliPtr.p->lliUndoFileHandle = fileHandle;
+ if (ciPtr.p->lcpDataFileHandle != RNIL) {
+ ljam();
+ signal->theData[0] = ciPtr.p->lcpUserptr;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(ciPtr.p->lcpBlockref, GSN_TUP_PREPLCPCONF, signal, 2, JBB);
+ }//if
+ break;
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ releasePendingFileOpenInfoRecord(pfoPtr);
+}//Dbtup::execFSOPENCONF()
+
+void Dbtup::execFSOPENREF(Signal* signal)
+{
+ ljamEntry();
+ ndbrequire(false);
+}//Dbtup::execFSOPENREF()
+
+void Dbtup::execFSREADCONF(Signal* signal)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ ljamEntry();
+ dbsiPtr.i = signal->theData[0];
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ switch (dbsiPtr.p->pdxOperation) {
+ case CHECKPOINT_DATA_READ:
+ {
+ RestartInfoRecordPtr riPtr;
+ ljam();
+ riPtr.i = dbsiPtr.p->pdxRestartInfoP;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+/************************************************************/
+/* VERIFY THAT THE PAGES ARE CORRECT, HAVE A CORRECT */
+/* STATE AND A CORRECT PAGE ID. */
+/************************************************************/
+ ndbrequire(dbsiPtr.p->pdxNumDataPages <= 16);
+ for (Uint32 i = 0; i < dbsiPtr.p->pdxNumDataPages; i++) {
+ PagePtr pagePtr;
+ ljam();
+ pagePtr.i = dbsiPtr.p->pdxDataPage[i];
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ ndbrequire(pagePtr.p->pageWord[ZPAGE_STATE_POS] != 0);
+ ndbrequire(pagePtr.p->pageWord[ZPAGE_STATE_POS] <= ZAC_MM_FREE_COPY);
+ ndbrequire(pagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS] == ((dbsiPtr.p->pdxFilePage - 1) + i));
+ }//for
+ rfrLoadDataPagesLab(signal, riPtr, dbsiPtr);
+ break;
+ }
+ case CHECKPOINT_DATA_READ_PAGE_ZERO:
+ {
+ ljam();
+ rfrInitRestartInfoLab(signal, dbsiPtr);
+ break;
+ }
+ case CHECKPOINT_UNDO_READ:
+ {
+ LocalLogInfoPtr lliPtr;
+ ljam();
+ lliPtr.i = dbsiPtr.p->pdxCheckpointInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ xlcGetNextRecordLab(signal, dbsiPtr, lliPtr);
+ break;
+ }
+ case CHECKPOINT_UNDO_READ_FIRST:
+ ljam();
+ rfrReadSecondUndoLogLab(signal, dbsiPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::execFSREADCONF()
+
+void Dbtup::execFSREADREF(Signal* signal)
+{
+ ljamEntry();
+ ndbrequire(false);
+}//Dbtup::execFSREADREF()
+
+void Dbtup::execFSWRITECONF(Signal* signal)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ ljamEntry();
+ dbsiPtr.i = signal->theData[0];
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ switch (dbsiPtr.p->pdxOperation) {
+ case CHECKPOINT_DATA_WRITE:
+ ljam();
+ lcpSaveDataPageLab(signal, dbsiPtr.p->pdxCheckpointInfoP);
+ break;
+ case CHECKPOINT_DATA_WRITE_LAST:
+ {
+ CheckpointInfoPtr ciPtr;
+ ljam();
+ ciPtr.i = dbsiPtr.p->pdxCheckpointInfoP;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ lcpFlushLogLab(signal, ciPtr);
+ break;
+ }
+ case CHECKPOINT_DATA_WRITE_FLUSH:
+ {
+ ljam();
+ Uint32 ciIndex = dbsiPtr.p->pdxCheckpointInfoP;
+ freeDiskBufferSegmentRecord(signal, dbsiPtr);
+ lcpCompletedLab(signal, ciIndex);
+ break;
+ }
+ case CHECKPOINT_UNDO_WRITE_FLUSH:
+ {
+ ljam();
+ Uint32 ciIndex = dbsiPtr.p->pdxCheckpointInfoP;
+ freeDiskBufferSegmentRecord(signal, dbsiPtr);
+ lcpFlushRestartInfoLab(signal, ciIndex);
+ break;
+ }
+ case CHECKPOINT_UNDO_WRITE:
+ ljam();
+ freeDiskBufferSegmentRecord(signal, dbsiPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ return;
+}//Dbtup::execFSWRITECONF()
+
+void Dbtup::execFSWRITEREF(Signal* signal)
+{
+ ljamEntry();
+ ndbrequire(false);
+}//Dbtup::execFSWRITEREF()
+
+void Dbtup::execCONTINUEB(Signal* signal)
+{
+ ljamEntry();
+ Uint32 actionType = signal->theData[0];
+ Uint32 dataPtr = signal->theData[1];
+ switch (actionType) {
+ case ZSTART_EXEC_UNDO_LOG:
+ ljam();
+ startExecUndoLogLab(signal, dataPtr);
+ break;
+ case ZCONT_SAVE_DP:
+ ljam();
+ lcpSaveDataPageLab(signal, dataPtr);
+ break;
+ case ZCONT_START_SAVE_CL:
+ {
+ CheckpointInfoPtr ciPtr;
+
+ ljam();
+ ciPtr.i = dataPtr;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ lcpSaveCopyListLab(signal, ciPtr);
+ break;
+ }
+ case ZCONT_EXECUTE_LC:
+ {
+ LocalLogInfoPtr lliPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ ljam();
+ lliPtr.i = dataPtr;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ dbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ xlcGetNextRecordLab(signal, dbsiPtr, lliPtr);
+ break;
+ }
+ case ZCONT_LOAD_DP:
+ {
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ RestartInfoRecordPtr riPtr;
+
+ ljam();
+ riPtr.i = dataPtr;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+ dbsiPtr.i = riPtr.p->sriDataBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ rfrLoadDataPagesLab(signal, riPtr, dbsiPtr);
+ break;
+ }
+ case ZLOAD_BAL_LCP_TIMER:
+ ljam();
+ clblPageCounter = clblPagesPerTick;
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 400, 1);
+ break;
+ case ZINITIALISE_RECORDS:
+ ljam();
+ initialiseRecordsLab(signal, dataPtr,
+ signal->theData[2], signal->theData[3]);
+ break;
+ case ZREL_FRAG:
+ ljam();
+ releaseFragment(signal, dataPtr);
+ break;
+ case ZREPORT_MEMORY_USAGE:{
+ ljam();
+ static int c_currentMemUsed = 0;
+ int now = (cnoOfAllocatedPages * 100)/cnoOfPage;
+ const int thresholds[] = { 100, 90, 80, 0 };
+
+ Uint32 i = 0;
+ const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]);
+ for(i = 0; i<sz; i++){
+ if(now >= thresholds[i]){
+ now = thresholds[i];
+ break;
+ }
+ }
+
+ if(now != c_currentMemUsed){
+ reportMemoryUsage(signal, now > c_currentMemUsed ? 1 : -1);
+ c_currentMemUsed = now;
+ }
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ return;
+ }
+ case ZBUILD_INDEX:
+ ljam();
+ buildIndex(signal, dataPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::execCONTINUEB()
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ------------------- SYSTEM RESTART MODULE ---------------------- */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+void Dbtup::execSTTOR(Signal* signal)
+{
+ ljamEntry();
+ Uint32 startPhase = signal->theData[1];
+ Uint32 sigKey = signal->theData[6];
+ switch (startPhase) {
+ case ZSTARTPHASE1:
+ ljam();
+ CLEAR_ERROR_INSERT_VALUE;
+ cownref = calcTupBlockRef(0);
+ break;
+ default:
+ ljam();
+ break;
+ }//switch
+ signal->theData[0] = sigKey;
+ signal->theData[1] = 3;
+ signal->theData[2] = 2;
+ signal->theData[3] = ZSTARTPHASE1;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Dbtup::execSTTOR()
+
+/************************************************************************************************/
+// SIZE_ALTREP INITIALIZE DATA STRUCTURES, FILES AND DS VARIABLES, GET READY FOR EXTERNAL
+// CONNECTIONS.
+/************************************************************************************************/
+void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ ljamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_FRAG, &cnoOfFragrec));
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_OP_RECS, &cnoOfOprec));
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &cnoOfPage));
+ Uint32 noOfTriggers= 0;
+
+ Uint32 tmp= 0;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
+ initPageRangeSize(tmp);
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE_DESC,
+ &cnoOfTabDescrRec));
+ Uint32 noOfStoredProc;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_STORED_PROC,
+ &noOfStoredProc));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
+ &noOfTriggers));
+
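+ // Round the table descriptor pool size up to a multiple of 16 words
+ // (always adding at least one group of 16).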
+ cnoOfTabDescrRec = (cnoOfTabDescrRec & 0xFFFFFFF0) + 16;
+ c_storedProcPool.setSize(noOfStoredProc);
+ c_buildIndexPool.setSize(c_noOfBuildIndexRec);
+ c_triggerPool.setSize(noOfTriggers);
+
+ initRecords();
+ czero = 0;
+ cminusOne = czero - 1;
+ clastBitMask = 1;
+ clastBitMask = clastBitMask << 31;
+ cnoOfLocalLogInfo = 0;
+ cnoFreeUndoSeg = 0;
+
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ clblPagesPerTick = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTick);
+
+ clblPagesPerTickAfterSr = 50;
+ //ndb_mgm_get_int_parameter(p, CFG_DB_, &clblPagesPerTickAfterSr);
+
+}//Dbtup::execREAD_CONFIG_REQ()
+
+void Dbtup::initRecords()
+{
+ // Records with dynamic sizes
+ attrbufrec = (Attrbufrec*)allocRecord("Attrbufrec",
+ sizeof(Attrbufrec),
+ cnoOfAttrbufrec);
+
+ checkpointInfo = (CheckpointInfo*)allocRecord("CheckpointInfo",
+ sizeof(CheckpointInfo),
+ cnoOfLcpRec);
+
+ diskBufferSegmentInfo = (DiskBufferSegmentInfo*)
+ allocRecord("DiskBufferSegmentInfo",
+ sizeof(DiskBufferSegmentInfo),
+ cnoOfConcurrentWriteOp);
+
+ fragoperrec = (Fragoperrec*)allocRecord("Fragoperrec",
+ sizeof(Fragoperrec),
+ cnoOfFragoprec);
+
+ fragrecord = (Fragrecord*)allocRecord("Fragrecord",
+ sizeof(Fragrecord),
+ cnoOfFragrec);
+
+ hostBuffer = (HostBuffer*)allocRecord("HostBuffer",
+ sizeof(HostBuffer),
+ MAX_NODES);
+
+ localLogInfo = (LocalLogInfo*)allocRecord("LocalLogInfo",
+ sizeof(LocalLogInfo),
+ cnoOfParallellUndoFiles);
+
+ operationrec = (Operationrec*)allocRecord("Operationrec",
+ sizeof(Operationrec),
+ cnoOfOprec);
+
+ page = (Page*)allocRecord("Page",
+ sizeof(Page),
+ cnoOfPage,
+ false);
+
+ pageRange = (PageRange*)allocRecord("PageRange",
+ sizeof(PageRange),
+ cnoOfPageRangeRec);
+
+ pendingFileOpenInfo = (PendingFileOpenInfo*)
+ allocRecord("PendingFileOpenInfo",
+ sizeof(PendingFileOpenInfo),
+ cnoOfConcurrentOpenOp);
+
+ restartInfoRecord = (RestartInfoRecord*)
+ allocRecord("RestartInfoRecord",
+ sizeof(RestartInfoRecord),
+ cnoOfRestartInfoRec);
+
+
+ tablerec = (Tablerec*)allocRecord("Tablerec",
+ sizeof(Tablerec),
+ cnoOfTablerec);
+
+ for(unsigned i = 0; i<cnoOfTablerec; i++) {
+ void * p = &tablerec[i];
+ new (p) Tablerec(c_triggerPool);
+ }
+
+ tableDescriptor = (TableDescriptor*)
+ allocRecord("TableDescriptor",
+ sizeof(TableDescriptor),
+ cnoOfTabDescrRec);
+
+ undoPage = (UndoPage*)allocRecord("UndoPage",
+ sizeof(UndoPage),
+ cnoOfUndoPage);
+
+
+ // Initialize BAT for interface to file system
+ NewVARIABLE* bat = allocateBat(3);
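+ // bat[1] maps the data page array and bat[2] the undo page array; bat[0] is allocated but left untouched here.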
+ bat[1].WA = &page->pageWord[0];
+ bat[1].nrr = cnoOfPage;
+ bat[1].ClusterSize = sizeof(Page);
+ bat[1].bits.q = 13; /* 8192 words/page */
+ bat[1].bits.v = 5;
+ bat[2].WA = &undoPage->undoPageWord[0];
+ bat[2].nrr = cnoOfUndoPage;
+ bat[2].ClusterSize = sizeof(UndoPage);
+ bat[2].bits.q = 13; /* 8192 words/page */
+ bat[2].bits.v = 5;
+}//Dbtup::initRecords()
+
+void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData,
+ Uint32 retRef, Uint32 retData)
+{
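+ // Each invocation initializes one record array; the CONTINUEB sent at the bottom re-enters this routine
+ // with switchData + 1, spreading the initialisation over several signal executions.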
+ switch (switchData) {
+ case 0:
+ ljam();
+ initializeHostBuffer();
+ break;
+ case 1:
+ ljam();
+ initializeOperationrec();
+ break;
+ case 2:
+ ljam();
+ initializePage();
+ break;
+ case 3:
+ ljam();
+ initializeUndoPage();
+ break;
+ case 4:
+ ljam();
+ initializeTablerec();
+ break;
+ case 5:
+ ljam();
+ initializeCheckpointInfoRec();
+ break;
+ case 6:
+ ljam();
+ initializeFragrecord();
+ break;
+ case 7:
+ ljam();
+ initializeFragoperrec();
+ break;
+ case 8:
+ ljam();
+ initializePageRange();
+ break;
+ case 9:
+ ljam();
+ initializeTabDescr();
+ break;
+ case 10:
+ ljam();
+ initializeDiskBufferSegmentRecord();
+ break;
+ case 11:
+ ljam();
+ initializeLocalLogInfo();
+ break;
+ case 12:
+ ljam();
+ initializeAttrbufrec();
+ break;
+ case 13:
+ ljam();
+ initializePendingFileOpenInfoRecord();
+ break;
+ case 14:
+ ljam();
+ initializeRestartInfoRec();
+
+ {
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = retData;
+ sendSignal(retRef, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+ }
+ return;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ signal->theData[0] = ZINITIALISE_RECORDS;
+ signal->theData[1] = switchData + 1;
+ signal->theData[2] = retRef;
+ signal->theData[3] = retData;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
+ return;
+}//Dbtup::initialiseRecordsLab()
+
+void Dbtup::execNDB_STTOR(Signal* signal)
+{
+ ljamEntry();
+ cndbcntrRef = signal->theData[0];
+ Uint32 ownNodeId = signal->theData[1];
+ Uint32 startPhase = signal->theData[2];
+ switch (startPhase) {
+ case ZSTARTPHASE1:
+ ljam();
+ cownNodeId = ownNodeId;
+ cownref = calcTupBlockRef(ownNodeId);
+ break;
+ case ZSTARTPHASE2:
+ ljam();
+ break;
+ case ZSTARTPHASE3:
+ ljam();
+ startphase3Lab(signal, ~0, ~0);
+ break;
+ case ZSTARTPHASE4:
+ ljam();
+ break;
+ case ZSTARTPHASE6:
+ ljam();
+/*****************************************/
+/* NOW SET THE DISK WRITE SPEED TO */
+/* PAGES PER TICK AFTER SYSTEM */
+/* RESTART. */
+/*****************************************/
+ clblPagesPerTick = clblPagesPerTickAfterSr;
+
+ signal->theData[0] = ZREPORT_MEMORY_USAGE;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 2000, 1);
+ break;
+ default:
+ ljam();
+ break;
+ }//switch
+ signal->theData[0] = cownref;
+ sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB);
+}//Dbtup::execNDB_STTOR()
+
+void Dbtup::startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2)
+{
+ clblPageCounter = clblPagesPerTick;
+ signal->theData[0] = ZLOAD_BAL_LCP_TIMER;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 100, 1);
+}//Dbtup::startphase3Lab()
+
+void Dbtup::initializeAttrbufrec()
+{
+ AttrbufrecPtr attrBufPtr;
+ for (attrBufPtr.i = 0;
+ attrBufPtr.i < cnoOfAttrbufrec; attrBufPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(attrBufPtr, attrbufrec);
+ attrBufPtr.p->attrbuf[ZBUF_NEXT] = attrBufPtr.i + 1;
+ }//for
+ attrBufPtr.i = cnoOfAttrbufrec - 1;
+ ptrAss(attrBufPtr, attrbufrec);
+ attrBufPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
+ cfirstfreeAttrbufrec = 0;
+ cnoFreeAttrbufrec = cnoOfAttrbufrec;
+}//Dbtup::initializeAttrbufrec()
+
+void Dbtup::initializeCheckpointInfoRec()
+{
+ CheckpointInfoPtr checkpointInfoPtr;
+ for (checkpointInfoPtr.i = 0;
+ checkpointInfoPtr.i < cnoOfLcpRec; checkpointInfoPtr.i++) {
+ ptrAss(checkpointInfoPtr, checkpointInfo);
+ checkpointInfoPtr.p->lcpNextRec = checkpointInfoPtr.i + 1;
+ }//for
+ checkpointInfoPtr.i = cnoOfLcpRec - 1;
+ ptrAss(checkpointInfoPtr, checkpointInfo);
+ checkpointInfoPtr.p->lcpNextRec = RNIL;
+ cfirstfreeLcp = 0;
+}//Dbtup::initializeCheckpointInfoRec()
+
+void Dbtup::initializeDiskBufferSegmentRecord()
+{
+ DiskBufferSegmentInfoPtr diskBufferSegmentPtr;
+ for (diskBufferSegmentPtr.i = 0;
+ diskBufferSegmentPtr.i < cnoOfConcurrentWriteOp; diskBufferSegmentPtr.i++) {
+ ptrAss(diskBufferSegmentPtr, diskBufferSegmentInfo);
+ diskBufferSegmentPtr.p->pdxNextRec = diskBufferSegmentPtr.i + 1;
+ diskBufferSegmentPtr.p->pdxBuffertype = NOT_INITIALIZED;
+ }//for
+ diskBufferSegmentPtr.i = cnoOfConcurrentWriteOp - 1;
+ ptrAss(diskBufferSegmentPtr, diskBufferSegmentInfo);
+ diskBufferSegmentPtr.p->pdxNextRec = RNIL;
+ cfirstfreePdx = 0;
+}//Dbtup::initializeDiskBufferSegmentRecord()
+
+void Dbtup::initializeFragoperrec()
+{
+ FragoperrecPtr fragoperPtr;
+ for (fragoperPtr.i = 0; fragoperPtr.i < cnoOfFragoprec; fragoperPtr.i++) {
+ ptrAss(fragoperPtr, fragoperrec);
+ fragoperPtr.p->nextFragoprec = fragoperPtr.i + 1;
+ }//for
+ fragoperPtr.i = cnoOfFragoprec - 1;
+ ptrAss(fragoperPtr, fragoperrec);
+ fragoperPtr.p->nextFragoprec = RNIL;
+ cfirstfreeFragopr = 0;
+}//Dbtup::initializeFragoperrec()
+
+void Dbtup::initializeFragrecord()
+{
+ FragrecordPtr regFragPtr;
+ for (regFragPtr.i = 0; regFragPtr.i < cnoOfFragrec; regFragPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(regFragPtr, fragrecord);
+ regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
+ regFragPtr.p->checkpointVersion = RNIL;
+ regFragPtr.p->firstusedOprec = RNIL;
+ regFragPtr.p->lastusedOprec = RNIL;
+ regFragPtr.p->fragStatus = IDLE;
+ }//for
+ regFragPtr.i = cnoOfFragrec - 1;
+ ptrAss(regFragPtr, fragrecord);
+ regFragPtr.p->nextfreefrag = RNIL;
+ cfirstfreefrag = 0;
+}//Dbtup::initializeFragrecord()
+
+void Dbtup::initializeHostBuffer()
+{
+ Uint32 hostId;
+ cpackedListIndex = 0;
+ for (hostId = 0; hostId < MAX_NODES; hostId++) {
+ hostBuffer[hostId].inPackedList = false;
+ hostBuffer[hostId].noOfPacketsTA = 0;
+ hostBuffer[hostId].packetLenTA = 0;
+ }//for
+}//Dbtup::initializeHostBuffer()
+
+void Dbtup::initializeLocalLogInfo()
+{
+ LocalLogInfoPtr localLogInfoPtr;
+ for (localLogInfoPtr.i = 0;
+ localLogInfoPtr.i < cnoOfParallellUndoFiles; localLogInfoPtr.i++) {
+ ptrAss(localLogInfoPtr, localLogInfo);
+ localLogInfoPtr.p->lliActiveLcp = 0;
+ localLogInfoPtr.p->lliUndoFileHandle = RNIL;
+ }//for
+}//Dbtup::initializeLocalLogInfo()
+
+void Dbtup::initializeOperationrec()
+{
+ OperationrecPtr regOpPtr;
+ for (regOpPtr.i = 0; regOpPtr.i < cnoOfOprec; regOpPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(regOpPtr, operationrec);
+ regOpPtr.p->firstAttrinbufrec = RNIL;
+ regOpPtr.p->lastAttrinbufrec = RNIL;
+ regOpPtr.p->prevOprecInList = RNIL;
+ regOpPtr.p->nextOprecInList = regOpPtr.i + 1;
+ regOpPtr.p->optype = ZREAD;
+ regOpPtr.p->inFragList = ZFALSE;
+ regOpPtr.p->inActiveOpList = ZFALSE;
+/* FOR ABORT HANDLING BEFORE ANY SUCCESSFUL OPERATION */
+ regOpPtr.p->transstate = DISCONNECTED;
+ regOpPtr.p->storedProcedureId = ZNIL;
+ regOpPtr.p->prevActiveOp = RNIL;
+ regOpPtr.p->nextActiveOp = RNIL;
+ regOpPtr.p->tupVersion = ZNIL;
+ regOpPtr.p->deleteInsertFlag = 0;
+ }//for
+ regOpPtr.i = cnoOfOprec - 1;
+ ptrAss(regOpPtr, operationrec);
+ regOpPtr.p->nextOprecInList = RNIL;
+ cfirstfreeOprec = 0;
+}//Dbtup::initializeOperationrec()
+
+void Dbtup::initializePendingFileOpenInfoRecord()
+{
+ PendingFileOpenInfoPtr pendingFileOpenInfoPtr;
+ for (pendingFileOpenInfoPtr.i = 0;
+ pendingFileOpenInfoPtr.i < cnoOfConcurrentOpenOp; pendingFileOpenInfoPtr.i++) {
+ ptrAss(pendingFileOpenInfoPtr, pendingFileOpenInfo);
+ pendingFileOpenInfoPtr.p->pfoNextRec = pendingFileOpenInfoPtr.i + 1;
+ }//for
+ pendingFileOpenInfoPtr.i = cnoOfConcurrentOpenOp - 1;
+ ptrAss(pendingFileOpenInfoPtr, pendingFileOpenInfo);
+ pendingFileOpenInfoPtr.p->pfoNextRec = RNIL;
+ cfirstfreePfo = 0;
+}//Dbtup::initializePendingFileOpenInfoRecord()
+
+void Dbtup::initializeRestartInfoRec()
+{
+ RestartInfoRecordPtr restartInfoPtr;
+ for (restartInfoPtr.i = 0; restartInfoPtr.i < cnoOfRestartInfoRec; restartInfoPtr.i++) {
+ ptrAss(restartInfoPtr, restartInfoRecord);
+ restartInfoPtr.p->sriNextRec = restartInfoPtr.i + 1;
+ }//for
+ restartInfoPtr.i = cnoOfRestartInfoRec - 1;
+ ptrAss(restartInfoPtr, restartInfoRecord);
+ restartInfoPtr.p->sriNextRec = RNIL;
+ cfirstfreeSri = 0;
+}//Dbtup::initializeRestartInfoRec()
+
+void Dbtup::initializeTablerec()
+{
+ TablerecPtr regTabPtr;
+ for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) {
+ ljam();
+ refresh_watch_dog();
+ ptrAss(regTabPtr, tablerec);
+ initTab(regTabPtr.p);
+ }//for
+}//Dbtup::initializeTablerec()
+
+void
+Dbtup::initTab(Tablerec* const regTabPtr)
+{
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
+ regTabPtr->fragid[i] = RNIL;
+ regTabPtr->fragrec[i] = RNIL;
+ }//for
+ regTabPtr->readFunctionArray = NULL;
+ regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
+
+ regTabPtr->tabDescriptor = RNIL;
+ regTabPtr->attributeGroupDescriptor = RNIL;
+ regTabPtr->readKeyArray = RNIL;
+
+ regTabPtr->checksumIndicator = false;
+ regTabPtr->GCPIndicator = false;
+
+ regTabPtr->noOfAttr = 0;
+ regTabPtr->noOfKeyAttr = 0;
+ regTabPtr->noOfNewAttr = 0;
+ regTabPtr->noOfAttributeGroups = 0;
+
+ regTabPtr->tupheadsize = 0;
+ regTabPtr->tupNullIndex = 0;
+ regTabPtr->tupNullWords = 0;
+ regTabPtr->tupChecksumIndex = 0;
+ regTabPtr->tupGCPIndex = 0;
+
+ regTabPtr->m_dropTable.tabUserPtr = RNIL;
+ regTabPtr->m_dropTable.tabUserRef = 0;
+ regTabPtr->tableStatus = NOT_DEFINED;
+
+ // Clear trigger data
+ if (!regTabPtr->afterInsertTriggers.isEmpty())
+ regTabPtr->afterInsertTriggers.release();
+ if (!regTabPtr->afterDeleteTriggers.isEmpty())
+ regTabPtr->afterDeleteTriggers.release();
+ if (!regTabPtr->afterUpdateTriggers.isEmpty())
+ regTabPtr->afterUpdateTriggers.release();
+ if (!regTabPtr->subscriptionInsertTriggers.isEmpty())
+ regTabPtr->subscriptionInsertTriggers.release();
+ if (!regTabPtr->subscriptionDeleteTriggers.isEmpty())
+ regTabPtr->subscriptionDeleteTriggers.release();
+ if (!regTabPtr->subscriptionUpdateTriggers.isEmpty())
+ regTabPtr->subscriptionUpdateTriggers.release();
+ if (!regTabPtr->constraintUpdateTriggers.isEmpty())
+ regTabPtr->constraintUpdateTriggers.release();
+ if (!regTabPtr->tuxCustomTriggers.isEmpty())
+ regTabPtr->tuxCustomTriggers.release();
+}//Dbtup::initTab()
+
+void Dbtup::initializeTabDescr()
+{
+ TableDescriptorPtr regTabDesPtr;
+ for (Uint32 i = 0; i < 16; i++) {
+ cfreeTdList[i] = RNIL;
+ }//for
+ for (regTabDesPtr.i = 0; regTabDesPtr.i < cnoOfTabDescrRec; regTabDesPtr.i++) {
+ refresh_watch_dog();
+ ptrAss(regTabDesPtr, tableDescriptor);
+ regTabDesPtr.p->tabDescr = RNIL;
+ }//for
+ freeTabDescr(0, cnoOfTabDescrRec);
+}//Dbtup::initializeTabDescr()
+
+void Dbtup::initializeUndoPage()
+{
+ UndoPagePtr undoPagep;
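+ // Undo pages are handled in segments of ZUB_SEGMENT_SIZE pages; the first page of each segment
+ // links to the next segment, forming the free segment list.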
+ for (undoPagep.i = 0;
+ undoPagep.i < cnoOfUndoPage;
+ undoPagep.i = undoPagep.i + ZUB_SEGMENT_SIZE) {
+ refresh_watch_dog();
+ ptrAss(undoPagep, undoPage);
+ undoPagep.p->undoPageWord[ZPAGE_NEXT_POS] = undoPagep.i +
+ ZUB_SEGMENT_SIZE;
+ cnoFreeUndoSeg++;
+ }//for
+ undoPagep.i = cnoOfUndoPage - ZUB_SEGMENT_SIZE;
+ ptrAss(undoPagep, undoPage);
+ undoPagep.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
+ cfirstfreeUndoSeg = 0;
+}//Dbtup::initializeUndoPage()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* --------------- CONNECT/DISCONNECT MODULE ---------------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execTUPSEIZEREQ(Signal* signal)
+{
+ OperationrecPtr regOperPtr;
+ ljamEntry();
+ Uint32 userPtr = signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+ if (cfirstfreeOprec != RNIL) {
+ ljam();
+ seizeOpRec(regOperPtr);
+ } else {
+ ljam();
+ signal->theData[0] = userPtr;
+ signal->theData[1] = ZGET_OPREC_ERROR;
+ sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB);
+ return;
+ }//if
+ regOperPtr.p->optype = ZREAD;
+ initOpConnection(regOperPtr.p, 0);
+ regOperPtr.p->userpointer = userPtr;
+ regOperPtr.p->userblockref = userRef;
+ signal->theData[0] = regOperPtr.p->userpointer;
+ signal->theData[1] = regOperPtr.i;
+ sendSignal(userRef, GSN_TUPSEIZECONF, signal, 2, JBB);
+ return;
+}//Dbtup::execTUPSEIZEREQ()
+
+#define printFragment(t){ for(Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE);i++){\
+ ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
+ t.i, i, t.p->fragid[i], i, t.p->fragrec[i]); }}
+
+void Dbtup::execTUPRELEASEREQ(Signal* signal)
+{
+ OperationrecPtr regOperPtr;
+ ljamEntry();
+ regOperPtr.i = signal->theData[0];
+ ptrCheckGuard(regOperPtr, cnoOfOprec, operationrec);
+ regOperPtr.p->transstate = DISCONNECTED;
+ regOperPtr.p->nextOprecInList = cfirstfreeOprec;
+ cfirstfreeOprec = regOperPtr.i;
+ signal->theData[0] = regOperPtr.p->userpointer;
+ sendSignal(regOperPtr.p->userblockref, GSN_TUPRELEASECONF, signal, 1, JBB);
+ return;
+}//Dbtup::execTUPRELEASEREQ()
+
+/* ---------------------------------------------------------------- */
+/* ---------------- FREE_DISK_BUFFER_SEGMENT_RECORD --------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* THIS ROUTINE DEALLOCATES A DISK SEGMENT AND ITS DATA PAGES */
+/* */
+/* INPUT: DISK_BUFFER_SEGMENT_PTR THE DISK SEGMENT */
+/* */
+/* -----------------------------------------------------------------*/
+void Dbtup::freeDiskBufferSegmentRecord(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ switch (dbsiPtr.p->pdxBuffertype) {
+ case UNDO_PAGES:
+ case COMMON_AREA_PAGES:
+ ljam();
+ freeUndoBufferPages(signal, dbsiPtr);
+ break;
+ case UNDO_RESTART_PAGES:
+ ljam();
+ dbsiPtr.p->pdxDataPage[0] = dbsiPtr.p->pdxUndoBufferSet[0];
+ freeUndoBufferPages(signal, dbsiPtr);
+ dbsiPtr.p->pdxDataPage[0] = dbsiPtr.p->pdxUndoBufferSet[1];
+ freeUndoBufferPages(signal, dbsiPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ releaseDiskBufferSegmentRecord(dbsiPtr);
+}//Dbtup::freeDiskBufferSegmentRecord()
+
+/* ---------------------------------------------------------------- */
+/* -------------------- FREE_UNDO_BUFFER_PAGES -------------------- */
+/* ---------------------------------------------------------------- */
+/* */
+/* THIS ROUTINE DEALLOCATES A SEGMENT OF UNDO PAGES */
+/* */
+/* INPUT: UNDO_PAGEP POINTER TO FIRST PAGE IN SEGMENT */
+/* */
+/* -----------------------------------------------------------------*/
+void Dbtup::freeUndoBufferPages(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ UndoPagePtr undoPagePtr;
+
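+ // Put the segment back on the free list; once the free count climbs back to ZMIN_PAGE_LIMIT_TUP_COMMITREQ,
+ // unblock DBLQH, which was blocked when the limit was reached (see allocDataBufferSegment).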
+ undoPagePtr.i = dbsiPtr.p->pdxDataPage[0];
+ ptrCheckGuard(undoPagePtr, cnoOfUndoPage, undoPage);
+ undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = cfirstfreeUndoSeg;
+ cfirstfreeUndoSeg = undoPagePtr.i;
+ cnoFreeUndoSeg++;
+ if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
+ EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_UNBLOCK, signal, 1);
+ ljamEntry();
+ }//if
+}//Dbtup::freeUndoBufferPages()
+
+void Dbtup::releaseCheckpointInfoRecord(CheckpointInfoPtr ciPtr)
+{
+ ciPtr.p->lcpNextRec = cfirstfreeLcp;
+ cfirstfreeLcp = ciPtr.i;
+}//Dbtup::releaseCheckpointInfoRecord()
+
+void Dbtup::releaseDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ dbsiPtr.p->pdxNextRec = cfirstfreePdx;
+ cfirstfreePdx = dbsiPtr.i;
+}//Dbtup::releaseDiskBufferSegmentRecord()
+
+void Dbtup::releaseFragrec(FragrecordPtr regFragPtr)
+{
+ regFragPtr.p->nextfreefrag = cfirstfreefrag;
+ cfirstfreefrag = regFragPtr.i;
+}//Dbtup::releaseFragrec()
+
+void Dbtup::releasePendingFileOpenInfoRecord(PendingFileOpenInfoPtr pfoPtr)
+{
+ pfoPtr.p->pfoNextRec = cfirstfreePfo;
+ cfirstfreePfo = pfoPtr.i;
+}//Dbtup::releasePendingFileOpenInfoRecord()
+
+void Dbtup::releaseRestartInfoRecord(RestartInfoRecordPtr riPtr)
+{
+ riPtr.p->sriNextRec = cfirstfreeSri;
+ cfirstfreeSri = riPtr.i;
+}//Dbtup::releaseRestartInfoRecord()
+
+void Dbtup::seizeCheckpointInfoRecord(CheckpointInfoPtr& ciPtr)
+{
+ ciPtr.i = cfirstfreeLcp;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ cfirstfreeLcp = ciPtr.p->lcpNextRec;
+ ciPtr.p->lcpNextRec = RNIL;
+}//Dbtup::seizeCheckpointInfoRecord()
+
+void Dbtup::seizeDiskBufferSegmentRecord(DiskBufferSegmentInfoPtr& dbsiPtr)
+{
+ dbsiPtr.i = cfirstfreePdx;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ cfirstfreePdx = dbsiPtr.p->pdxNextRec;
+ dbsiPtr.p->pdxNextRec = RNIL;
+ for (Uint32 i = 0; i < 16; i++) {
+ dbsiPtr.p->pdxDataPage[i] = RNIL;
+ }//for
+ dbsiPtr.p->pdxCheckpointInfoP = RNIL;
+ dbsiPtr.p->pdxRestartInfoP = RNIL;
+ dbsiPtr.p->pdxLocalLogInfoP = RNIL;
+ dbsiPtr.p->pdxFilePage = 0;
+ dbsiPtr.p->pdxNumDataPages = 0;
+}//Dbtup::seizeDiskBufferSegmentRecord()
+
+void Dbtup::seizeOpRec(OperationrecPtr& regOperPtr)
+{
+ regOperPtr.i = cfirstfreeOprec;
+ ptrCheckGuard(regOperPtr, cnoOfOprec, operationrec);
+ cfirstfreeOprec = regOperPtr.p->nextOprecInList;
+}//Dbtup::seizeOpRec()
+
+void Dbtup::seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr)
+{
+ pfoiPtr.i = cfirstfreePfo;
+ ptrCheckGuard(pfoiPtr, cnoOfConcurrentOpenOp, pendingFileOpenInfo);
+ cfirstfreePfo = pfoiPtr.p->pfoNextRec;
+ pfoiPtr.p->pfoNextRec = RNIL;
+}//Dbtup::seizePendingFileOpenInfoRecord()
+
+void Dbtup::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend();
+ ConfigParamId var = setVarReq->variable();
+ int val = setVarReq->value();
+
+ switch (var) {
+
+ case NoOfDiskPagesToDiskAfterRestartTUP:
+ clblPagesPerTick = val;
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case NoOfDiskPagesToDiskDuringRestartTUP:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ } // switch
+#endif
+
+}//Dbtup::execSET_VAR_REQ()
+
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
new file mode 100644
index 00000000000..ab6e0642e11
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -0,0 +1,569 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+#include <signaldata/TuxMaint.hpp>
+
+#define ljam() { jamLine(28000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(28000 + __LINE__); }
+
+// methods used by ordered index
+
+void
+Dbtup::tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32& tupAddr)
+{
+ ljamEntry();
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ pagePtr.i = pageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 fragPageId = pagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ Uint32 tupheadsize = tablePtr.p->tupheadsize;
+ ndbrequire(pageOffset >= ZPAGE_HEADER_SIZE);
+ Uint32 offset = pageOffset - ZPAGE_HEADER_SIZE;
+ ndbrequire(offset % tupheadsize == 0);
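+ // Pack the address: fragment page id in the high bits, tuple number (shifted left one bit) in the low MAX_TUPLES_BITS bits.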
+ Uint32 pageIndex = (offset / tupheadsize) << 1;
+ tupAddr = (fragPageId << MAX_TUPLES_BITS) | pageIndex;
+}
+
+int
+Dbtup::tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node)
+{
+ ljamEntry();
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ terrorCode = 0;
+ if (! allocTh(fragPtr.p, tablePtr.p, NORMAL_PAGE, signal, pageOffset, pagePtr)) {
+ ljam();
+ ndbrequire(terrorCode != 0);
+ return terrorCode;
+ }
+ pageId = pagePtr.i;
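+ // The index node is stored as the data of attribute 0 in the allocated tuple; return a pointer to that position.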
+ Uint32 attrDescIndex = tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
+ Uint32 attrDataOffset = AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
+ node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset;
+ return 0;
+}
+
+void
+Dbtup::tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node)
+{
+ ljamEntry();
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ pagePtr.i = pageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 attrDescIndex = tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
+ Uint32 attrDataOffset = AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
+ ndbrequire(node == &pagePtr.p->pageWord[pageOffset] + attrDataOffset);
+ freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, pageOffset);
+}
+
+void
+Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node)
+{
+ ljamEntry();
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ pagePtr.i = pageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 attrDescIndex = tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
+ Uint32 attrDataOffset = AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr);
+ node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset;
+}
+
+int
+Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut)
+{
+ ljamEntry();
+ // use own variables instead of globals
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ pagePtr.i = pageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ // search for tuple version if not original
+ if (pagePtr.p->pageWord[pageOffset + 1] != tupVersion) {
+ ljam();
+ OperationrecPtr opPtr;
+ opPtr.i = pagePtr.p->pageWord[pageOffset];
+ Uint32 loopGuard = 0;
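+ // Walk the list of operations on the tuple; each copy tuple (realPageIdC) is checked until one carrying the requested tupVersion is found.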
+ while (true) {
+ ptrCheckGuard(opPtr, cnoOfOprec, operationrec);
+ if (opPtr.p->realPageIdC != RNIL) {
+ // update page and offset
+ pagePtr.i = opPtr.p->realPageIdC;
+ pageOffset = opPtr.p->pageOffsetC;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ if (pagePtr.p->pageWord[pageOffset + 1] == tupVersion) {
+ ljam();
+ break;
+ }
+ }
+ ljam();
+ opPtr.i = opPtr.p->nextActiveOp;
+ ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
+ }
+ }
+ // read key attributes from found tuple version
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret == -1) {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
+ }
+ return ret;
+}
+
+int
+Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag)
+{
+ ljamEntry();
+ // use own variables instead of globals
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ PagePtr pagePtr;
+ pagePtr.i = pageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
+ const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
+ const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
+ // read pk attributes from original tuple
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, xfrmFlag);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret != -1) {
+ // remove headers
+ Uint32 n = 0;
+ Uint32 i = 0;
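+ // Shift the attribute data left over each AttributeHeader word so dataOut holds only the raw key values; ret shrinks by one word per attribute.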
+ while (n < numAttrs) {
+ const AttributeHeader ah(dataOut[i]);
+ Uint32 size = ah.getDataSize();
+ ndbrequire(size != 0);
+ for (Uint32 j = 0; j < size; j++) {
+ dataOut[i + j - n] = dataOut[i + j + 1];
+ }
+ n += 1;
+ i += 1 + size;
+ }
+ ndbrequire((int)i == ret);
+ ret -= numAttrs;
+ } else {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
+ }
+ return ret;
+}
+
+int
+Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
+{
+ ljamEntry();
+ // get table
+ TablerecPtr tablePtr;
+ tablePtr.i = tableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ // get fragment
+ FragrecordPtr fragPtr;
+ getFragmentrec(fragPtr, fragId, tablePtr.p);
+ // get real page id and tuple offset
+ PagePtr pagePtr;
+ Uint32 pageId = getRealpid(fragPtr.p, fragPageId);
+ ndbrequire((pageIndex & 0x1) == 0);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
+ // use TUX routine - optimize later
+ int ret = tuxReadPk(fragPtr.i, pageId, pageOffset, dataOut, xfrmFlag);
+ return ret;
+}
+
+bool
+Dbtup::tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId)
+{
+ ljamEntry();
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ TablerecPtr tablePtr;
+ tablePtr.i = fragPtr.p->fragTableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ // get page
+ PagePtr pagePtr;
+ Uint32 fragPageId = tupAddr >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = tupAddr & ((1 << MAX_TUPLES_BITS ) - 1);
+ // use temp op rec
+ Operationrec tempOp;
+ tempOp.fragPageId = fragPageId;
+ tempOp.pageIndex = pageIndex;
+ tempOp.transid1 = transId1;
+ tempOp.transid2 = transId2;
+ tempOp.savePointId = savePointId;
+ tempOp.optype = ZREAD;
+ tempOp.dirtyOp = 1;
+ if (getPage(pagePtr, &tempOp, fragPtr.p, tablePtr.p)) {
+ /*
+ * We use the normal getPage which will return the tuple to be used
+ * for this transaction and savepoint id. If its tuple version
+ * equals the requested version, the tuple is visible; otherwise it is not.
+ */
+ ljam();
+ Uint32 read_tupVersion = pagePtr.p->pageWord[tempOp.pageOffset + 1];
+ if (read_tupVersion == tupVersion) {
+ ljam();
+ return true;
+ }
+ }
+ return false;
+}
+
+// ordered index build
+
+//#define TIME_MEASUREMENT
+#ifdef TIME_MEASUREMENT
+ static Uint32 time_events;
+ NDB_TICKS tot_time_passed;
+ Uint32 number_events;
+#endif
+void
+Dbtup::execBUILDINDXREQ(Signal* signal)
+{
+ ljamEntry();
+#ifdef TIME_MEASUREMENT
+ time_events = 0;
+ tot_time_passed = 0;
+ number_events = 1;
+#endif
+ // get new operation
+ BuildIndexPtr buildPtr;
+ if (! c_buildIndexList.seize(buildPtr)) {
+ ljam();
+ BuildIndexRec buildRec;
+ memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request));
+ buildRec.m_errorCode = BuildIndxRef::Busy;
+ buildIndexReply(signal, &buildRec);
+ return;
+ }
+ memcpy(buildPtr.p->m_request, signal->theData, sizeof(buildPtr.p->m_request));
+ // check
+ buildPtr.p->m_errorCode = BuildIndxRef::NoError;
+ do {
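+ // Validate the request; any break below falls through to the error reply after the loop.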
+ const BuildIndxReq* buildReq = (const BuildIndxReq*)buildPtr.p->m_request;
+ if (buildReq->getTableId() >= cnoOfTablerec) {
+ ljam();
+ buildPtr.p->m_errorCode = BuildIndxRef::InvalidPrimaryTable;
+ break;
+ }
+ TablerecPtr tablePtr;
+ tablePtr.i = buildReq->getTableId();
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ if (tablePtr.p->tableStatus != DEFINED) {
+ ljam();
+ buildPtr.p->m_errorCode = BuildIndxRef::InvalidPrimaryTable;
+ break;
+ }
+ if (! DictTabInfo::isOrderedIndex(buildReq->getIndexType())) {
+ ljam();
+ buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType;
+ break;
+ }
+ const ArrayList<TupTriggerData>& triggerList = tablePtr.p->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ if (triggerPtr.p->indexId == buildReq->getIndexId()) {
+ ljam();
+ break;
+ }
+ triggerList.next(triggerPtr);
+ }
+ if (triggerPtr.i == RNIL) {
+ ljam();
+ // trigger was not created
+ buildPtr.p->m_errorCode = BuildIndxRef::InternalError;
+ break;
+ }
+ buildPtr.p->m_triggerPtrI = triggerPtr.i;
+ // set to first tuple position
+ buildPtr.p->m_fragNo = 0;
+ buildPtr.p->m_pageId = 0;
+ buildPtr.p->m_tupleNo = 0;
+ // start build
+ buildIndex(signal, buildPtr.i);
+ return;
+ } while (0);
+ // check failed
+ buildIndexReply(signal, buildPtr.p);
+ c_buildIndexList.release(buildPtr);
+}
+
+void
+Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
+{
+ // get build record
+ BuildIndexPtr buildPtr;
+ buildPtr.i = buildPtrI;
+ c_buildIndexList.getPtr(buildPtr);
+ const BuildIndxReq* buildReq = (const BuildIndxReq*)buildPtr.p->m_request;
+ // get table
+ TablerecPtr tablePtr;
+ tablePtr.i = buildReq->getTableId();
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ // get trigger
+ TriggerPtr triggerPtr;
+ triggerPtr.i = buildPtr.p->m_triggerPtrI;
+ c_triggerPool.getPtr(triggerPtr);
+ ndbrequire(triggerPtr.p->indexId == buildReq->getIndexId());
+#ifdef TIME_MEASUREMENT
+ MicroSecondTimer start;
+ MicroSecondTimer stop;
+ NDB_TICKS time_passed;
+#endif
+ do {
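+ // One step (one tuple, or an advance to the next page/fragment) is handled per invocation;
+ // every break falls through to the CONTINUEB below, which re-enters buildIndex.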
+ // get fragment
+ FragrecordPtr fragPtr;
+ if (buildPtr.p->m_fragNo == 2 * MAX_FRAG_PER_NODE) {
+ ljam();
+ // build ready
+ buildIndexReply(signal, buildPtr.p);
+ c_buildIndexList.release(buildPtr);
+ return;
+ }
+ ndbrequire(buildPtr.p->m_fragNo < 2 * MAX_FRAG_PER_NODE);
+ fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
+ if (fragPtr.i == RNIL) {
+ ljam();
+ buildPtr.p->m_fragNo++;
+ buildPtr.p->m_pageId = 0;
+ buildPtr.p->m_tupleNo = 0;
+ break;
+ }
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ // get page
+ PagePtr pagePtr;
+ if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
+ ljam();
+ buildPtr.p->m_fragNo++;
+ buildPtr.p->m_pageId = 0;
+ buildPtr.p->m_tupleNo = 0;
+ break;
+ }
+ Uint32 realPageId = getRealpid(fragPtr.p, buildPtr.p->m_pageId);
+ pagePtr.i = realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ const Uint32 pageState = pagePtr.p->pageWord[ZPAGE_STATE_POS];
+ if (pageState != ZTH_MM_FREE &&
+ pageState != ZTH_MM_FREE_COPY &&
+ pageState != ZTH_MM_FULL &&
+ pageState != ZTH_MM_FULL_COPY) {
+ ljam();
+ buildPtr.p->m_pageId++;
+ buildPtr.p->m_tupleNo = 0;
+ break;
+ }
+ // get tuple
+ const Uint32 tupheadsize = tablePtr.p->tupheadsize;
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + buildPtr.p->m_tupleNo * tupheadsize;
+ if (pageOffset + tupheadsize > ZWORDS_ON_PAGE) {
+ ljam();
+ buildPtr.p->m_pageId++;
+ buildPtr.p->m_tupleNo = 0;
+ break;
+ }
+ // skip over free tuple
+ bool isFree = false;
+ if (pageState == ZTH_MM_FREE ||
+ pageState == ZTH_MM_FREE_COPY) {
+ ljam();
+ if ((pagePtr.p->pageWord[pageOffset] >> 16) == tupheadsize) {
+ // verify it really is free XXX far too expensive
+ Uint32 nextTuple = pagePtr.p->pageWord[ZFREELIST_HEADER_POS] >> 16;
+ ndbrequire(nextTuple != 0);
+ while (nextTuple != 0) {
+ ljam();
+ if (nextTuple == pageOffset) {
+ ljam();
+ isFree = true;
+ break;
+ }
+ nextTuple = pagePtr.p->pageWord[nextTuple] & 0xffff;
+ }
+ }
+ }
+ if (isFree) {
+ ljam();
+ buildPtr.p->m_tupleNo++;
+ break;
+ }
+ Uint32 tupVersion = pagePtr.p->pageWord[pageOffset + 1];
+ OperationrecPtr pageOperPtr;
+ pageOperPtr.i = pagePtr.p->pageWord[pageOffset];
+ if (pageOperPtr.i != RNIL) {
+ /*
+ If there is an ongoing operation on the tuple then it is either a
+ copy tuple or an original tuple with an ongoing transaction. In
+ both cases realPageId and pageOffset refer to the original tuple.
+ The tuple address stored in TUX will always be the original tuple
+ but with the tuple version of the tuple we found.
+
+ This is necessary to avoid having to update TUX at abort of
+ update. If an update aborts then the copy tuple is copied to
+ the original tuple. The build will however have found that
+ tuple as a copy tuple. The original tuple is stable and is thus
+ preferable to store in TUX.
+ */
+ ljam();
+ ptrCheckGuard(pageOperPtr, cnoOfOprec, operationrec);
+ realPageId = pageOperPtr.p->realPageId;
+ pageOffset = pageOperPtr.p->pageOffset;
+ }//if
+#ifdef TIME_MEASUREMENT
+ NdbTick_getMicroTimer(&start);
+#endif
+ // add to index
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ req->errorCode = RNIL;
+ req->tableId = tablePtr.i;
+ req->indexId = triggerPtr.p->indexId;
+ req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
+ req->pageId = realPageId;
+ req->pageOffset = pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpAdd;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ if (req->errorCode != 0) {
+ switch (req->errorCode) {
+ case TuxMaintReq::NoMemError:
+ ljam();
+ buildPtr.p->m_errorCode = BuildIndxRef::AllocationFailure;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ buildIndexReply(signal, buildPtr.p);
+ c_buildIndexList.release(buildPtr);
+ return;
+ }
+#ifdef TIME_MEASUREMENT
+ NdbTick_getMicroTimer(&stop);
+ time_passed = NdbTick_getMicrosPassed(start, stop);
+ if (time_passed < 1000) {
+ time_events++;
+ tot_time_passed += time_passed;
+ if (time_events == number_events) {
+ NDB_TICKS mean_time_passed = tot_time_passed / (NDB_TICKS)number_events;
+ ndbout << "Number of events = " << number_events;
+ ndbout << " Mean time passed = " << mean_time_passed << endl;
+ number_events <<= 1;
+ tot_time_passed = (NDB_TICKS)0;
+ time_events = 0;
+ }//if
+ }
+#endif
+ // next tuple
+ buildPtr.p->m_tupleNo++;
+ break;
+ } while (0);
+ signal->theData[0] = ZBUILD_INDEX;
+ signal->theData[1] = buildPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
+}
+
+void
+Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP)
+{
+ const BuildIndxReq* const buildReq = (const BuildIndxReq*)buildPtrP->m_request;
+ // conf is subset of ref
+ BuildIndxRef* rep = (BuildIndxRef*)signal->getDataPtr();
+ rep->setUserRef(buildReq->getUserRef());
+ rep->setConnectionPtr(buildReq->getConnectionPtr());
+ rep->setRequestType(buildReq->getRequestType());
+ rep->setTableId(buildReq->getTableId());
+ rep->setIndexType(buildReq->getIndexType());
+ rep->setIndexId(buildReq->getIndexId());
+ // conf
+ if (buildPtrP->m_errorCode == BuildIndxRef::NoError) {
+ ljam();
+ sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF,
+ signal, BuildIndxConf::SignalLength, JBB);
+ return;
+ }
+ // ref
+ rep->setErrorCode(buildPtrP->m_errorCode);
+ sendSignal(rep->getUserRef(), GSN_BUILDINDXREF,
+ signal, BuildIndxRef::SignalLength, JBB);
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
new file mode 100644
index 00000000000..370ef4c4ba5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
@@ -0,0 +1,596 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+
+#define ljam() { jamLine(10000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(10000 + __LINE__); }
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* -------------------- LOCAL CHECKPOINT MODULE ------------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execTUP_PREPLCPREQ(Signal* signal)
+{
+ CheckpointInfoPtr ciPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ FragrecordPtr regFragPtr;
+ LocalLogInfoPtr lliPtr;
+ TablerecPtr regTabPtr;
+
+ ljamEntry();
+ Uint32 userptr = signal->theData[0];
+ BlockReference userblockref = signal->theData[1];
+ regTabPtr.i = signal->theData[2];
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+ Uint32 fragId = signal->theData[3];
+ Uint32 checkpointNumber = signal->theData[4];
+ cundoFileVersion = signal->theData[5];
+
+ getFragmentrec(regFragPtr, fragId, regTabPtr.p);
+ ndbrequire(regTabPtr.i != RNIL);
+ seizeCheckpointInfoRecord(ciPtr);
+
+ lliPtr.i = (cundoFileVersion << 2) + (regTabPtr.i & 0x3);
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ cnoOfDataPagesToDiskWithoutSynch = 0;
+
+ ciPtr.p->lcpDataFileHandle = RNIL;
+ ciPtr.p->lcpCheckpointVersion = checkpointNumber;
+ ciPtr.p->lcpLocalLogInfoP = lliPtr.i;
+ ciPtr.p->lcpFragmentP = regFragPtr.i; /* SET THE FRAGMENT */
+ ciPtr.p->lcpFragmentId = fragId; /* SAVE THE FRAGMENT IDENTITY */
+ ciPtr.p->lcpTabPtr = regTabPtr.i; /* SET THE TABLE POINTER */
+ ciPtr.p->lcpBlockref = userblockref; /* SET THE BLOCK REFERENCE */
+ ciPtr.p->lcpUserptr = userptr; /* SET THE USERPOINTER */
+
+ /***************************************************************/
+ /* OPEN THE UNDO FILE FOR WRITE */
+ /* UPON FSOPENCONF */
+ /***************************************************************/
+ if (lliPtr.p->lliActiveLcp == 0) { /* IS THE UNDO LOG FILE OPEN? */
+ PendingFileOpenInfoPtr undoPfoiPtr;
+ UndoPagePtr regUndoPagePtr;
+
+ ljam();
+ lliPtr.p->lliPrevRecordId = 0;
+ lliPtr.p->lliLogFilePage = 0;
+ lliPtr.p->lliUndoPagesToDiskWithoutSynch = 0;
+ lliPtr.p->lliUndoWord = ZUNDO_PAGE_HEADER_SIZE;
+
+ seizeUndoBufferSegment(signal, regUndoPagePtr);
+ seizeDiskBufferSegmentRecord(dbsiPtr);
+ dbsiPtr.p->pdxBuffertype = UNDO_PAGES;
+ for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ dbsiPtr.p->pdxDataPage[i] = regUndoPagePtr.i + i;
+ }//for
+ dbsiPtr.p->pdxFilePage = lliPtr.p->lliLogFilePage;
+ lliPtr.p->lliUndoPage = regUndoPagePtr.i;
+ lliPtr.p->lliUndoBufferSegmentP = dbsiPtr.i;
+ /* F LEVEL NOT USED */
+ Uint32 fileType = 1; /* VERSION */
+ fileType = (fileType << 8) | 2; /* .LOCLOG */
+ fileType = (fileType << 8) | 6; /* D6 */
+ fileType = (fileType << 8) | 0xff; /* DON'T USE P DIRECTORY LEVEL */
+ Uint32 fileFlag = 0x301; /* CREATE, WRITE ONLY, TRUNCATE */
+
+ seizePendingFileOpenInfoRecord(undoPfoiPtr);
+ undoPfoiPtr.p->pfoOpenType = LCP_UNDO_FILE_WRITE;
+ undoPfoiPtr.p->pfoCheckpointInfoP = ciPtr.i;
+
+ signal->theData[0] = cownref;
+ signal->theData[1] = undoPfoiPtr.i;
+ signal->theData[2] = lliPtr.i;
+ signal->theData[3] = 0xFFFFFFFF;
+ signal->theData[4] = cundoFileVersion;
+ signal->theData[5] = fileType;
+ signal->theData[6] = fileFlag;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ }//if
+ /***************************************************************/
+ /* OPEN THE DATA FILE FOR WRITE */
+ /* THE FILE HANDLE WILL BE SET IN THE CHECKPOINT_INFO_RECORD */
+ /* UPON FSOPENCONF */
+ /***************************************************************/
+ /* OPEN THE DATA FILE IN THE FOLLOWING FORM */
+ /* D5/DBTUP/T<TABID>/F<FRAGID>/S<CHECKPOINT_NUMBER>.DATA */
+
+ PendingFileOpenInfoPtr dataPfoiPtr;
+
+ Uint32 fileType = 1; /* VERSION */
+ fileType = (fileType << 8) | 0; /* .DATA */
+ fileType = (fileType << 8) | 5; /* D5 */
+ fileType = (fileType << 8) | 0xff; /* DON'T USE P DIRECTORY LEVEL */
+ Uint32 fileFlag = 0x301; /* CREATE, WRITE ONLY, TRUNCATE */
+
+ seizePendingFileOpenInfoRecord(dataPfoiPtr); /* SEIZE A NEW FILE OPEN INFO */
+ if (lliPtr.p->lliActiveLcp == 0) {
+ ljam();
+ dataPfoiPtr.p->pfoOpenType = LCP_DATA_FILE_WRITE_WITH_UNDO;
+ } else {
+ ljam();
+ dataPfoiPtr.p->pfoOpenType = LCP_DATA_FILE_WRITE;
+ }//if
+ dataPfoiPtr.p->pfoCheckpointInfoP = ciPtr.i;
+
+ /* LET'S OPEN THE DATA FILE FOR WRITE */
+ /* INCREASE NUMBER OF ACTIVE CHECKPOINTS */
+ lliPtr.p->lliActiveLcp = 1;
+ signal->theData[0] = cownref;
+ signal->theData[1] = dataPfoiPtr.i;
+ signal->theData[2] = ciPtr.p->lcpTabPtr;
+ signal->theData[3] = ciPtr.p->lcpFragmentId;
+ signal->theData[4] = ciPtr.p->lcpCheckpointVersion;
+ signal->theData[5] = fileType;
+ signal->theData[6] = fileFlag;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbtup::execTUP_PREPLCPREQ()
+
+/* ---------------------------------------------------------------- */
+/* ------------------------ START CHECKPOINT --------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execTUP_LCPREQ(Signal* signal)
+{
+ CheckpointInfoPtr ciPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ FragrecordPtr regFragPtr;
+ LocalLogInfoPtr lliPtr;
+
+ ljamEntry();
+// Uint32 userptr = signal->theData[0];
+// BlockReference userblockref = signal->theData[1];
+ ciPtr.i = signal->theData[2];
+
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ regFragPtr.i = ciPtr.p->lcpFragmentP;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+
+/* ---------------------------------------------------------------- */
+/* ASSIGNING A VALUE DIFFERENT FROM RNIL TO CHECKPOINT VERSION*/
+/* CAUSES UNDO LOGGING TO START FOR THIS FRAGMENT. */
+/* WE ASSIGN IT THE POINTER TO THE CHECKPOINT RECORD FOR */
+/* OPTIMISATION OF THE WRITING OF THE UNDO LOG. */
+/* ---------------------------------------------------------------- */
+ regFragPtr.p->checkpointVersion = ciPtr.p->lcpLocalLogInfoP; /* MARK START OF UNDO LOGGING */
+
+ regFragPtr.p->maxPageWrittenInCheckpoint = getNoOfPages(regFragPtr.p);
+ regFragPtr.p->minPageNotWrittenInCheckpoint = 0;
+ ndbrequire(getNoOfPages(regFragPtr.p) > 0);
+ allocDataBufferSegment(signal, dbsiPtr);
+
+ dbsiPtr.p->pdxNumDataPages = 0;
+ dbsiPtr.p->pdxFilePage = 1;
+ ciPtr.p->lcpDataBufferSegmentP = dbsiPtr.i;
+ dbsiPtr.p->pdxCheckpointInfoP = ciPtr.i;
+ ciPtr.p->lcpNoOfPages = getNoOfPages(regFragPtr.p);
+ ciPtr.p->lcpNoCopyPagesAlloc = regFragPtr.p->noCopyPagesAlloc;
+ ciPtr.p->lcpEmptyPrimPage = regFragPtr.p->emptyPrimPage;
+ ciPtr.p->lcpThFreeFirst = regFragPtr.p->thFreeFirst;
+ ciPtr.p->lcpThFreeCopyFirst = regFragPtr.p->thFreeCopyFirst;
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+/* ---------------------------------------------------------------- */
+/* --- PERFORM A COPY OF THE TABLE DESCRIPTOR FOR THIS FRAGMENT --- */
+/* ---------------------------------------------------------------- */
+ cprAddLogHeader(signal,
+ lliPtr.p,
+ ZTABLE_DESCRIPTOR,
+ ciPtr.p->lcpTabPtr,
+ ciPtr.p->lcpFragmentId);
+
+/* ---------------------------------------------------------------- */
+/* CONTINUE WITH SAVING ACTIVE OPERATIONS AFTER A REAL-TIME */
+/* BREAK. */
+/* ---------------------------------------------------------------- */
+ ciPtr.p->lcpTmpOperPtr = regFragPtr.p->firstusedOprec;
+ lcpSaveCopyListLab(signal, ciPtr);
+ return;
+}//Dbtup::execTUP_LCPREQ()
+
+void Dbtup::allocDataBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr)
+{
+ UndoPagePtr regUndoPagePtr;
+
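+ // Borrow one undo page segment to buffer checkpoint data pages; block DBLQH commit requests
+ // when the free segment count is about to drop below the minimum.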
+ seizeDiskBufferSegmentRecord(dbsiPtr);
+ dbsiPtr.p->pdxBuffertype = COMMON_AREA_PAGES;
+ ndbrequire(cfirstfreeUndoSeg != RNIL);
+ if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
+ EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_BLOCK, signal, 1);
+ ljamEntry();
+ }//if
+ cnoFreeUndoSeg--;
+ ndbrequire(cnoFreeUndoSeg >= 0);
+
+ regUndoPagePtr.i = cfirstfreeUndoSeg;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+ cfirstfreeUndoSeg = regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
+ regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
+ for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ dbsiPtr.p->pdxDataPage[i] = regUndoPagePtr.i + i;
+ }//for
+}//Dbtup::allocDataBufferSegment()
+
+/* ---------------------------------------------------------------- */
+/* --- PERFORM A COPY OF THE ACTIVE OPERATIONS FOR THIS FRAGMENT -- */
+/* ---------------------------------------------------------------- */
+void Dbtup::lcpSaveCopyListLab(Signal* signal, CheckpointInfoPtr ciPtr)
+{
+ FragrecordPtr regFragPtr;
+ LocalLogInfoPtr lliPtr;
+ OperationrecPtr regOpPtr;
+
+ regFragPtr.i = ciPtr.p->lcpFragmentP;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ regOpPtr.i = ciPtr.p->lcpTmpOperPtr;
+
+/* -------------------------------------------------------------------------------- */
+/* TRAVERSE THE ENTIRE BLOCK OF OPERATIONS. CHECK IF THERE ARE EXISTING COPYS OF */
+/* TUPLES IN THE CHECKPOINTED FRAGMENT. SAVE THOSE IN A LIST IN THE FOLLOWING FORM: */
+/* */
+/* SOURCE PAGE */
+/* SOURCE INDEX */
+/* COPY PAGE */
+/* COPY INDEX */
+/* -------------------------------------------------------------------------------- */
+ Uint32 loopCount = 0;
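+ // Process at most 50 operation records per signal; if more remain, ZCONT_START_SAVE_CL resumes the scan (real-time break).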
+ while ((regOpPtr.i != RNIL) && (loopCount < 50)) {
+ ljam();
+ ptrCheckGuard(regOpPtr, cnoOfOprec, operationrec);
+ if (regOpPtr.p->realPageId != RNIL) {
+/* ---------------------------------------------------------------- */
+// We ensure that we have actually allocated the tuple header and
+// also found it. Otherwise we will fill the undo log with garbage.
+/* ---------------------------------------------------------------- */
+ if (regOpPtr.p->optype == ZUPDATE ||
+ (regOpPtr.p->optype == ZINSERT && regOpPtr.p->deleteInsertFlag)) {
+ ljam();
+ if (regOpPtr.p->realPageIdC != RNIL) {
+/* ---------------------------------------------------------------- */
+// We ensure that we have actually allocated the tuple header copy.
+// Otherwise we will fill the undo log with garbage.
+/* ---------------------------------------------------------------- */
+ cprAddLogHeader(signal,
+ lliPtr.p,
+ ZLCPR_ABORT_UPDATE,
+ ciPtr.p->lcpTabPtr,
+ ciPtr.p->lcpFragmentId);
+ cprAddAbortUpdate(signal, lliPtr.p, regOpPtr.p);
+ }//if
+ } else if (regOpPtr.p->optype == ZINSERT) {
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZLCPR_ABORT_INSERT,
+ regOpPtr.p->fragPageId,
+ regOpPtr.p->pageIndex,
+ regOpPtr.p->tableRef,
+ regOpPtr.p->fragId,
+ regFragPtr.p->checkpointVersion);
+ } else {
+ ndbrequire(regOpPtr.p->optype == ZDELETE);
+ ljam();
+ cprAddUndoLogRecord(signal,
+ ZINDICATE_NO_OP_ACTIVE,
+ regOpPtr.p->fragPageId,
+ regOpPtr.p->pageIndex,
+ regOpPtr.p->tableRef,
+ regOpPtr.p->fragId,
+ regFragPtr.p->checkpointVersion);
+ }//if
+ }//if
+ loopCount++;
+ regOpPtr.i = regOpPtr.p->nextOprecInList;
+ }//while
+ if (regOpPtr.i == RNIL) {
+ ljam();
+
+ signal->theData[0] = ciPtr.p->lcpUserptr;
+ sendSignal(ciPtr.p->lcpBlockref, GSN_TUP_LCPSTARTED, signal, 1, JBA);
+
+ signal->theData[0] = ZCONT_SAVE_DP;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ } else {
+ ljam();
+ ciPtr.p->lcpTmpOperPtr = regOpPtr.i;
+ signal->theData[0] = ZCONT_START_SAVE_CL;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dbtup::lcpSaveCopyListLab()
+
+/* ---------------------------------------------------------------- */
+/* ------- PERFORM A COPY OF ONE DATAPAGE DURING CHECKPOINT ------- */
+/* ---------------------------------------------------------------- */
+/* THE RANGE OF DATA PAGES IS INCLUDED IN THE CHECKPOINT_INFO_PTR */
+/* LAST_PAGE_TO_BUFFER ELEMENT IS INCREASED UNTIL ALL PAGES ARE */
+/* COPIED TO THE DISK BUFFER. WHEN A DISK BUFFER SEGMENT IS FULL */
+/* IT WILL BE WRITTEN TO DISK (TYPICALLY EACH 8:TH PAGE) */
+/* ---------------------------------------------------------------- */
+void Dbtup::lcpSaveDataPageLab(Signal* signal, Uint32 ciIndex)
+{
+ CheckpointInfoPtr ciPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ FragrecordPtr regFragPtr;
+ LocalLogInfoPtr lliPtr;
+ UndoPagePtr undoCopyPagePtr;
+ PagePtr pagePtr;
+
+ ciPtr.i = ciIndex;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+ if (ERROR_INSERTED(4000)){
+ if (ciPtr.p->lcpTabPtr == c_errorInsert4000TableId) {
+ // Delay writing of data pages during LCP
+ ndbout << "Delay writing of data pages during LCP" << endl;
+ signal->theData[0] = ZCONT_SAVE_DP;
+ signal->theData[1] = ciIndex;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 1000, 2);
+ return;
+ }//if
+ }//if
+ if (clblPageCounter == 0) {
+ ljam();
+ signal->theData[0] = ZCONT_SAVE_DP;
+ signal->theData[1] = ciPtr.i;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 100, 2);
+ return;
+ } else {
+ ljam();
+ clblPageCounter--;
+ }//if
+
+ regFragPtr.i = ciPtr.p->lcpFragmentP;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ dbsiPtr.i = ciPtr.p->lcpDataBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+
+ pagePtr.i = getRealpid(regFragPtr.p, regFragPtr.p->minPageNotWrittenInCheckpoint);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ ndbrequire(dbsiPtr.p->pdxNumDataPages < 16);
+ undoCopyPagePtr.i = dbsiPtr.p->pdxDataPage[dbsiPtr.p->pdxNumDataPages];
+ ptrCheckGuard(undoCopyPagePtr, cnoOfUndoPage, undoPage);
+ MEMCOPY_NO_WORDS(&undoCopyPagePtr.p->undoPageWord[0],
+ &pagePtr.p->pageWord[0],
+ ZWORDS_ON_PAGE);
+ regFragPtr.p->minPageNotWrittenInCheckpoint++;
+ dbsiPtr.p->pdxNumDataPages++;
+ if (regFragPtr.p->minPageNotWrittenInCheckpoint == regFragPtr.p->maxPageWrittenInCheckpoint) {
+ /* ---------------------------------------------------------- */
+ /* ALL PAGES ARE COPIED, TIME TO FINISH THE CHECKPOINT */
+ /* SAVE THE END POSITIONS OF THE LOG RECORDS SINCE ALL DATA */
+ /* PAGES ARE NOW SAFE ON DISK AND NO MORE LOGGING WILL APPEAR */
+ /* ---------------------------------------------------------- */
+ ljam();
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ regFragPtr.p->checkpointVersion = RNIL; /* UNDO LOGGING IS SHUT OFF */
+ lcpWriteListDataPageSegment(signal, dbsiPtr, ciPtr, false);
+ dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_WRITE_LAST;
+ } else if (dbsiPtr.p->pdxNumDataPages == ZDB_SEGMENT_SIZE) {
+ ljam();
+ lcpWriteListDataPageSegment(signal, dbsiPtr, ciPtr, false);
+ dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_WRITE;
+ } else {
+ ljam();
+ signal->theData[0] = ZCONT_SAVE_DP;
+ signal->theData[1] = ciPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+}//Dbtup::lcpSaveDataPageLab()
+
+void Dbtup::lcpWriteListDataPageSegment(Signal* signal,
+ DiskBufferSegmentInfoPtr dbsiPtr,
+ CheckpointInfoPtr ciPtr,
+ bool flushFlag)
+{
+ Uint32 flags = 1;
+ cnoOfDataPagesToDiskWithoutSynch += dbsiPtr.p->pdxNumDataPages;
+ if ((cnoOfDataPagesToDiskWithoutSynch > MAX_PAGES_WITHOUT_SYNCH) ||
+ (flushFlag)) {
+ ljam();
+/* ---------------------------------------------------------------- */
+// To avoid synching too big chunks at a time we synch after writing
+// a certain number of data pages. (e.g. 2 MBytes).
+/* ---------------------------------------------------------------- */
+ cnoOfDataPagesToDiskWithoutSynch = 0;
+ flags |= 0x10; //Set synch flag unconditionally
+ }//if
+ signal->theData[0] = ciPtr.p->lcpDataFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = dbsiPtr.i;
+ signal->theData[3] = flags;
+ signal->theData[4] = ZBASE_ADDR_UNDO_WORD;
+ signal->theData[5] = dbsiPtr.p->pdxNumDataPages;
+ signal->theData[6] = dbsiPtr.p->pdxDataPage[0];
+ signal->theData[7] = dbsiPtr.p->pdxFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ dbsiPtr.p->pdxFilePage += dbsiPtr.p->pdxNumDataPages;
+ dbsiPtr.p->pdxNumDataPages = 0;
+}//Dbtup::lcpWriteListDataPageSegment()
+
+void Dbtup::lcpFlushLogLab(Signal* signal, CheckpointInfoPtr ciPtr)
+{
+ DiskBufferSegmentInfoPtr oldDbsiPtr;
+ LocalLogInfoPtr lliPtr;
+ UndoPagePtr oldUndoPagePtr;
+ UndoPagePtr newUndoPagePtr;
+
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ oldDbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
+ ptrCheckGuard(oldDbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ oldDbsiPtr.p->pdxNumDataPages++;
+ if (clblPageCounter > 0) {
+ ljam();
+ clblPageCounter--;
+ }//if
+ oldUndoPagePtr.i = lliPtr.p->lliUndoPage;
+ ptrCheckGuard(oldUndoPagePtr, cnoOfUndoPage, undoPage);
+ lcpWriteUndoSegment(signal, lliPtr.p, true);
+ oldDbsiPtr.p->pdxOperation = CHECKPOINT_UNDO_WRITE_FLUSH;
+ oldDbsiPtr.p->pdxCheckpointInfoP = ciPtr.i;
+
+/* ---------------------------------------------------------------- */
+/* SINCE THE LAST PAGE SENT TO DISK WAS NOT FULL YET, WE COPY IT */
+/* TO THE NEW LAST PAGE. */
+/* ---------------------------------------------------------------- */
+ newUndoPagePtr.i = lliPtr.p->lliUndoPage;
+ ptrCheckGuard(newUndoPagePtr, cnoOfUndoPage, undoPage);
+ ndbrequire(lliPtr.p->lliUndoWord < ZWORDS_ON_PAGE);
+ MEMCOPY_NO_WORDS(&newUndoPagePtr.p->undoPageWord[0],
+ &oldUndoPagePtr.p->undoPageWord[0],
+ lliPtr.p->lliUndoWord);
+}//Dbtup::lcpFlushLogLab()
+
+void Dbtup::lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex)
+{
+ CheckpointInfoPtr ciPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ LocalLogInfoPtr lliPtr;
+ UndoPagePtr undoCopyPagePtr;
+
+ ciPtr.i = ciIndex;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+
+ lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ dbsiPtr.i = ciPtr.p->lcpDataBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ undoCopyPagePtr.i = dbsiPtr.p->pdxDataPage[0]; /* UNDO INFO STORED AT PAGE 0 */
+ ptrCheckGuard(undoCopyPagePtr, cnoOfUndoPage, undoPage);
+ ndbrequire(ciPtr.p->lcpNoOfPages > 0);
+ undoCopyPagePtr.p->undoPageWord[ZSRI_NO_OF_FRAG_PAGES_POS] = ciPtr.p->lcpNoOfPages;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_NO_COPY_PAGES_ALLOC] = ciPtr.p->lcpNoCopyPagesAlloc;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_EMPTY_PRIM_PAGE] = ciPtr.p->lcpEmptyPrimPage;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_FIRST] = ciPtr.p->lcpThFreeFirst;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_COPY_FIRST] = ciPtr.p->lcpThFreeCopyFirst;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_REC_ID] = lliPtr.p->lliPrevRecordId;
+ undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_FILE_VER] = cundoFileVersion;
+ if (lliPtr.p->lliUndoWord == ZUNDO_PAGE_HEADER_SIZE) {
+ ljam();
+ undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage - 1;
+ } else {
+ ljam();
+ undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage;
+ }//if
+ dbsiPtr.p->pdxNumDataPages = 1;
+ dbsiPtr.p->pdxFilePage = 0;
+ if (clblPageCounter > 0) {
+ ljam();
+ clblPageCounter--;
+ }//if
+ lcpWriteListDataPageSegment(signal, dbsiPtr, ciPtr, true);
+ dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_WRITE_FLUSH;
+ return;
+}//Dbtup::lcpFlushRestartInfoLab()
+
+void Dbtup::lcpCompletedLab(Signal* signal, Uint32 ciIndex)
+{
+ CheckpointInfoPtr ciPtr;
+ PendingFileOpenInfoPtr pfoiPtr;
+/* ---------------------------------------------------------------------- */
+/* INSERT CODE TO CLOSE DATA FILE HERE. DO THIS BEFORE SENDING CONF */
+/* ---------------------------------------------------------------------- */
+ ciPtr.i = ciIndex;
+ ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
+
+ seizePendingFileOpenInfoRecord(pfoiPtr);
+ pfoiPtr.p->pfoOpenType = LCP_DATA_FILE_CLOSE;
+ pfoiPtr.p->pfoCheckpointInfoP = ciPtr.i;
+
+ signal->theData[0] = ciPtr.p->lcpDataFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = pfoiPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ return;
+}//Dbtup::lcpCompletedLab()
+
+void Dbtup::lcpClosedDataFileLab(Signal* signal, CheckpointInfoPtr ciPtr)
+{
+ signal->theData[0] = ciPtr.p->lcpUserptr;
+ sendSignal(ciPtr.p->lcpBlockref, GSN_TUP_LCPCONF, signal, 1, JBB);
+ releaseCheckpointInfoRecord(ciPtr);
+ return;
+}//Dbtup::lcpClosedDataFileLab()
+
+/* ---------------------------------------------------------------------- */
+/* LCP END IS THE LAST STEP IN THE LCP PROCESS. IT WILL CLOSE THE LOGFILES */
+/* AND RELEASE THE ALLOCATED CHECKPOINT_INFO_RECORDS. */
+/* ---------------------------------------------------------------------- */
+void Dbtup::execEND_LCPREQ(Signal* signal)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ LocalLogInfoPtr lliPtr;
+ PendingFileOpenInfoPtr pfoiPtr;
+
+ ljamEntry();
+ clqhUserpointer = signal->theData[0];
+ clqhBlockref = signal->theData[1];
+ for (lliPtr.i = 0; lliPtr.i < 16; lliPtr.i++) {
+ ljam();
+ ptrAss(lliPtr, localLogInfo);
+ if (lliPtr.p->lliActiveLcp > 0) {
+ ljam();
+ dbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ freeDiskBufferSegmentRecord(signal, dbsiPtr);
+
+ seizePendingFileOpenInfoRecord(pfoiPtr); /* SEIZE A NEW FILE OPEN INFO */
+ pfoiPtr.p->pfoOpenType = LCP_UNDO_FILE_CLOSE;
+ pfoiPtr.p->pfoCheckpointInfoP = lliPtr.i;
+
+ signal->theData[0] = lliPtr.p->lliUndoFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = pfoiPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ lliPtr.p->lliActiveLcp = 0;
+ }//if
+ }//for
+ return;
+}//Dbtup::execEND_LCPREQ()
+
+void Dbtup::lcpEndconfLab(Signal* signal)
+{
+ LocalLogInfoPtr lliPtr;
+ for (lliPtr.i = 0; lliPtr.i < 16; lliPtr.i++) {
+ ljam();
+ ptrAss(lliPtr, localLogInfo);
+ if (lliPtr.p->lliUndoFileHandle != RNIL) {
+ ljam();
+/* ---------------------------------------------------------------------- */
+/* WAIT UNTIL ALL LOG FILES HAVE BEEN CLOSED. */
+/* ---------------------------------------------------------------------- */
+ return;
+ }//if
+ }//for
+ signal->theData[0] = clqhUserpointer;
+ sendSignal(clqhBlockref, GSN_END_LCPCONF, signal, 1, JBB);
+ return;
+}//Dbtup::lcpEndconfLab()
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
new file mode 100644
index 00000000000..4ce807528c4
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -0,0 +1,711 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/TupFrag.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <my_sys.h>
+
+#define ljam() { jamLine(20000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(20000 + __LINE__); }
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* --------------- ADD/DROP FRAGMENT TABLE MODULE ----------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execTUPFRAGREQ(Signal* signal)
+{
+ ljamEntry();
+
+ if (signal->theData[0] == (Uint32)-1) {
+ ljam();
+ abortAddFragOp(signal);
+ return;
+ }
+
+ FragoperrecPtr fragOperPtr;
+ FragrecordPtr regFragPtr;
+ TablerecPtr regTabPtr;
+
+ Uint32 userptr = signal->theData[0];
+ Uint32 userblockref = signal->theData[1];
+ Uint32 reqinfo = signal->theData[2];
+ regTabPtr.i = signal->theData[3];
+ Uint32 noOfAttributes = signal->theData[4];
+ Uint32 fragId = signal->theData[5];
+ Uint32 noOfNullAttr = signal->theData[7];
+ /* Uint32 schemaVersion = signal->theData[8];*/
+ Uint32 noOfKeyAttr = signal->theData[9];
+
+ Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
+ /* DICT sends number of character sets in upper half */
+ Uint32 noOfCharsets = (signal->theData[10] >> 16);
+
+ Uint32 checksumIndicator = signal->theData[11];
+ Uint32 noOfAttributeGroups = signal->theData[12];
+ Uint32 globalCheckpointIdIndicator = signal->theData[13];
+
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (regTabPtr.i >= cnoOfTablerec) {
+ ljam();
+ signal->theData[0] = userptr;
+ signal->theData[1] = 800;
+ sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
+ return;
+ }
+#endif
+
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+ if (cfirstfreeFragopr == RNIL) {
+ ljam();
+ signal->theData[0] = userptr;
+ signal->theData[1] = ZNOFREE_FRAGOP_ERROR;
+ sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
+ return;
+ }//if
+ seizeFragoperrec(fragOperPtr);
+
+ fragOperPtr.p->nextFragoprec = RNIL;
+ fragOperPtr.p->lqhBlockrefFrag = userblockref;
+ fragOperPtr.p->lqhPtrFrag = userptr;
+ fragOperPtr.p->fragidFrag = fragId;
+ fragOperPtr.p->tableidFrag = regTabPtr.i;
+ fragOperPtr.p->attributeCount = noOfAttributes;
+ fragOperPtr.p->noOfNullBits = noOfNullAttr;
+ fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
+ fragOperPtr.p->charsetIndex = 0;
+ fragOperPtr.p->currNullBit = 0;
+
+ ndbrequire(reqinfo == ZADDFRAG);
+
+ getFragmentrec(regFragPtr, fragId, regTabPtr.p);
+ if (regFragPtr.i != RNIL) {
+ ljam();
+ terrorCode = ZEXIST_FRAG_ERROR; /* THE FRAGMENT ALREADY EXISTS */
+ fragrefuse1Lab(signal, fragOperPtr);
+ return;
+ }//if
+ if (cfirstfreefrag != RNIL) {
+ ljam();
+ seizeFragrecord(regFragPtr);
+ } else {
+ ljam();
+ terrorCode = ZFULL_FRAGRECORD_ERROR;
+ fragrefuse1Lab(signal, fragOperPtr);
+ return;
+ }//if
+ initFragRange(regFragPtr.p);
+ if (!addfragtotab(regTabPtr.p, fragId, regFragPtr.i)) {
+ ljam();
+ terrorCode = ZNO_FREE_TAB_ENTRY_ERROR;
+ fragrefuse2Lab(signal, fragOperPtr, regFragPtr);
+ return;
+ }//if
+ if (cfirstfreerange == RNIL) {
+ ljam();
+ terrorCode = ZNO_FREE_PAGE_RANGE_ERROR;
+ fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+
+ regFragPtr.p->emptyPrimPage = RNIL;
+ regFragPtr.p->thFreeFirst = RNIL;
+ regFragPtr.p->thFreeCopyFirst = RNIL;
+ regFragPtr.p->noCopyPagesAlloc = 0;
+ regFragPtr.p->fragTableId = regTabPtr.i;
+ regFragPtr.p->fragmentId = fragId;
+ regFragPtr.p->checkpointVersion = RNIL;
+
+ Uint32 noAllocatedPages = 2;
+ noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+ if (noAllocatedPages == 0) {
+ ljam();
+ terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+ fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+
+ if ((ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId) ||
+ (ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId)) {
+ ljam();
+ terrorCode = 1;
+ fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+
+ if (regTabPtr.p->tableStatus == NOT_DEFINED) {
+ ljam();
+//-------------------------------------------------------------------------------------
+// We are setting up references to the header of the tuple.
+// Active operation This word contains a reference to the operation active on the tuple
+// at the moment. RNIL means no one active at all. Not optional.
+// Tuple version Uses only low 16 bits. Not optional.
+// Checksum The third header word is optional and contains a checksum of the
+// tuple header.
+// Null-bits A number of words to contain null bits for all non-dynamic attributes.
+// Each word contains up to 32 null bits. Each time a new word is needed
+// we allocate the complete word. Zero nullable attributes means that
+// there is no word at all.
+// Global Checkpoint id
+// This word is optional. When used it is stored as a 32-bit unsigned
+// attribute with attribute identity 0. Thus the kernel assumes that
+// this is the first word after the header.
+//-------------------------------------------------------------------------------------
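+// For illustration only: with both the checksum and GCP indicators set and
+// five nullable attributes, the assignments below place the checksum in
+// word 2, one null-bit word in word 3 and give a 4-word header; the GCP
+// word is then expected as attribute 0 in the first word after the header.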
+ fragOperPtr.p->definingFragment = true;
+ regTabPtr.p->tableStatus = DEFINING;
+ regTabPtr.p->checksumIndicator = (checksumIndicator != 0 ? true : false);
+ regTabPtr.p->GCPIndicator = (globalCheckpointIdIndicator != 0 ? true : false);
+
+ regTabPtr.p->tupChecksumIndex = 2;
+ regTabPtr.p->tupNullIndex = 2 + (checksumIndicator != 0 ? 1 : 0);
+ regTabPtr.p->tupNullWords = (noOfNullAttr + 31) >> 5;
+ regTabPtr.p->tupGCPIndex = regTabPtr.p->tupNullIndex + regTabPtr.p->tupNullWords;
+ regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex;
+
+ regTabPtr.p->noOfKeyAttr = noOfKeyAttr;
+ regTabPtr.p->noOfCharsets = noOfCharsets;
+ regTabPtr.p->noOfAttr = noOfAttributes;
+ regTabPtr.p->noOfNewAttr = noOfNewAttr;
+ regTabPtr.p->noOfNullAttr = noOfNullAttr;
+ regTabPtr.p->noOfAttributeGroups = noOfAttributeGroups;
+
+ regTabPtr.p->notNullAttributeMask.clear();
+
+ Uint32 offset[10];
+ Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset);
+ if (tableDescriptorRef == RNIL) {
+ ljam();
+ fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset);
+ } else {
+ ljam();
+ fragOperPtr.p->definingFragment = false;
+ }//if
+ signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
+ signal->theData[1] = fragOperPtr.i;
+ signal->theData[2] = regFragPtr.i;
+ signal->theData[3] = fragId;
+ sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUPFRAGCONF, signal, 4, JBB);
+ return;
+}//Dbtup::execTUPFRAGREQ()
+
+/* -------------------------------------------------------------------- */
+/* ------------------------- ADDFRAGTOTAB ----------------------------- */
+/* PUTS A FRAGMENT POINTER AND FID IN THE TABLE ARRAY OF THE TID RECORD */
+/* -------------------------------------------------------------------- */
+bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex)
+{
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
+ ljam();
+ if (regTabPtr->fragid[i] == RNIL) {
+ ljam();
+ regTabPtr->fragid[i] = fragId;
+ regTabPtr->fragrec[i] = fragIndex;
+ return true;
+ }//if
+ }//for
+ return false;
+}//Dbtup::addfragtotab()
+
+void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr)
+{
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
+ ljam();
+ if (regTabPtr->fragid[i] == fragId) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* A FRAGMENT RECORD HAS BEEN FOUND FOR THIS OPERATION. */
+/* ---------------------------------------------------------------- */
+ regFragPtr.i = regTabPtr->fragrec[i];
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ return;
+ }//if
+ }//for
+ regFragPtr.i = RNIL;
+ ptrNull(regFragPtr);
+}//Dbtup::getFragmentrec()
+
+void Dbtup::seizeFragrecord(FragrecordPtr& regFragPtr)
+{
+ regFragPtr.i = cfirstfreefrag;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ cfirstfreefrag = regFragPtr.p->nextfreefrag;
+ regFragPtr.p->nextfreefrag = RNIL;
+}//Dbtup::seizeFragrecord()
+
+/* ---------------------------------------------------------------- */
+/* SEIZE A FRAGMENT OPERATION RECORD */
+/* ---------------------------------------------------------------- */
+void Dbtup::seizeFragoperrec(FragoperrecPtr& fragOperPtr)
+{
+ fragOperPtr.i = cfirstfreeFragopr;
+ ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
+ cfirstfreeFragopr = fragOperPtr.p->nextFragoprec;
+ fragOperPtr.p->nextFragoprec = RNIL;
+ fragOperPtr.p->inUse = true;
+}//Dbtup::seizeFragoperrec()
+
+/* **************************************************************** */
+/* ************** TUP_ADD_ATTRREQ ****************** */
+/* **************************************************************** */
+void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
+{
+ FragrecordPtr regFragPtr;
+ FragoperrecPtr fragOperPtr;
+ TablerecPtr regTabPtr;
+
+ ljamEntry();
+ fragOperPtr.i = signal->theData[0];
+ ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
+ Uint32 attrId = signal->theData[2];
+ Uint32 attrDescriptor = signal->theData[3];
+ // DICT sends charset number in upper half
+ Uint32 csNumber = (signal->theData[4] >> 16);
+
+ regTabPtr.i = fragOperPtr.p->tableidFrag;
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+
+ Uint32 fragId = fragOperPtr.p->fragidFrag;
+
+ getFragmentrec(regFragPtr, fragId, regTabPtr.p);
+ ndbrequire(regFragPtr.i != RNIL);
+
+ ndbrequire(fragOperPtr.p->attributeCount > 0);
+ fragOperPtr.p->attributeCount--;
+ const bool lastAttr = (fragOperPtr.p->attributeCount == 0);
+
+ if ((regTabPtr.p->tableStatus == DEFINING) &&
+ (fragOperPtr.p->definingFragment)) {
+ ljam();
+ Uint32 firstTabDesIndex = regTabPtr.p->tabDescriptor + (attrId * ZAD_SIZE);
+ setTabDescrWord(firstTabDesIndex, attrDescriptor);
+ Uint32 attrLen = AttributeDescriptor::getSize(attrDescriptor);
+ Uint32 nullBitPos = fragOperPtr.p->currNullBit;
+ Uint32 bitCount = 0;
+
+ if (AttributeDescriptor::getNullable(attrDescriptor)) {
+ if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
+ ljam(); /* NULL ATTR */
+ fragOperPtr.p->currNullBit++;
+ }//if
+ } else {
+ ljam();
+ regTabPtr.p->notNullAttributeMask.set(attrId);
+ }//if
+
+ Uint32 attrDes2 = 0;
+ if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
+ ljam();
+ Uint32 attributePos = regTabPtr.p->tupheadsize;
+ switch (AttributeDescriptor::getArrayType(attrDescriptor)) {
+ case 1:
+ case 2:
+ {
+ ljam();
+ if(attrLen != 0)
+ {
+ ljam();
+ Uint32 bitsUsed =
+ AttributeDescriptor::getArraySize(attrDescriptor) * (1 << attrLen);
+ regTabPtr.p->tupheadsize += ((bitsUsed + 31) >> 5);
+ break;
+ }
+ else
+ {
+ ljam();
+ bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ fragOperPtr.p->currNullBit += bitCount;
+ break;
+ }
+ }
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ if(nullBitPos + bitCount + 1 >= MAX_NULL_BITS)
+ {
+ terrorCode = TupAddAttrRef::TooManyBitsUsed;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }
+ AttributeOffset::setOffset(attrDes2, attributePos);
+ AttributeOffset::setNullFlagPos(attrDes2, nullBitPos);
+ } else {
+ ndbrequire(false);
+ }//if
+ if (csNumber != 0) {
+ CHARSET_INFO* cs = all_charsets[csNumber];
+ ndbrequire(cs != NULL);
+ Uint32 i = 0;
+ while (i < fragOperPtr.p->charsetIndex) {
+ ljam();
+ if (regTabPtr.p->charsetArray[i] == cs)
+ break;
+ i++;
+ }
+ if (i == fragOperPtr.p->charsetIndex) {
+ ljam();
+ fragOperPtr.p->charsetIndex++;
+ }
+ ndbrequire(i < regTabPtr.p->noOfCharsets);
+ regTabPtr.p->charsetArray[i] = cs;
+ AttributeOffset::setCharsetPos(attrDes2, i);
+ }
+ setTabDescrWord(firstTabDesIndex + 1, attrDes2);
+
+ if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) {
+ ljam();
+ terrorCode = ZTOO_LARGE_TUPLE_ERROR;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ if (lastAttr &&
+ (fragOperPtr.p->currNullBit != fragOperPtr.p->noOfNullBits))
+ {
+ ljam();
+ terrorCode = ZINCONSISTENT_NULL_ATTRIBUTE_COUNT;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ }//if
+ if ((ERROR_INSERTED(4009) && regTabPtr.p->fragid[0] == fragId && attrId == 0) ||
+ (ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr) ||
+ (ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0) ||
+ (ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr)) {
+ ljam();
+ terrorCode = 1;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+/* **************************************************************** */
+/* ************** TUP_ADD_ATTCONF ****************** */
+/* **************************************************************** */
+ signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
+ signal->theData[1] = lastAttr;
+ sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 2, JBB);
+ if (! lastAttr) {
+ ljam();
+ return; /* EXIT AND WAIT FOR MORE */
+ }//if
+ regFragPtr.p->fragStatus = ACTIVE;
+ if (regTabPtr.p->tableStatus == DEFINING) {
+ ljam();
+ setUpQueryRoutines(regTabPtr.p);
+ setUpKeyArray(regTabPtr.p);
+ regTabPtr.p->tableStatus = DEFINED;
+ }//if
+ releaseFragoperrec(fragOperPtr);
+ return;
+}//Dbtup::execTUP_ADD_ATTRREQ()
+
+/*
+ * Descriptor has these parts:
+ *
+ * 0 readFunctionArray ( one for each attribute )
+ * 1 updateFunctionArray ( ditto )
+ * 2 charsetArray ( pointers to distinct CHARSET_INFO )
+ * 3 readKeyArray ( attribute ids of keys )
+ * 4 attributeGroupDescriptor ( currently size 1 but unused )
+ * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE )
+ */
+
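+/*
+ * A minimal sketch of how part 5 is addressed per attribute (illustration
+ * only; see execTUP_ADD_ATTRREQ above). It assumes each entry occupies
+ * ZAD_SIZE words, starting with the attribute descriptor word followed by
+ * the offset word; the local names are used only for this sketch:
+ *
+ *   Uint32 descIndex      = regTabPtr->tabDescriptor + (attrId * ZAD_SIZE);
+ *   Uint32 attrDescriptor = getTabDescrWord(descIndex);
+ *   Uint32 attrOffsetWord = getTabDescrWord(descIndex + 1);
+ */
+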
+void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference,
+ Tablerec* const regTabPtr,
+ const Uint32* offset)
+{
+ Uint32* desc = &tableDescriptor[descriptorReference].tabDescr;
+ regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]);
+ regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]);
+ regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]);
+ regTabPtr->readKeyArray = descriptorReference + offset[3];
+ regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4];
+ regTabPtr->tabDescriptor = descriptorReference + offset[5];
+}//Dbtup::setUpDescriptorReferences()
+
+Uint32
+Dbtup::sizeOfReadFunction()
+{
+ ReadFunction* tmp = (ReadFunction*)&tableDescriptor[0];
+ TableDescriptor* start = &tableDescriptor[0];
+ TableDescriptor * end = (TableDescriptor*)(tmp + 1);
+ return (Uint32)(end - start);
+}//Dbtup::sizeOfReadFunction()
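+
+// Illustration of the intent of sizeOfReadFunction(): it reports how many
+// TableDescriptor entries one ReadFunction pointer occupies. Assuming a
+// TableDescriptor entry holds a single Uint32, a 64-bit function pointer
+// would make it return 2.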
+
+void Dbtup::setUpKeyArray(Tablerec* const regTabPtr)
+{
+ ndbrequire((regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr) < cnoOfTabDescrRec);
+ Uint32* keyArray = &tableDescriptor[regTabPtr->readKeyArray].tabDescr;
+ Uint32 countKeyAttr = 0;
+ for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
+ ljam();
+ Uint32 refAttr = regTabPtr->tabDescriptor + (i * ZAD_SIZE);
+ Uint32 attrDescriptor = getTabDescrWord(refAttr);
+ if (AttributeDescriptor::getPrimaryKey(attrDescriptor)) {
+ ljam();
+ AttributeHeader::init(&keyArray[countKeyAttr], i, 0);
+ countKeyAttr++;
+ }//if
+ }//for
+ ndbrequire(countKeyAttr == regTabPtr->noOfKeyAttr);
+}//Dbtup::setUpKeyArray()
+
+void Dbtup::addattrrefuseLab(Signal* signal,
+ FragrecordPtr regFragPtr,
+ FragoperrecPtr fragOperPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId)
+{
+ releaseFragPages(regFragPtr.p);
+ deleteFragTab(regTabPtr, fragId);
+ releaseFragrec(regFragPtr);
+ releaseTabDescr(regTabPtr);
+ initTab(regTabPtr);
+
+ signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
+ signal->theData[1] = terrorCode;
+ sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTRREF, signal, 2, JBB);
+ releaseFragoperrec(fragOperPtr);
+ return;
+}//Dbtup::addattrrefuseLab()
+
+void Dbtup::fragrefuse4Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId)
+{
+ releaseFragPages(regFragPtr.p);
+ fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr, fragId);
+ initTab(regTabPtr);
+ return;
+}//Dbtup::fragrefuse4Lab()
+
+void Dbtup::fragrefuse3Lab(Signal* signal,
+ FragoperrecPtr fragOperPtr,
+ FragrecordPtr regFragPtr,
+ Tablerec* const regTabPtr,
+ Uint32 fragId)
+{
+ fragrefuse2Lab(signal, fragOperPtr, regFragPtr);
+ deleteFragTab(regTabPtr, fragId);
+ return;
+}//Dbtup::fragrefuse3Lab()
+
+void Dbtup::fragrefuse2Lab(Signal* signal, FragoperrecPtr fragOperPtr, FragrecordPtr regFragPtr)
+{
+ fragrefuse1Lab(signal, fragOperPtr);
+ releaseFragrec(regFragPtr);
+ return;
+}//Dbtup::fragrefuse2Lab()
+
+void Dbtup::fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr)
+{
+ fragrefuseLab(signal, fragOperPtr);
+ releaseFragoperrec(fragOperPtr);
+ return;
+}//Dbtup::fragrefuse1Lab()
+
+void Dbtup::fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr)
+{
+ signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
+ signal->theData[1] = terrorCode;
+ sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUPFRAGREF, signal, 2, JBB);
+ return;
+}//Dbtup::fragrefuseLab()
+
+void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
+{
+ fragOperPtr.p->inUse = false;
+ fragOperPtr.p->nextFragoprec = cfirstfreeFragopr;
+ cfirstfreeFragopr = fragOperPtr.i;
+}//Dbtup::releaseFragoperrec()
+
+void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
+{
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
+ ljam();
+ if (regTabPtr->fragid[i] == fragId) {
+ ljam();
+ regTabPtr->fragid[i] = RNIL;
+ regTabPtr->fragrec[i] = RNIL;
+ return;
+ }//if
+ }//for
+ ndbrequire(false);
+}//Dbtup::deleteFragTab()
+
+/*
+ * LQH aborts on-going create table operation. The table is later
+ * dropped by DICT.
+ */
+void Dbtup::abortAddFragOp(Signal* signal)
+{
+ FragoperrecPtr fragOperPtr;
+
+ fragOperPtr.i = signal->theData[1];
+ ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
+ ndbrequire(fragOperPtr.p->inUse);
+ releaseFragoperrec(fragOperPtr);
+}
+
+void
+Dbtup::execDROP_TAB_REQ(Signal* signal)
+{
+ ljamEntry();
+ DropTabReq* req = (DropTabReq*)signal->getDataPtr();
+
+ TablerecPtr tabPtr;
+ tabPtr.i = req->tableId;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ tabPtr.p->m_dropTable.tabUserRef = req->senderRef;
+ tabPtr.p->m_dropTable.tabUserPtr = req->senderData;
+
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = tabPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+}//Dbtup::execDROP_TAB_REQ()
+
+void Dbtup::releaseTabDescr(Tablerec* const regTabPtr)
+{
+ Uint32 descriptor = regTabPtr->readKeyArray;
+ if (descriptor != RNIL) {
+ ljam();
+ Uint32 offset[10];
+ getTabDescrOffsets(regTabPtr, offset);
+
+ regTabPtr->tabDescriptor = RNIL;
+ regTabPtr->readKeyArray = RNIL;
+ regTabPtr->readFunctionArray = NULL;
+ regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
+ regTabPtr->attributeGroupDescriptor= RNIL;
+
+ // move to start of descriptor
+ descriptor -= offset[3];
+ Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE);
+ ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL);
+ ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE));
+ ndbrequire(ZTD_TYPE_NORMAL == getTabDescrWord((descriptor + retNo) - ZTD_TR_TYPE));
+ freeTabDescr(descriptor, retNo);
+ }//if
+}//Dbtup::releaseTabDescr()
+
+void Dbtup::releaseFragment(Signal* signal, Uint32 tableId)
+{
+ TablerecPtr tabPtr;
+ tabPtr.i = tableId;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+ Uint32 fragIndex = RNIL;
+ Uint32 fragId = RNIL;
+ Uint32 i = 0;
+ for (i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
+ ljam();
+ if (tabPtr.p->fragid[i] != RNIL) {
+ ljam();
+ fragIndex = tabPtr.p->fragrec[i];
+ fragId = tabPtr.p->fragid[i];
+ break;
+ }//if
+ }//for
+ if (fragIndex != RNIL) {
+ ljam();
+
+ FragrecordPtr regFragPtr;
+ regFragPtr.i = fragIndex;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ releaseFragPages(regFragPtr.p);
+
+ tabPtr.p->fragid[i] = RNIL;
+ tabPtr.p->fragrec[i] = RNIL;
+ releaseFragrec(regFragPtr);
+
+ signal->theData[0] = ZREL_FRAG;
+ signal->theData[1] = tableId;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+
+ /**
+ * Finished...
+ */
+ sendFSREMOVEREQ(signal, tabPtr);
+}//Dbtup::releaseFragment()
+
+void Dbtup::sendFSREMOVEREQ(Signal* signal, TablerecPtr tabPtr)
+{
+ FsRemoveReq * const fsReq = (FsRemoveReq *)signal->getDataPtrSend();
+ fsReq->userReference = cownref;
+ fsReq->userPointer = tabPtr.i;
+ fsReq->fileNumber[0] = tabPtr.i;
+ fsReq->fileNumber[1] = (Uint32)-1; // Remove all fragments
+ fsReq->fileNumber[2] = (Uint32)-1; // Remove all data files within fragment
+ fsReq->fileNumber[3] = 255 | // No P-value used here
+ (5 << 8) | // Data-files in D5
+ (0 << 16) | // Data-files
+ (1 << 24); // Version 1 of fileNumber
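+ // i.e. fileNumber[3] == 0x010005FF (version 1, data files, D5, no P-value)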
+
+ fsReq->directory = 1;
+ fsReq->ownDirectory = 1;
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
+ FsRemoveReq::SignalLength, JBA);
+}//Dbtup::sendFSREMOVEREQ()
+
+void Dbtup::execFSREMOVECONF(Signal* signal)
+{
+ ljamEntry();
+
+ FsConf * const fsConf = (FsConf *)signal->getDataPtrSend();
+ TablerecPtr tabPtr;
+ tabPtr.i = fsConf->userPointer;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+
+ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend();
+ dropConf->senderRef = reference();
+ dropConf->senderData = tabPtr.p->m_dropTable.tabUserPtr;
+ dropConf->tableId = tabPtr.i;
+ sendSignal(tabPtr.p->m_dropTable.tabUserRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+
+ releaseTabDescr(tabPtr.p);
+ initTab(tabPtr.p);
+}//Dbtup::execFSREMOVECONF()
+
+void Dbtup::execFSREMOVEREF(Signal* signal)
+{
+ ljamEntry();
+ ndbrequire(false);
+}//Dbtup::execFSREMOVEREF()
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
new file mode 100644
index 00000000000..9722aa437c0
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
@@ -0,0 +1,370 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(16000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(16000 + __LINE__); }
+
+/* ---------------------------------------------------------------- */
+// 4) Page Memory Manager (buddy algorithm)
+//
+// The following data structures in Dbtup are used by the Page Memory
+// Manager.
+//
+// cfreepageList[16]
+// Pages with a header
+//
+// The cfreepageList is 16 free lists. Free list 0 contains chunks of
+// pages with 2^0 (=1) pages in each chunk. Free list 1 contains chunks of
+// 2^1 (=2) pages in each chunk and so forth up to free list 15 which
+// contains chunks of 2^15 (=32768) pages in each chunk.
+// The cfreepageList array contains the pointer to the first chunk
+// in each of those lists. The lists are doubly linked where the
+// first page in each chunk contains the next and previous references
+// in position ZPAGE_NEXT_CLUST_POS and ZPAGE_PREV_CLUST_POS in the
+// page header.
+// In addition the leading page and the last page in each chunk is marked
+// with a state (=ZFREE_COMMON) in position ZPAGE_STATE_POS in page
+// header. This state indicates that the page is the leading or last page
+// in a chunk of free pages. Furthermore the leading and last page is
+// also marked with a reference to the leading (=ZPAGE_FIRST_CLUST_POS)
+// and the last page (=ZPAGE_LAST_CLUST_POS) in the chunk.
+//
+// The aim of these data structures is to enable a free area handling of
+// free pages based on a buddy algorithm. When allocating pages it is
+// performed in chunks of pages and the algorithm tries to make the
+// chunks as large as possible.
+// This manager is invoked when fragments lack internal page space to
+// accommodate all the data they are requested to store. It is also
+// invoked when fragments deallocate page space back to the free area.
+//
+// The following routines are part of the external interface:
+// void
+// allocConsPages(Uint32 noOfPagesToAllocate, #In
+// Uint32& noOfPagesAllocated, #Out
+// Uint32& retPageRef) #Out
+// void
+// returnCommonArea(Uint32 retPageRef, #In
+// Uint32 retNoPages) #In
+//
+// allocConsPages tries to allocate noOfPagesToAllocate pages in one chunk.
+// If this fails it delivers a chunk as large as possible. It returns the
+// i-value of the first page in the chunk delivered; if zero pages are
+// returned, this i-value is undefined. It also returns the size of the
+// chunk actually delivered.
+//
+// returnCommonArea is used when somebody is returning pages to the free area.
+// It is used both from internal routines and external routines.
+//
+// The following routines are private routines used to support the
+// above external interface:
+// removeCommonArea()
+// insertCommonArea()
+// findFreeLeftNeighbours()
+// findFreeRightNeighbours()
+// Uint32
+// nextHigherTwoLog(Uint32 input)
+//
+// nextHigherTwoLog is a support routine which is a mathematical function with
+// an integer as input and an integer as output. It calculates the 2-log of
+// (input + 1). If the 2-log of (input + 1) is larger than 15 then the routine
+// will return 15. It is part of the external interface since it is also used
+// by other similar memory management algorithms.
+//
+// External dependencies:
+// None.
+//
+// Side Effects:
+// Apart from the above mentioned data structures there are no more
+// side effects other than through the subroutine parameters in the
+// external interface.
+//
+/* ---------------------------------------------------------------- */
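+
+// A minimal usage sketch of the external interface described above
+// (illustration only; the request for 8 pages is an arbitrary example):
+//
+//   Uint32 noOfPagesAllocated = 0;
+//   Uint32 allocPageRef = RNIL;
+//   allocConsPages(8, noOfPagesAllocated, allocPageRef);
+//   if (noOfPagesAllocated > 0) {
+//     // use pages [allocPageRef, allocPageRef + noOfPagesAllocated)
+//     returnCommonArea(allocPageRef, noOfPagesAllocated);
+//   }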
+
+/* ---------------------------------------------------------------- */
+/* CALCULATE THE 2-LOG + 1 OF INPUT AND PUT THE RESULT INTO OUTPUT */
+/* ---------------------------------------------------------------- */
+Uint32 Dbtup::nextHigherTwoLog(Uint32 input)
+{
+ input = input | (input >> 8);
+ input = input | (input >> 4);
+ input = input | (input >> 2);
+ input = input | (input >> 1);
+ Uint32 output = (input & 0x5555) + ((input >> 1) & 0x5555);
+ output = (output & 0x3333) + ((output >> 2) & 0x3333);
+ output = output + (output >> 4);
+ output = (output & 0xf) + ((output >> 8) & 0xf);
+ return output;
+}//nextHigherTwoLog()
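+
+// Example values (consistent with the definition above, i.e. the 2-log of
+// (input + 1) rounded up): nextHigherTwoLog(0) = 0, nextHigherTwoLog(1) = 1,
+// nextHigherTwoLog(3) = 2, nextHigherTwoLog(4) = 3, nextHigherTwoLog(7) = 3.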
+
+void Dbtup::initializePage()
+{
+ for (Uint32 i = 0; i < 16; i++) {
+ cfreepageList[i] = RNIL;
+ }//for
+ PagePtr pagePtr;
+ for (pagePtr.i = 0; pagePtr.i < cnoOfPage; pagePtr.i++) {
+ ljam();
+ refresh_watch_dog();
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[ZPAGE_PHYSICAL_INDEX] = pagePtr.i;
+ pagePtr.p->pageWord[ZPAGE_NEXT_POS] = pagePtr.i + 1;
+ pagePtr.p->pageWord[ZPAGE_NEXT_CLUST_POS] = RNIL;
+ pagePtr.p->pageWord[ZPAGE_LAST_CLUST_POS] = RNIL;
+ pagePtr.p->pageWord[ZPAGE_PREV_POS] = RNIL;
+ pagePtr.p->pageWord[ZPAGE_STATE_POS] = ZFREE_COMMON;
+ }//for
+ pagePtr.i = cnoOfPage - 1;
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[ZPAGE_NEXT_POS] = RNIL;
+
+ pagePtr.i = 0;
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
+
+ for(size_t j = 0; j<MAX_PARALLELL_TUP_SRREQ; j++){
+ pagePtr.i = 1+j;
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[ZPAGE_STATE_POS] = ~ZFREE_COMMON;
+ }
+
+ Uint32 tmp = 1 + MAX_PARALLELL_TUP_SRREQ;
+ returnCommonArea(tmp, cnoOfPage - tmp);
+ cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea
+ c_sr_free_page_0 = ~0;
+}//Dbtup::initializePage()
+
+void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
+ Uint32& noOfPagesAllocated,
+ Uint32& allocPageRef)
+{
+ if (noOfPagesToAllocate == 0){
+ ljam();
+ noOfPagesAllocated = 0;
+ return;
+ }//if
+
+ Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1);
+ for (Uint32 i = firstListToCheck; i < 16; i++) {
+ ljam();
+ if (cfreepageList[i] != RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THE PROPER NUMBER OF PAGES WAS FOUND. NOW SPLIT THE FOUND */
+/* AREA AND RETURN THE PART NOT NEEDED. */
+/* ---------------------------------------------------------------- */
+ noOfPagesAllocated = noOfPagesToAllocate;
+ allocPageRef = cfreepageList[i];
+ removeCommonArea(allocPageRef, i);
+ Uint32 retNo = (1 << i) - noOfPagesToAllocate;
+ Uint32 retPageRef = allocPageRef + noOfPagesToAllocate;
+ returnCommonArea(retPageRef, retNo);
+ return;
+ }//if
+ }//for
+/* ---------------------------------------------------------------- */
+/* THE PROPER NUMBER OF PAGES WAS NOT FOUND. FIND AS MANY AS */
+/* POSSIBLE. */
+/* ---------------------------------------------------------------- */
+ for (Uint32 j = firstListToCheck; (Uint32)~j; j--) {
+ ljam();
+ if (cfreepageList[j] != RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. */
+/* ---------------------------------------------------------------- */
+ allocPageRef = cfreepageList[j];
+ removeCommonArea(allocPageRef, j);
+ noOfPagesAllocated = 1 << j;
+ findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated,
+ noOfPagesToAllocate);
+ findFreeRightNeighbours(allocPageRef, noOfPagesAllocated,
+ noOfPagesToAllocate);
+
+ return;
+ }//if
+ }//for
+/* ---------------------------------------------------------------- */
+/* NO FREE AREA AT ALL EXISTED. RETURN ZERO PAGES */
+/* ---------------------------------------------------------------- */
+ noOfPagesAllocated = 0;
+ return;
+}//allocConsPages()
+
+void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo)
+{
+ do {
+ ljam();
+ if (retNo == 0) {
+ ljam();
+ return;
+ }//if
+ Uint32 list = nextHigherTwoLog(retNo) - 1;
+ retNo -= (1 << list);
+ insertCommonArea(retPageRef, list);
+ retPageRef += (1 << list);
+ } while (1);
+}//Dbtup::returnCommonArea()
+
+void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
+ Uint32& noPagesAllocated,
+ Uint32 noOfPagesToAllocate)
+{
+ PagePtr pageFirstPtr, pageLastPtr;
+ Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
+ while (allocPageRef > 0) {
+ ljam();
+ pageLastPtr.i = allocPageRef - 1;
+ ptrCheckGuard(pageLastPtr, cnoOfPage, page);
+ if (pageLastPtr.p->pageWord[ZPAGE_STATE_POS] != ZFREE_COMMON) {
+ ljam();
+ return;
+ } else {
+ ljam();
+ pageFirstPtr.i = pageLastPtr.p->pageWord[ZPAGE_FIRST_CLUST_POS];
+ ndbrequire(pageFirstPtr.i != RNIL);
+ Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
+ removeCommonArea(pageFirstPtr.i, list);
+ Uint32 listSize = 1 << list;
+ if (listSize > remainAllocate) {
+ ljam();
+ Uint32 retNo = listSize - remainAllocate;
+ returnCommonArea(pageFirstPtr.i, retNo);
+ allocPageRef = pageFirstPtr.i + retNo;
+ noPagesAllocated = noOfPagesToAllocate;
+ return;
+ } else {
+ ljam();
+ allocPageRef = pageFirstPtr.i;
+ noPagesAllocated += listSize;
+ remainAllocate -= listSize;
+ }//if
+ }//if
+ }//while
+}//Dbtup::findFreeLeftNeighbours()
+
+void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
+ Uint32& noPagesAllocated,
+ Uint32 noOfPagesToAllocate)
+{
+ PagePtr pageFirstPtr, pageLastPtr;
+ Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
+ if (remainAllocate == 0) {
+ ljam();
+ return;
+ }//if
+ while ((allocPageRef + noPagesAllocated) < cnoOfPage) {
+ ljam();
+ pageFirstPtr.i = allocPageRef + noPagesAllocated;
+ ptrCheckGuard(pageFirstPtr, cnoOfPage, page);
+ if (pageFirstPtr.p->pageWord[ZPAGE_STATE_POS] != ZFREE_COMMON) {
+ ljam();
+ return;
+ } else {
+ ljam();
+ pageLastPtr.i = pageFirstPtr.p->pageWord[ZPAGE_LAST_CLUST_POS];
+ ndbrequire(pageLastPtr.i != RNIL);
+ Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
+ removeCommonArea(pageFirstPtr.i, list);
+ Uint32 listSize = 1 << list;
+ if (listSize > remainAllocate) {
+ ljam();
+ Uint32 retPageRef = pageFirstPtr.i + remainAllocate;
+ Uint32 retNo = listSize - remainAllocate;
+ returnCommonArea(retPageRef, retNo);
+ noPagesAllocated += remainAllocate;
+ return;
+ } else {
+ ljam();
+ noPagesAllocated += listSize;
+ remainAllocate -= listSize;
+ }//if
+ }//if
+ }//while
+}//Dbtup::findFreeRightNeighbours()
+
+void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList)
+{
+ cnoOfAllocatedPages -= (1 << insList);
+ PagePtr pageLastPtr, pageInsPtr;
+
+ pageInsPtr.i = insPageRef;
+ ptrCheckGuard(pageInsPtr, cnoOfPage, page);
+ ndbrequire(insList < 16);
+ pageLastPtr.i = (pageInsPtr.i + (1 << insList)) - 1;
+
+ pageInsPtr.p->pageWord[ZPAGE_NEXT_CLUST_POS] = cfreepageList[insList];
+ pageInsPtr.p->pageWord[ZPAGE_PREV_CLUST_POS] = RNIL;
+ pageInsPtr.p->pageWord[ZPAGE_LAST_CLUST_POS] = pageLastPtr.i;
+ cfreepageList[insList] = pageInsPtr.i;
+
+ ptrCheckGuard(pageLastPtr, cnoOfPage, page);
+ pageLastPtr.p->pageWord[ZPAGE_FIRST_CLUST_POS] = pageInsPtr.i;
+ pageLastPtr.p->pageWord[ZPAGE_NEXT_POS] = RNIL;
+}//Dbtup::insertCommonArea()
+
+void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
+{
+ cnoOfAllocatedPages += (1 << list);
+ PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, pageSearchPtr, remPagePtr;
+
+ remPagePtr.i = remPageRef;
+ ptrCheckGuard(remPagePtr, cnoOfPage, page);
+ ndbrequire(list < 16);
+ if (cfreepageList[list] == remPagePtr.i) {
+ ljam();
+ cfreepageList[list] = remPagePtr.p->pageWord[ZPAGE_NEXT_CLUST_POS];
+ pageNextPtr.i = cfreepageList[list];
+ if (pageNextPtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(pageNextPtr, cnoOfPage, page);
+ pageNextPtr.p->pageWord[ZPAGE_PREV_CLUST_POS] = RNIL;
+ }//if
+ } else {
+ pageSearchPtr.i = cfreepageList[list];
+ while (true) {
+ ljam();
+ ptrCheckGuard(pageSearchPtr, cnoOfPage, page);
+ pagePrevPtr = pageSearchPtr;
+ pageSearchPtr.i = pageSearchPtr.p->pageWord[ZPAGE_NEXT_CLUST_POS];
+ if (pageSearchPtr.i == remPagePtr.i) {
+ ljam();
+ break;
+ }//if
+ }//while
+ pageNextPtr.i = remPagePtr.p->pageWord[ZPAGE_NEXT_CLUST_POS];
+ pagePrevPtr.p->pageWord[ZPAGE_NEXT_CLUST_POS] = pageNextPtr.i;
+ if (pageNextPtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(pageNextPtr, cnoOfPage, page);
+ pageNextPtr.p->pageWord[ZPAGE_PREV_CLUST_POS] = pagePrevPtr.i;
+ }//if
+ }//if
+ remPagePtr.p->pageWord[ZPAGE_NEXT_CLUST_POS] = RNIL;
+ remPagePtr.p->pageWord[ZPAGE_LAST_CLUST_POS] = RNIL;
+ remPagePtr.p->pageWord[ZPAGE_PREV_CLUST_POS] = RNIL;
+
+ pageLastPtr.i = (remPagePtr.i + (1 << list)) - 1;
+ ptrCheckGuard(pageLastPtr, cnoOfPage, page);
+ pageLastPtr.p->pageWord[ZPAGE_FIRST_CLUST_POS] = RNIL;
+}//Dbtup::removeCommonArea()
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
new file mode 100644
index 00000000000..1f674876642
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -0,0 +1,556 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(14000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(14000 + __LINE__); }
+
+//
+// PageMap is a service used by Dbtup to map logical page id's to physical
+// page id's. The mapping needs the fragment and the logical page id to
+// provide the physical id.
+//
+// This is a part of Dbtup which is the exclusive user of a certain set of
+// variables on the fragment record and it is the exclusive user of the
+// struct for page ranges.
+//
+//
+// The following methods operate on the data handled by the page map class.
+//
+// Public methods
+// insertPageRange(Uint32 startPageId, # In
+// Uint32 noPages) # In
+// Inserts a range of pages into the mapping structure.
+//
+// void releaseFragPages()
+// Releases all pages and their mappings belonging to a fragment.
+//
+// Uint32 allocFragPages(Uint32 tafpNoAllocRequested)
+// Allocate a set of pages to the fragment from the page manager
+//
+// Uint32 getEmptyPage()
+// Get an empty page from the pool of empty pages on the fragment.
+// It returns the physical page id of the empty page.
+// Returns RNIL if no empty page is available.
+//
+// Uint32 getRealpid(Uint32 logicalPageId)
+// Return the physical page id provided the logical page id
+//
+// void initializePageRange()
+// Initialise free list of page ranges and initialise the page range records.
+//
+// void initFragRange()
+// Initialise the fragment variables when allocating a fragment to a table.
+//
+// void initPageRangeSize(Uint32 size)
+// Initialise the number of page ranges.
+//
+// Uint32 getNoOfPages()
+// Get the number of pages on the fragment currently.
+//
+//
+// Private methods
+// Uint32 leafPageRangeFull(PageRangePtr currPageRangePtr)
+//
+// void errorHandler()
+// Method to crash NDB kernel in case of weird data set-up
+//
+// void allocMoreFragPages()
+// When no more empty pages are attached to the fragment and we need more
+// we allocate more pages from the page manager using this method.
+//
+// Private data
+// On the fragment record
+// currentPageRange # The current page range where to insert the next range
+// rootPageRange # The root of the page ranges owned
+// nextStartRange # The next page id to assign when expanding the
+// # page map
+// noOfPages # The number of pages in the fragment
+// emptyPrimPage # The first page of the empty pages in the fragment
+//
+// The full page range struct
+
+Uint32 Dbtup::getEmptyPage(Fragrecord* const regFragPtr)
+{
+ Uint32 logicalPageId = regFragPtr->emptyPrimPage;
+ if (logicalPageId == RNIL) {
+ ljam();
+ allocMoreFragPages(regFragPtr);
+ logicalPageId = regFragPtr->emptyPrimPage;
+ if (logicalPageId == RNIL) {
+ ljam();
+ return RNIL;
+ }//if
+ }//if
+ Uint32 physicalPageId = getRealpid(regFragPtr, logicalPageId);
+ PagePtr pagePtr;
+ pagePtr.i = physicalPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ regFragPtr->emptyPrimPage = pagePtr.p->pageWord[ZPAGE_NEXT_POS];
+ return physicalPageId;
+}//Dbtup::getEmptyPage()
+
+Uint32 Dbtup::getRealpid(Fragrecord* const regFragPtr, Uint32 logicalPageId)
+{
+ PageRangePtr grpPageRangePtr;
+ Uint32 loopLimit;
+ Uint32 loopCount = 0;
+ Uint32 pageRangeLimit = cnoOfPageRangeRec;
+
+ grpPageRangePtr.i = regFragPtr->rootPageRange;
+ while (true) {
+ ndbrequire(loopCount++ < 100);
+ ndbrequire(grpPageRangePtr.i < pageRangeLimit);
+ ptrAss(grpPageRangePtr, pageRange);
+ loopLimit = grpPageRangePtr.p->currentIndexPos;
+ ndbrequire(loopLimit <= 3);
+ for (Uint32 i = 0; i <= loopLimit; i++) {
+ ljam();
+ if (grpPageRangePtr.p->startRange[i] <= logicalPageId) {
+ if (grpPageRangePtr.p->endRange[i] >= logicalPageId) {
+ if (grpPageRangePtr.p->type[i] == ZLEAF) {
+ ljam();
+ Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) +
+ grpPageRangePtr.p->basePageId[i];
+ return realPageId;
+ } else {
+ ndbrequire(grpPageRangePtr.p->type[i] == ZNON_LEAF);
+ grpPageRangePtr.i = grpPageRangePtr.p->basePageId[i];
+ }//if
+ }//if
+ }//if
+ }//for
+ }//while
+ return 0;
+}//Dbtup::getRealpid()
+
+Uint32 Dbtup::getNoOfPages(Fragrecord* const regFragPtr)
+{
+ return regFragPtr->noOfPages;
+}//Dbtup::getNoOfPages()
+
+void Dbtup::initPageRangeSize(Uint32 size)
+{
+ cnoOfPageRangeRec = size;
+}//Dbtup::initPageRangeSize()
+
+/* ---------------------------------------------------------------- */
+/* ----------------------- INSERT_PAGE_RANGE_TAB ------------------ */
+/* ---------------------------------------------------------------- */
+/* INSERT A PAGE RANGE INTO THE FRAGMENT */
+/* */
+/* NOTE: THE METHOD IS ATOMIC. EITHER THE ACTION IS */
+/* PERFORMED FULLY OR NO ACTION IS PERFORMED AT ALL. */
+/* TO SUPPORT THIS THE CODE HAS A CLEANUP PART AFTER */
+/* ERRORS. */
+/* ---------------------------------------------------------------- */
+bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
+ Uint32 startPageId,
+ Uint32 noPages)
+{
+ PageRangePtr currPageRangePtr;
+ if (cfirstfreerange == RNIL) {
+ ljam();
+ return false;
+ }//if
+ currPageRangePtr.i = regFragPtr->currentPageRange;
+ if (currPageRangePtr.i == RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */
+/* ---------------------------------------------------------------- */
+ seizePagerange(currPageRangePtr);
+ regFragPtr->rootPageRange = currPageRangePtr.i;
+ currPageRangePtr.p->currentIndexPos = 0;
+ currPageRangePtr.p->parentPtr = RNIL;
+ } else {
+ ljam();
+ ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
+ if (currPageRangePtr.p->currentIndexPos < 3) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */
+/* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */
+/* BY COMMON CODE AT THE END OF THE SUBROUTINE. */
+/* ---------------------------------------------------------------- */
+ currPageRangePtr.p->currentIndexPos++;
+ } else {
+ ljam();
+ ndbrequire(currPageRangePtr.p->currentIndexPos == 3);
+ currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr);
+ if (currPageRangePtr.i == RNIL) {
+ return false;
+ }//if
+ ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
+ }//if
+ }//if
+ currPageRangePtr.p->startRange[currPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange;
+/* ---------------------------------------------------------------- */
+/* NOW SET THE LEAF LEVEL PAGE RANGE RECORD PROPERLY */
+/* PAGE_RANGE_PTR REFERS TO LEAF RECORD WHEN ARRIVING HERE */
+/* ---------------------------------------------------------------- */
+ currPageRangePtr.p->endRange[currPageRangePtr.p->currentIndexPos] =
+ (regFragPtr->nextStartRange + noPages) - 1;
+ currPageRangePtr.p->basePageId[currPageRangePtr.p->currentIndexPos] = startPageId;
+ currPageRangePtr.p->type[currPageRangePtr.p->currentIndexPos] = ZLEAF;
+/* ---------------------------------------------------------------- */
+/* WE NEED TO UPDATE THE CURRENT PAGE RANGE IN CASE IT HAS */
+/* CHANGED. WE ALSO NEED TO UPDATE THE NEXT START RANGE */
+/* ---------------------------------------------------------------- */
+ regFragPtr->currentPageRange = currPageRangePtr.i;
+ regFragPtr->nextStartRange += noPages;
+/* ---------------------------------------------------------------- */
+/* WE NEED TO UPDATE THE END RANGE IN ALL PAGE RANGE RECORDS */
+/* UP TO THE ROOT. */
+/* ---------------------------------------------------------------- */
+ PageRangePtr loopPageRangePtr;
+ loopPageRangePtr = currPageRangePtr;
+ while (true) {
+ ljam();
+ loopPageRangePtr.i = loopPageRangePtr.p->parentPtr;
+ if (loopPageRangePtr.i != RNIL) {
+ ljam();
+ ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange);
+ ndbrequire(loopPageRangePtr.p->currentIndexPos < 4);
+ loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages;
+ } else {
+ ljam();
+ break;
+ }//if
+ }//while
+ regFragPtr->noOfPages += noPages;
+ return true;
+}//Dbtup::insertPageRangeTab()
+
+
+void Dbtup::releaseFragPages(Fragrecord* const regFragPtr)
+{
+ if (regFragPtr->rootPageRange == RNIL) {
+ ljam();
+ return;
+ }//if
+ PageRangePtr regPRPtr;
+ regPRPtr.i = regFragPtr->rootPageRange;
+ ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
+ while (true) {
+ ljam();
+ const Uint32 indexPos = regPRPtr.p->currentIndexPos;
+ ndbrequire(indexPos < 4);
+
+ const Uint32 basePageId = regPRPtr.p->basePageId[indexPos];
+ regPRPtr.p->basePageId[indexPos] = RNIL;
+ if (basePageId == RNIL) {
+ ljam();
+ /**
+ * Finished with indexPos, continue with the next
+ */
+ if (indexPos > 0) {
+ ljam();
+ regPRPtr.p->currentIndexPos--;
+ continue;
+ }//if
+
+ /* ---------------------------------------------------------------- */
+ /* THE PAGE RANGE REC IS EMPTY. RELEASE IT. */
+ /*----------------------------------------------------------------- */
+ Uint32 parentPtr = regPRPtr.p->parentPtr;
+ releasePagerange(regPRPtr);
+
+ if (parentPtr != RNIL) {
+ ljam();
+ regPRPtr.i = parentPtr;
+ ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
+ continue;
+ }//if
+
+ ljam();
+ ndbrequire(regPRPtr.i == regFragPtr->rootPageRange);
+ initFragRange(regFragPtr);
+ return;
+ } else {
+ if (regPRPtr.p->type[indexPos] == ZNON_LEAF) {
+ jam();
+ /* ---------------------------------------------------------------- */
+ // A non-leaf node, we must release everything below it before we
+ // release this node.
+ /* ---------------------------------------------------------------- */
+ regPRPtr.i = basePageId;
+ ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
+ } else {
+ jam();
+ ndbrequire(regPRPtr.p->type[indexPos] == ZLEAF);
+ /* ---------------------------------------------------------------- */
+ /* PAGE_RANGE_PTR != RNIL AND THE CURRENT POS IS NOT A CHILD. */
+ /*----------------------------------------------------------------- */
+ const Uint32 start = regPRPtr.p->startRange[indexPos];
+ const Uint32 stop = regPRPtr.p->endRange[indexPos];
+ ndbrequire(stop >= start);
+ const Uint32 retNo = (stop - start + 1);
+ returnCommonArea(basePageId, retNo);
+ }//if
+ }//if
+ }//while
+}//Dbtup::releaseFragPages()
+
+void Dbtup::initializePageRange()
+{
+ PageRangePtr regPTRPtr;
+ for (regPTRPtr.i = 0;
+ regPTRPtr.i < cnoOfPageRangeRec; regPTRPtr.i++) {
+ ptrAss(regPTRPtr, pageRange);
+ regPTRPtr.p->nextFree = regPTRPtr.i + 1;
+ }//for
+ regPTRPtr.i = cnoOfPageRangeRec - 1;
+ ptrAss(regPTRPtr, pageRange);
+ regPTRPtr.p->nextFree = RNIL;
+ cfirstfreerange = 0;
+ c_noOfFreePageRanges = cnoOfPageRangeRec;
+}//Dbtup::initializePageRange()
+
+void Dbtup::initFragRange(Fragrecord* const regFragPtr)
+{
+ regFragPtr->emptyPrimPage = RNIL;
+ regFragPtr->rootPageRange = RNIL;
+ regFragPtr->currentPageRange = RNIL;
+ regFragPtr->noOfPages = 0;
+ regFragPtr->nextStartRange = 0;
+}//initFragRange()
+
+Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocRequested)
+{
+ Uint32 tafpPagesAllocated = 0;
+ while (true) {
+ Uint32 noOfPagesAllocated = 0;
+ Uint32 noPagesToAllocate = tafpNoAllocRequested - tafpPagesAllocated;
+ Uint32 retPageRef = RNIL;
+ allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef);
+ if (noOfPagesAllocated == 0) {
+ ljam();
+ return tafpPagesAllocated;
+ }//if
+/* ---------------------------------------------------------------- */
+/* IT IS NOW TIME TO PUT THE ALLOCATED AREA INTO THE PAGE */
+/* RANGE TABLE. */
+/* ---------------------------------------------------------------- */
+ Uint32 startRange = regFragPtr->nextStartRange;
+ if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) {
+ ljam();
+ returnCommonArea(retPageRef, noOfPagesAllocated);
+ return tafpPagesAllocated;
+ }//if
+ tafpPagesAllocated += noOfPagesAllocated;
+ Uint32 loopLimit = retPageRef + noOfPagesAllocated;
+ PagePtr loopPagePtr;
+/* ---------------------------------------------------------------- */
+/* SINCE A NUMBER OF PAGES WERE ALLOCATED FROM COMMON AREA */
+/* WITH SUCCESS IT IS NOW TIME TO CHANGE THE STATE OF */
+/* THOSE PAGES TO EMPTY_MM AND LINK THEM INTO THE EMPTY */
+/* PAGE LIST OF THE FRAGMENT. */
+/* ---------------------------------------------------------------- */
+ for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) {
+ ljam();
+ ptrCheckGuard(loopPagePtr, cnoOfPage, page);
+ loopPagePtr.p->pageWord[ZPAGE_STATE_POS] = ZEMPTY_MM;
+ loopPagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS] = startRange +
+ (loopPagePtr.i - retPageRef);
+ loopPagePtr.p->pageWord[ZPAGE_NEXT_POS] = loopPagePtr.p->pageWord[ZPAGE_FRAG_PAGE_ID_POS] + 1;
+ }//for
+ loopPagePtr.i = (retPageRef + noOfPagesAllocated) - 1;
+ ptrCheckGuard(loopPagePtr, cnoOfPage, page);
+ loopPagePtr.p->pageWord[ZPAGE_NEXT_POS] = regFragPtr->emptyPrimPage;
+ regFragPtr->emptyPrimPage = startRange;
+/* ---------------------------------------------------------------- */
+/* WERE ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED? */
+/* ---------------------------------------------------------------- */
+ if (tafpPagesAllocated < tafpNoAllocRequested) {
+ ljam();
+ } else {
+ ndbrequire(tafpPagesAllocated == tafpNoAllocRequested);
+ ljam();
+ return tafpNoAllocRequested;
+ }//if
+ }//while
+}//Dbtup::allocFragPages()
+
+void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
+{
+ Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5%
+ noAllocPages += regFragPtr->noOfPages >> 4; // 6.25%
+ noAllocPages += 2;
+/* -----------------------------------------------------------------*/
+// We will grow by 18.75% plus two additional pages to grow
+// a little bit quicker in the beginning.
+/* -----------------------------------------------------------------*/
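+// For example, a fragment that already has 32 pages grows by
+// (32 >> 3) + (32 >> 4) + 2 = 4 + 2 + 2 = 8 pages.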
+ allocFragPages(regFragPtr, noAllocPages);
+}//Dbtup::allocMoreFragPages()
+
+Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr)
+{
+/* ---------------------------------------------------------------- */
+/* THE COMPLEX CASE WHEN THE LEAF NODE IS FULL. GO UP THE TREE*/
+/* TO FIND THE FIRST RECORD WITH A FREE ENTRY. ALLOCATE NEW */
+/* PAGE RANGE RECORDS THEN ALL THE WAY DOWN TO THE LEAF LEVEL */
+/* AGAIN. THE TREE SHOULD ALWAYS REMAIN BALANCED. */
+/* ---------------------------------------------------------------- */
+ PageRangePtr parentPageRangePtr;
+ PageRangePtr foundPageRangePtr;
+ parentPageRangePtr = currPageRangePtr;
+ Uint32 tiprNoLevels = 1;
+ while (true) {
+ ljam();
+ parentPageRangePtr.i = parentPageRangePtr.p->parentPtr;
+ if (parentPageRangePtr.i == RNIL) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. */
+/* ---------------------------------------------------------------- */
+ if (c_noOfFreePageRanges < tiprNoLevels) {
+ ljam();
+ return RNIL;
+ }//if
+ PageRangePtr oldRootPRPtr;
+ PageRangePtr newRootPRPtr;
+ oldRootPRPtr.i = regFragPtr->rootPageRange;
+ ptrCheckGuard(oldRootPRPtr, cnoOfPageRangeRec, pageRange);
+ seizePagerange(newRootPRPtr);
+ regFragPtr->rootPageRange = newRootPRPtr.i;
+ oldRootPRPtr.p->parentPtr = newRootPRPtr.i;
+
+ newRootPRPtr.p->basePageId[0] = oldRootPRPtr.i;
+ newRootPRPtr.p->parentPtr = RNIL;
+ newRootPRPtr.p->startRange[0] = 0;
+ newRootPRPtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
+ newRootPRPtr.p->type[0] = ZNON_LEAF;
+ newRootPRPtr.p->startRange[1] = regFragPtr->nextStartRange;
+ newRootPRPtr.p->endRange[1] = regFragPtr->nextStartRange - 1;
+ newRootPRPtr.p->type[1] = ZNON_LEAF;
+ newRootPRPtr.p->currentIndexPos = 1;
+ foundPageRangePtr = newRootPRPtr;
+ break;
+ } else {
+ ljam();
+ ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange);
+ if (parentPageRangePtr.p->currentIndexPos < 3) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. */
+/* FILL IN THE START RANGE, ALLOCATE A NEW PAGE RANGE RECORD        */
+/* AND UPDATE THE POINTERS.                                         */
+/* ---------------------------------------------------------------- */
+ parentPageRangePtr.p->currentIndexPos++;
+ parentPageRangePtr.p->startRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange;
+ parentPageRangePtr.p->endRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange - 1;
+ parentPageRangePtr.p->type[parentPageRangePtr.p->currentIndexPos] = ZNON_LEAF;
+ foundPageRangePtr = parentPageRangePtr;
+ break;
+ } else {
+ ljam();
+ ndbrequire(parentPageRangePtr.p->currentIndexPos == 3);
+/* ---------------------------------------------------------------- */
+/* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */
+/* AND INCREASE THE NUMBER OF LEVELS WE HAVE TRAVERSED */
+/* GOING UP THE TREE. */
+/* ---------------------------------------------------------------- */
+ tiprNoLevels++;
+ }//if
+ }//if
+ }//while
+/* ---------------------------------------------------------------- */
+/* REMEMBER THE ERROR LEVEL IN CASE OF ALLOCATION ERRORS */
+/* ---------------------------------------------------------------- */
+ PageRangePtr newPageRangePtr;
+ PageRangePtr prevPageRangePtr;
+ prevPageRangePtr = foundPageRangePtr;
+ if (c_noOfFreePageRanges < tiprNoLevels) {
+ ljam();
+ return RNIL;
+ }//if
+/* ---------------------------------------------------------------- */
+/* NOW WE HAVE PERFORMED THE SEARCH UPWARDS AND FILLED IN THE */
+/* PROPER FIELDS IN THE PAGE RANGE RECORD WHERE SOME SPACE */
+/* WAS FOUND. THE NEXT STEP IS TO ALLOCATE PAGE RANGES SO */
+/* THAT WE KEEP THE B-TREE BALANCED. THE NEW PAGE RANGE RECORDS     */
+/* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL.         */
+/* ---------------------------------------------------------------- */
+ while (true) {
+ ljam();
+ seizePagerange(newPageRangePtr);
+ tiprNoLevels--;
+ ndbrequire(prevPageRangePtr.p->currentIndexPos < 4);
+ prevPageRangePtr.p->basePageId[prevPageRangePtr.p->currentIndexPos] = newPageRangePtr.i;
+ newPageRangePtr.p->parentPtr = prevPageRangePtr.i;
+ newPageRangePtr.p->currentIndexPos = 0;
+ if (tiprNoLevels > 0) {
+ ljam();
+ newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange;
+ newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
+ newPageRangePtr.p->type[0] = ZNON_LEAF;
+ prevPageRangePtr = newPageRangePtr;
+ } else {
+ ljam();
+ break;
+ }//if
+ }//while
+ return newPageRangePtr.i;
+}//Dbtup::leafPageRangeFull()
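As an illustration (not part of the patch), each PageRange record above holds four entries (startRange/endRange/type/basePageId), so a balanced tree of depth d can address up to 4^d leaf ranges; the helper below is hypothetical and only demonstrates that capacity arithmetic.

    // Illustrative sketch only: fan-out of the page range B-tree.
    #include <cstdint>
    #include <cstdio>

    static const uint32_t kFanOut = 4;   // entries per PageRange record

    static uint64_t maxLeafRanges(uint32_t depth)
    {
      uint64_t n = 1;
      for (uint32_t i = 0; i < depth; i++)
        n *= kFanOut;
      return n;
    }

    int main()
    {
      for (uint32_t d = 1; d <= 4; d++)
        std::printf("depth %u -> up to %llu leaf ranges\n",
                    d, (unsigned long long)maxLeafRanges(d));
      return 0;
    }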
+
+void Dbtup::releasePagerange(PageRangePtr regPRPtr)
+{
+ regPRPtr.p->nextFree = cfirstfreerange;
+ cfirstfreerange = regPRPtr.i;
+ c_noOfFreePageRanges++;
+}//Dbtup::releasePagerange()
+
+void Dbtup::seizePagerange(PageRangePtr& regPageRangePtr)
+{
+ regPageRangePtr.i = cfirstfreerange;
+ ptrCheckGuard(regPageRangePtr, cnoOfPageRangeRec, pageRange);
+ cfirstfreerange = regPageRangePtr.p->nextFree;
+ regPageRangePtr.p->nextFree = RNIL;
+ regPageRangePtr.p->currentIndexPos = 0;
+ regPageRangePtr.p->parentPtr = RNIL;
+ for (Uint32 i = 0; i < 4; i++) {
+ regPageRangePtr.p->startRange[i] = 1;
+ regPageRangePtr.p->endRange[i] = 0;
+ regPageRangePtr.p->type[i] = ZNON_LEAF;
+ regPageRangePtr.p->basePageId[i] = (Uint32)-1;
+ }//for
+ c_noOfFreePageRanges--;
+}//Dbtup::seizePagerange()
+
+void Dbtup::errorHandler(Uint32 errorCode)
+{
+ switch (errorCode) {
+ case 0:
+ ljam();
+ break;
+ case 1:
+ ljam();
+ break;
+ case 2:
+ ljam();
+ break;
+ default:
+ ljam();
+ }
+ ndbrequire(false);
+}//Dbtup::errorHandler()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
new file mode 100644
index 00000000000..c3f85cdebd5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -0,0 +1,1185 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+
+#define ljam() { jamLine(3000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(3000 + __LINE__); }
+
+void
+Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
+{
+ Uint32 startDescriptor = regTabPtr->tabDescriptor;
+ ndbrequire((startDescriptor + (regTabPtr->noOfAttr << ZAD_LOG_SIZE)) <= cnoOfTabDescrRec);
+ for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
+ Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
+ Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
+ if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
+ if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
+ (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
+ if (!AttributeDescriptor::getNullable(attrDescriptor)) {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHOneWordNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHTwoWordNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
+ } else {
+ ndbrequire(false);
+ }//if
+ // replace functions for char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
+ }
+ } else {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHTwoWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) > 2) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ }//if
+ // replace functions for char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
+ }
+ }//if
+ } else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
+ if (!AttributeDescriptor::getNullable(attrDescriptor)) {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNotNULL;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNotNULL;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNotNULL;
+ }//if
+ } else {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readVarSizeUnlimitedNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateVarSizeUnlimitedNULLable;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBigVarSizeNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBigVarSizeNULLable;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readSmallVarSizeNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateSmallVarSizeNULLable;
+ }//if
+ }//if
+ } else {
+ ndbrequire(false);
+ }//if
+ } else {
+ if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
+ (AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynFixedSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynFixedSize;
+ } else if (AttributeDescriptor::getType(attrDescriptor) == ZVAR_ARRAY) {
+ if (AttributeDescriptor::getArraySize(attrDescriptor) == 0) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynVarSizeUnlimited;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynVarSizeUnlimited;
+ } else if (AttributeDescriptor::getArraySize(attrDescriptor) > ZMAX_SMALL_VAR_ARRAY) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynBigVarSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynBigVarSize;
+ } else {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readDynSmallVarSize;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateDynSmallVarSize;
+ }//if
+ } else {
+ ndbrequire(false);
+ }//if
+ }//if
+ }//for
+}//Dbtup::setUpQueryRoutines()
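A side note (not part of the patch): setUpQueryRoutines() fills readFunctionArray/updateFunctionArray once per table, and the read/update paths later call (this->*f)(...) per attribute without re-testing the descriptor flags. The toy class below is hypothetical and only sketches that member-function-pointer dispatch pattern.

    // Illustrative sketch only: per-attribute dispatch via pointers to
    // member functions, reduced to a toy class.
    #include <cstdint>
    #include <cassert>

    class Toy {
    public:
      typedef bool (Toy::*ReadFn)(uint32_t* out);
      ReadFn readFn[2];

      bool readOneWord(uint32_t* out) { *out = 1; return true; }
      bool readTwoWord(uint32_t* out) { out[0] = out[1] = 2; return true; }

      void setUp() {                    // analogue of setUpQueryRoutines()
        readFn[0] = &Toy::readOneWord;
        readFn[1] = &Toy::readTwoWord;
      }
      bool read(uint32_t attrId, uint32_t* out) {  // analogue of the read path
        ReadFn f = readFn[attrId];
        return (this->*f)(out);
      }
    };

    int main()
    {
      Toy t;
      t.setUp();
      uint32_t buf[2];
      assert(t.read(0, buf) && buf[0] == 1);
      assert(t.read(1, buf) && buf[0] == 2 && buf[1] == 2);
      return 0;
    }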
+
+/* ---------------------------------------------------------------- */
+/* THIS ROUTINE IS USED TO READ A NUMBER OF ATTRIBUTES IN THE */
+/* DATABASE AND PLACE THE RESULT IN ATTRINFO RECORDS. */
+//
+// In addition to the parameters used in the call it also relies on the
+// following variables set-up properly.
+//
+// operPtr.p Operation record pointer
+// fragptr.p Fragment record pointer
+// tabptr.p Table record pointer
+/* ---------------------------------------------------------------- */
+int Dbtup::readAttributes(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ const Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32 maxRead,
+ bool xfrmFlag)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 inBufIndex = 0;
+
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tOutBufIndex = 0;
+ tCheckOffset = regTabPtr->tupheadsize;
+ tMaxRead = maxRead;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ tXfrmFlag = xfrmFlag;
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ Uint32 tmpAttrBufIndex = tOutBufIndex;
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ inBufIndex++;
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ ljam();
+
+ AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0);
+ AttributeHeader* ahOut = (AttributeHeader*)&outBuffer[tmpAttrBufIndex];
+ tOutBufIndex = tmpAttrBufIndex + 1;
+ if (attributeId < numAttributes) {
+ Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+ if ((this->*f)(outBuffer,
+ ahOut,
+ attributeDescriptor,
+ attributeOffset)) {
+ continue;
+ } else {
+ return -1;
+ }//if
+ } else if(attributeId & AttributeHeader::PSUEDO){
+ Uint32 sz = read_psuedo(attributeId,
+ outBuffer+tmpAttrBufIndex+1);
+ AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
+ tOutBufIndex = tmpAttrBufIndex + 1 + sz;
+ } else {
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ return tOutBufIndex;
+}//Dbtup::readAttributes()
+
+#if 0
+int Dbtup::readAttributesWithoutHeader(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32* attrBuffer,
+ Uint32 maxRead)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 inBufIndex = 0;
+ Uint32 attrBufIndex = 0;
+
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tOutBufIndex = 0;
+ tCheckOffset = regTabPtr->tupheadsize;
+ tMaxRead = maxRead;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ inBufIndex++;
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ ljam();
+
+ AttributeHeader::init(&attrBuffer[attrBufIndex], attributeId, 0);
+ AttributeHeader* ahOut = (AttributeHeader*)&attrBuffer[attrBufIndex];
+ attrBufIndex++;
+ if (attributeId < numAttributes) {
+ Uint32 attributeDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+ if ((this->*f)(outBuffer,
+ ahOut,
+ attributeDescriptor,
+ attributeOffset)) {
+ continue;
+ } else {
+ return -1;
+ }//if
+ } else {
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ ndbrequire(attrBufIndex == inBufLen);
+ return tOutBufIndex;
+}//Dbtup::readAttributesWithoutHeader()
+#endif
+
+bool
+Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 const wordRead = tTupleHeader[readOffset];
+ Uint32 newIndexBuf = indexBuf + 1;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire(readOffset < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ outBuffer[indexBuf] = wordRead;
+ ahOut->setDataSize(1);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}//Dbtup::readFixedSizeTHOneWordNotNULL()
+
+bool
+Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 const wordReadFirst = tTupleHeader[readOffset];
+ Uint32 const wordReadSecond = tTupleHeader[readOffset + 1];
+ Uint32 newIndexBuf = indexBuf + 2;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire(readOffset + 1 < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(2);
+ outBuffer[indexBuf] = wordReadFirst;
+ outBuffer[indexBuf + 1] = wordReadSecond;
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}//Dbtup::readFixedSizeTHTwoWordNotNULL()
+
+bool
+Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
+ if (! charsetFlag || ! tXfrmFlag) {
+ Uint32 newIndexBuf = indexBuf + attrNoOfWords;
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(attrNoOfWords);
+ MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
+ &tTupleHeader[readOffset],
+ attrNoOfWords);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }//if
+ } else {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
+ const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset];
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ if (ok) {
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
+ if (maxIndexBuf <= maxRead) {
+ ljam();
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ Uint32 dstWords = (n >> 2);
+ ahOut->setDataSize(dstWords);
+ Uint32 newIndexBuf = indexBuf + dstWords;
+ ndbrequire(newIndexBuf <= maxRead);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }
+ } else {
+ ljam();
+ terrorCode = ZTUPLE_CORRUPTED_ERROR;
+ }
+ }
+ return false;
+}//Dbtup::readFixedSizeTHManyWordNotNULL()
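As a small aside (not part of the patch), the charset branch above zero-pads the strnxfrm output up to a whole number of 32-bit words before returning it as ATTRINFO; the helper name padToWords is invented and only reproduces that rounding loop.

    // Illustrative sketch only: rounding a byte count up to whole words.
    #include <cstdint>
    #include <cassert>
    #include <cstring>

    static uint32_t padToWords(unsigned char* buf, uint32_t nBytes)
    {
      while ((nBytes & 3) != 0)        // same padding loop as in the read routine
        buf[nBytes++] = 0;
      return nBytes >> 2;              // number of 32-bit words
    }

    int main()
    {
      unsigned char buf[8];
      std::memset(buf, 0xAA, sizeof(buf));
      assert(padToWords(buf, 5) == 2); // 5 bytes -> 8 bytes -> 2 words
      assert(buf[5] == 0 && buf[6] == 0 && buf[7] == 0);
      assert(padToWords(buf, 8) == 2); // already aligned
      return 0;
    }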
+
+bool
+Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHOneWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHOneWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHTwoWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHTwoWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readFixedSizeTHManyWordNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }//if
+}//Dbtup::readFixedSizeTHManyWordNULLable()
+
+bool
+Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ if (nullFlagCheck(attrDes2)) {
+ ljam();
+ ahOut->setNULL();
+ }//if
+ return true;
+}//Dbtup::readFixedSizeTHZeroWordNULLable()
+
+bool
+Dbtup::nullFlagCheck(Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 nullFlagOffsetInTuple = AttributeOffset::getNullFlagOffset(attrDes2);
+ ndbrequire(nullFlagOffsetInTuple < regTabPtr->tupNullWords);
+ nullFlagOffsetInTuple += regTabPtr->tupNullIndex;
+ ndbrequire(nullFlagOffsetInTuple < tCheckOffset);
+
+ return (AttributeOffset::isNULL(tTupleHeader[nullFlagOffsetInTuple], attrDes2));
+}//Dbtup::nullFlagCheck()
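For orientation only (not part of the patch): the NULL flags live as packed bits in the null words of the tuple header, and nullFlagCheck() tests one of them via AttributeOffset. The sketch below uses a hypothetical flat bit position and only shows the word/bit arithmetic involved in such a check.

    // Illustrative sketch only: testing one bit in a packed null-bit array.
    #include <cstdint>
    #include <cassert>

    static bool isNullBitSet(const uint32_t* nullWords, uint32_t bitPos)
    {
      return (nullWords[bitPos >> 5] >> (bitPos & 31)) & 1;
    }

    int main()
    {
      uint32_t nullWords[2] = { 0, 0 };
      nullWords[1] |= (1u << 3);         // attribute with bit position 35 is NULL
      assert(isNullBitSet(nullWords, 35));
      assert(!isNullBitSet(nullWords, 34));
      return 0;
    }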
+
+bool
+Dbtup::readVariableSizedAttr(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVariableSizedAttr()
+
+bool
+Dbtup::readVarSizeUnlimitedNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVarSizeUnlimitedNotNULL()
+
+bool
+Dbtup::readVarSizeUnlimitedNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readVarSizeUnlimitedNULLable()
+
+bool
+Dbtup::readBigVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readBigVarSizeNotNULL()
+
+bool
+Dbtup::readBigVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readBigVarSizeNULLable()
+
+bool
+Dbtup::readSmallVarSizeNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readSmallVarSizeNotNULL()
+
+bool
+Dbtup::readSmallVarSizeNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readSmallVarSizeNULLable()
+
+bool
+Dbtup::readDynFixedSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynFixedSize()
+
+bool
+Dbtup::readDynVarSizeUnlimited(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynVarSizeUnlimited()
+
+bool
+Dbtup::readDynBigVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynBigVarSize()
+
+bool
+Dbtup::readDynSmallVarSize(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::readDynSmallVarSize()
+
+/* ---------------------------------------------------------------------- */
+/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
+/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
+/* CALLED SEVERAL TIMES FROM THE INTERPRETER. */
+// In addition to the parameters used in the call it also relies on the
+// following variables set-up properly.
+//
+// pagep.p Page record pointer
+// fragptr.p Fragment record pointer
+// operPtr.p Operation record pointer
+// tabptr.p Table record pointer
+/* ---------------------------------------------------------------------- */
+int Dbtup::updateAttributes(Page* const pagePtr,
+ Uint32 tupHeadOffset,
+ Uint32* inBuffer,
+ Uint32 inBufLen)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Operationrec* const regOperPtr = operPtr.p;
+ Uint32 numAttributes = regTabPtr->noOfAttr;
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ ndbrequire(attrDescriptorStart + (numAttributes << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+
+ tCheckOffset = regTabPtr->tupheadsize;
+ tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ Uint32 inBufIndex = 0;
+ tInBufIndex = 0;
+ tInBufLen = inBufLen;
+
+ ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
+ while (inBufIndex < inBufLen) {
+ AttributeHeader ahIn(inBuffer[inBufIndex]);
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = attrDescriptorStart + (attributeId << ZAD_LOG_SIZE);
+ if (attributeId < numAttributes) {
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ if ((AttributeDescriptor::getPrimaryKey(attrDescriptor)) &&
+ (regOperPtr->optype != ZINSERT)) {
+ if (checkUpdateOfPrimaryKey(&inBuffer[inBufIndex], regTabPtr)) {
+ ljam();
+ terrorCode = ZTRY_UPDATE_PRIMARY_KEY;
+ return -1;
+ }//if
+ }//if
+ UpdateFunction f = regTabPtr->updateFunctionArray[attributeId];
+ ljam();
+ regOperPtr->changeMask.set(attributeId);
+ if ((this->*f)(inBuffer,
+ attrDescriptor,
+ attributeOffset)) {
+ inBufIndex = tInBufIndex;
+ continue;
+ } else {
+ ljam();
+ return -1;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZATTRIBUTE_ID_ERROR;
+ return -1;
+ }//if
+ }//while
+ return 0;
+}//Dbtup::updateAttributes()
+
+bool
+Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
+{
+ Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
+ Uint32 attributeHeader;
+ AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
+ AttributeHeader ahIn(*updateBuffer);
+ Uint32 attributeId = ahIn.getAttributeId();
+ Uint32 attrDescriptorIndex = regTabPtr->tabDescriptor + (attributeId << ZAD_LOG_SIZE);
+ Uint32 attrDescriptor = tableDescriptor[attrDescriptorIndex].tabDescr;
+ Uint32 attributeOffset = tableDescriptor[attrDescriptorIndex + 1].tabDescr;
+ ReadFunction f = regTabPtr->readFunctionArray[attributeId];
+
+ AttributeHeader::init(&attributeHeader, attributeId, 0);
+ tOutBufIndex = 0;
+ tMaxRead = MAX_KEY_SIZE_IN_WORDS;
+
+ bool tmp = tXfrmFlag;
+ tXfrmFlag = false;
+ ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
+ tXfrmFlag = tmp;
+ ndbrequire(tOutBufIndex == ahOut->getDataSize());
+ if (ahIn.getDataSize() != ahOut->getDataSize()) {
+ ljam();
+ return true;
+ }//if
+ if (memcmp(&keyReadBuffer[0], &updateBuffer[1], tOutBufIndex << 2) != 0) {
+ ljam();
+ return true;
+ }//if
+ return false;
+}//Dbtup::checkUpdateOfPrimaryKey()
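As an aside (not part of the patch), the rule applied above reduces to: re-read the stored key without collation transform, then treat the update as a key change when either the length or the bytes differ. The helper wouldChangeKey is invented purely to show that comparison on plain buffers.

    // Illustrative sketch only: length check followed by byte comparison.
    #include <cstdint>
    #include <cstring>
    #include <cassert>

    static bool wouldChangeKey(const uint32_t* storedKey, uint32_t storedWords,
                               const uint32_t* newKey, uint32_t newWords)
    {
      if (storedWords != newWords)
        return true;                              // different length => change
      return std::memcmp(storedKey, newKey, storedWords << 2) != 0;
    }

    int main()
    {
      uint32_t a[2] = { 1, 2 };
      uint32_t b[2] = { 1, 2 };
      uint32_t c[2] = { 1, 3 };
      assert(!wouldChangeKey(a, 2, b, 2));
      assert(wouldChangeKey(a, 2, c, 2));
      assert(wouldChangeKey(a, 2, b, 1));
      return 0;
    }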
+
+#if 0
+void Dbtup::checkPages(Fragrecord* const regFragPtr)
+{
+ Uint32 noPages = getNoOfPages(regFragPtr);
+ for (Uint32 i = 0; i < noPages ; i++) {
+ PagePtr pagePtr;
+ pagePtr.i = getRealpid(regFragPtr, i);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ ndbrequire(pagePtr.p->pageWord[1] != (RNIL - 1));
+ }
+}
+#endif
+
+bool
+Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 newIndex = indexBuf + 2;
+ ndbrequire(updateOffset < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ Uint32 updateWord = inBuffer[indexBuf + 1];
+ if (!nullIndicator) {
+ ljam();
+ tInBufIndex = newIndex;
+ tTupleHeader[updateOffset] = updateWord;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ return true;
+}//Dbtup::updateFixedSizeTHOneWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 newIndex = indexBuf + 3;
+ ndbrequire((updateOffset + 1) < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ Uint32 updateWord1 = inBuffer[indexBuf + 1];
+ Uint32 updateWord2 = inBuffer[indexBuf + 2];
+ if (!nullIndicator) {
+ ljam();
+ tInBufIndex = newIndex;
+ tTupleHeader[updateOffset] = updateWord1;
+ tTupleHeader[updateOffset + 1] = updateWord2;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}//Dbtup::updateFixedSizeTHTwoWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 noOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 newIndex = indexBuf + noOfWords + 1;
+ ndbrequire((updateOffset + noOfWords - 1) < tCheckOffset);
+
+ if (newIndex <= inBufLen) {
+ if (!nullIndicator) {
+ ljam();
+ if (charsetFlag) {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ // not const in MySQL
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ const char* ssrc = (const char*)&inBuffer[tInBufIndex + 1];
+ Uint32 lb, len;
+ if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ // fast fix bug#7340
+ if (typeId != NDB_TYPE_TEXT &&
+ (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL) != len) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ }
+ tInBufIndex = newIndex;
+ MEMCOPY_NO_WORDS(&tTupleHeader[updateOffset],
+ &inBuffer[indexBuf + 1],
+ noOfWords);
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+}//Dbtup::updateFixedSizeTHManyWordNotNULL()
+
+bool
+Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ AttributeHeader ahIn(inBuffer[tInBufIndex]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 nullFlagOffset = AttributeOffset::getNullFlagOffset(attrDes2);
+ Uint32 nullFlagBitOffset = AttributeOffset::getNullFlagBitOffset(attrDes2);
+ Uint32 nullWordOffset = nullFlagOffset + regTabPtr->tupNullIndex;
+ ndbrequire((nullFlagOffset < regTabPtr->tupNullWords) &&
+ (nullWordOffset < tCheckOffset));
+ Uint32 nullBits = tTupleHeader[nullWordOffset];
+
+ if (!nullIndicator) {
+ nullBits &= (~(1 << nullFlagBitOffset));
+ ljam();
+ tTupleHeader[nullWordOffset] = nullBits;
+ return updateFixedSizeTHManyWordNotNULL(inBuffer,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ Uint32 newIndex = tInBufIndex + 1;
+ if (newIndex <= tInBufLen) {
+ nullBits |= (1 << nullFlagBitOffset);
+ ljam();
+ tTupleHeader[nullWordOffset] = nullBits;
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ }//if
+}//Dbtup::updateFixedSizeTHManyWordNULLable()
+
+bool
+Dbtup::updateVariableSizedAttr(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVariableSizedAttr()
+
+bool
+Dbtup::updateVarSizeUnlimitedNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVarSizeUnlimitedNotNULL()
+
+bool
+Dbtup::updateVarSizeUnlimitedNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateVarSizeUnlimitedNULLable()
+
+bool
+Dbtup::updateBigVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateBigVarSizeNotNULL()
+
+bool
+Dbtup::updateBigVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateBigVarSizeNULLable()
+
+bool
+Dbtup::updateSmallVarSizeNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateSmallVarSizeNotNULL()
+
+bool
+Dbtup::updateSmallVarSizeNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateSmallVarSizeNULLable()
+
+bool
+Dbtup::updateDynFixedSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynFixedSize()
+
+bool
+Dbtup::updateDynVarSizeUnlimited(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynVarSizeUnlimited()
+
+bool
+Dbtup::updateDynBigVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynBigVarSize()
+
+bool
+Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ ljam();
+ terrorCode = ZVAR_SIZED_NOT_SUPPORTED;
+ return false;
+}//Dbtup::updateDynSmallVarSize()
+
+Uint32
+Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
+ Uint32 tmp[sizeof(SignalHeader)+25];
+ Signal * signal = (Signal*)&tmp;
+ switch(attrId){
+ case AttributeHeader::FRAGMENT:
+ * outBuffer = operPtr.p->fragId >> 1; // remove "hash" bit
+ return 1;
+ case AttributeHeader::FRAGMENT_MEMORY:
+ {
+ Uint64 tmp= fragptr.p->noOfPages;
+ tmp*= 32768;
+ memcpy(outBuffer,&tmp,8);
+ }
+ return 2;
+ case AttributeHeader::ROW_SIZE:
+ * outBuffer = tabptr.p->tupheadsize << 2;
+ return 1;
+ case AttributeHeader::ROW_COUNT:
+ case AttributeHeader::COMMIT_COUNT:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ outBuffer[1] = signal->theData[1];
+ return 2;
+ case AttributeHeader::RANGE_NO:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ return 1;
+ default:
+ return 0;
+ }
+}
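For context only (not part of the patch): read_psuedo() serves attribute ids outside the table's real columns with values computed at read time rather than stored data. The ids and values in the sketch below are hypothetical; it only demonstrates the dispatch-by-id pattern, not the real AttributeHeader constants.

    // Illustrative sketch only: computed "pseudo" attributes selected by id.
    #include <cstdint>
    #include <cassert>

    enum PseudoId { PSEUDO_ROW_SIZE = 0x10000, PSEUDO_FRAGMENT = 0x10001 };

    static uint32_t readPseudo(uint32_t attrId, uint32_t rowSizeWords,
                               uint32_t fragId, uint32_t* out)
    {
      switch (attrId) {
      case PSEUDO_ROW_SIZE:
        out[0] = rowSizeWords << 2;   // report row size in bytes
        return 1;                     // one word written
      case PSEUDO_FRAGMENT:
        out[0] = fragId;
        return 1;
      default:
        return 0;                     // unknown pseudo attribute
      }
    }

    int main()
    {
      uint32_t word;
      assert(readPseudo(PSEUDO_ROW_SIZE, 8, 0, &word) == 1 && word == 32);
      assert(readPseudo(12345, 8, 0, &word) == 0);
      return 0;
    }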
+
+bool
+Dbtup::readBitsNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ outBuffer+indexBuf);
+
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::readBitsNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if(BitmaskImpl::get(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos))
+ {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }
+
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ outBuffer+indexBuf);
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::updateBitsNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+
+ if (newIndex <= inBufLen) {
+ if (!nullIndicator) {
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ inBuffer+indexBuf+1);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ return true;
+}
+
+bool
+Dbtup::updateBitsNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ AttributeHeader ahIn(inBuffer[tInBufIndex]);
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ if (!nullIndicator) {
+ BitmaskImpl::clear(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ inBuffer+indexBuf+1);
+
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ Uint32 newIndex = tInBufIndex + 1;
+ if (newIndex <= tInBufLen) {
+ ljam();
+ BitmaskImpl::set(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ }//if
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
new file mode 100644
index 00000000000..3b957688a1c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
@@ -0,0 +1,230 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(18000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(18000 + __LINE__); }
+
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+/* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */
+/* ---------------------------------------------------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execSTORED_PROCREQ(Signal* signal)
+{
+ OperationrecPtr regOperPtr;
+ TablerecPtr regTabPtr;
+ ljamEntry();
+ regOperPtr.i = signal->theData[0];
+ ptrCheckGuard(regOperPtr, cnoOfOprec, operationrec);
+ regTabPtr.i = signal->theData[1];
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+
+ Uint32 requestInfo = signal->theData[3];
+
+ ndbrequire(regOperPtr.p->transstate == IDLE ||
+ ((regOperPtr.p->transstate == ERROR_WAIT_STORED_PROCREQ) &&
+ (requestInfo == ZSTORED_PROCEDURE_DELETE)));
+ ndbrequire(regTabPtr.p->tableStatus == DEFINED);
+ switch (requestInfo) {
+ case ZSCAN_PROCEDURE:
+ ljam();
+ scanProcedure(signal,
+ regOperPtr.p,
+ signal->theData[4]);
+ break;
+ case ZCOPY_PROCEDURE:
+ ljam();
+ copyProcedure(signal, regTabPtr, regOperPtr.p);
+ break;
+ case ZSTORED_PROCEDURE_DELETE:
+ ljam();
+ deleteScanProcedure(signal, regOperPtr.p);
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+}//Dbtup::execSTORED_PROCREQ()
+
+void Dbtup::deleteScanProcedure(Signal* signal,
+ Operationrec* regOperPtr)
+{
+ StoredProcPtr storedPtr;
+ Uint32 storedProcId = signal->theData[4];
+ c_storedProcPool.getPtr(storedPtr, storedProcId);
+ ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
+ ndbrequire(storedPtr.p->storedCounter == 0);
+ Uint32 firstAttrinbuf = storedPtr.p->storedLinkFirst;
+ storedPtr.p->storedCode = ZSTORED_PROCEDURE_FREE;
+ storedPtr.p->storedLinkFirst = RNIL;
+ storedPtr.p->storedLinkLast = RNIL;
+ storedPtr.p->storedProcLength = 0;
+ c_storedProcPool.release(storedPtr);
+ freeAttrinbufrec(firstAttrinbuf);
+ regOperPtr->currentAttrinbufLen = 0;
+ regOperPtr->transstate = IDLE;
+ signal->theData[0] = regOperPtr->userpointer;
+ signal->theData[1] = storedProcId;
+ sendSignal(regOperPtr->userblockref, GSN_STORED_PROCCONF, signal, 2, JBB);
+}//Dbtup::deleteScanProcedure()
+
+void Dbtup::scanProcedure(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 lenAttrInfo)
+{
+//--------------------------------------------------------
+// We introduce the maxCheck so that there is always one
+// stored procedure entry free for copy procedures. Thus
+// no amount of scanning can cause problems for the node
+// recovery functionality.
+//--------------------------------------------------------
+ StoredProcPtr storedPtr;
+ c_storedProcPool.seize(storedPtr);
+ ndbrequire(storedPtr.i != RNIL);
+ storedPtr.p->storedCode = ZSCAN_PROCEDURE;
+ storedPtr.p->storedCounter = 0;
+ storedPtr.p->storedProcLength = lenAttrInfo;
+ storedPtr.p->storedLinkFirst = RNIL;
+ storedPtr.p->storedLinkLast = RNIL;
+ regOperPtr->transstate = WAIT_STORED_PROCEDURE_ATTR_INFO;
+ regOperPtr->attrinbufLen = lenAttrInfo;
+ regOperPtr->currentAttrinbufLen = 0;
+ regOperPtr->pageOffset = storedPtr.i;
+}//Dbtup::scanProcedure()
+
+void Dbtup::copyProcedure(Signal* signal,
+ TablerecPtr regTabPtr,
+ Operationrec* regOperPtr)
+{
+ Uint32 TnoOfAttributes = regTabPtr.p->noOfAttr;
+ scanProcedure(signal,
+ regOperPtr,
+ TnoOfAttributes);
+
+ Uint32 length = 0;
+ for (Uint32 Ti = 0; Ti < TnoOfAttributes; Ti++) {
+ AttributeHeader::init(&signal->theData[length + 1], Ti, 0);
+ length++;
+ if (length == 24) {
+ ljam();
+ ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, length, 1, true));
+ length = 0;
+ }//if
+ }//for
+ if (length != 0) {
+ ljam();
+ ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, length, 1, true));
+ }//if
+ ndbrequire(regOperPtr->currentAttrinbufLen == 0);
+}//Dbtup::copyProcedure()
+
+bool Dbtup::storedProcedureAttrInfo(Signal* signal,
+ Operationrec* regOperPtr,
+ Uint32 length,
+ Uint32 firstWord,
+ bool copyProcedure)
+{
+ AttrbufrecPtr regAttrPtr;
+ Uint32 RnoFree = cnoFreeAttrbufrec;
+ if (ERROR_INSERTED(4004) && !copyProcedure) {
+ CLEAR_ERROR_INSERT_VALUE;
+ storedSeizeAttrinbufrecErrorLab(signal, regOperPtr);
+ return false;
+ }//if
+ regOperPtr->currentAttrinbufLen += length;
+ ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen);
+ if ((RnoFree > MIN_ATTRBUF) ||
+ (copyProcedure)) {
+ ljam();
+ regAttrPtr.i = cfirstfreeAttrbufrec;
+ ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec);
+ regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0;
+ cfirstfreeAttrbufrec = regAttrPtr.p->attrbuf[ZBUF_NEXT];
+ cnoFreeAttrbufrec = RnoFree - 1;
+ regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
+ } else {
+ ljam();
+ storedSeizeAttrinbufrecErrorLab(signal, regOperPtr);
+ return false;
+ }//if
+ if (regOperPtr->firstAttrinbufrec == RNIL) {
+ ljam();
+ regOperPtr->firstAttrinbufrec = regAttrPtr.i;
+ }//if
+ regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
+ if (regOperPtr->lastAttrinbufrec != RNIL) {
+ AttrbufrecPtr tempAttrinbufptr;
+ ljam();
+ tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec;
+ ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec);
+ tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i;
+ }//if
+ regOperPtr->lastAttrinbufrec = regAttrPtr.i;
+
+ regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = length;
+ MEMCOPY_NO_WORDS(&regAttrPtr.p->attrbuf[0],
+ &signal->theData[firstWord],
+ length);
+
+ if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) {
+ ljam();
+ return true;
+ }//if
+ if (ERROR_INSERTED(4005) && !copyProcedure) {
+ CLEAR_ERROR_INSERT_VALUE;
+ storedSeizeAttrinbufrecErrorLab(signal, regOperPtr);
+ return false;
+ }//if
+
+ StoredProcPtr storedPtr;
+ c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->pageOffset);
+ ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
+
+ regOperPtr->currentAttrinbufLen = 0;
+ storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec;
+ storedPtr.p->storedLinkLast = regOperPtr->lastAttrinbufrec;
+ regOperPtr->firstAttrinbufrec = RNIL;
+ regOperPtr->lastAttrinbufrec = RNIL;
+ regOperPtr->transstate = IDLE;
+ signal->theData[0] = regOperPtr->userpointer;
+ signal->theData[1] = storedPtr.i;
+ sendSignal(regOperPtr->userblockref, GSN_STORED_PROCCONF, signal, 2, JBB);
+ return true;
+}//Dbtup::storedProcedureAttrInfo()
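As a structural aside (not part of the patch), the routine above maintains an index-linked chain of ATTRBUF records (firstAttrinbufrec/lastAttrinbufrec linked through ZBUF_NEXT). The sketch below uses a plain array instead of the block's record pools, and its names are invented; it only shows the tail-append step.

    // Illustrative sketch only: appending to an index-linked record chain.
    #include <cstdint>
    #include <cassert>

    static const uint32_t NIL = 0xffffffff;

    struct Buf { uint32_t next; };

    static void appendToChain(Buf* pool, uint32_t& first, uint32_t& last,
                              uint32_t newRec)
    {
      pool[newRec].next = NIL;
      if (first == NIL)
        first = newRec;                // empty chain: new record is also first
      else
        pool[last].next = newRec;      // otherwise link behind the old tail
      last = newRec;
    }

    int main()
    {
      Buf pool[3];
      uint32_t first = NIL, last = NIL;
      appendToChain(pool, first, last, 2);
      appendToChain(pool, first, last, 0);
      assert(first == 2 && last == 0 && pool[2].next == 0 && pool[0].next == NIL);
      return 0;
    }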
+
+void Dbtup::storedSeizeAttrinbufrecErrorLab(Signal* signal,
+ Operationrec* regOperPtr)
+{
+ StoredProcPtr storedPtr;
+ c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->pageOffset);
+ ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
+
+ storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec;
+ regOperPtr->firstAttrinbufrec = RNIL;
+ regOperPtr->lastAttrinbufrec = RNIL;
+ regOperPtr->transstate = ERROR_WAIT_STORED_PROCREQ;
+ signal->theData[0] = regOperPtr->userpointer;
+ signal->theData[1] = ZSTORED_SEIZE_ATTRINBUFREC_ERROR;
+ signal->theData[2] = regOperPtr->pageOffset;
+ sendSignal(regOperPtr->userblockref, GSN_STORED_PROCREF, signal, 3, JBB);
+}//Dbtup::storedSeizeAttrinbufrecErrorLab()
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
new file mode 100644
index 00000000000..33d63e8ce49
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
@@ -0,0 +1,1021 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+
+#define ljam() { jamLine(26000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(26000 + __LINE__); }
+
+/* **************************************************************** */
+/* ********************* SYSTEM RESTART MANAGER ******************* */
+/* **************************************************************** */
+/***************************************************************/
+/* CHECK RESTART STATE AND SET NEW STATE CALLED IN OPEN, */
+/* READ AND COPY STATES */
+/***************************************************************/
+void Dbtup::execTUP_SRREQ(Signal* signal)
+{
+ RestartInfoRecordPtr riPtr;
+ PendingFileOpenInfoPtr pfoiPtr;
+
+ ljamEntry();
+ Uint32 userPtr = signal->theData[0];
+ Uint32 userBlockref = signal->theData[1];
+ Uint32 tableId = signal->theData[2];
+ Uint32 fragId = signal->theData[3];
+ Uint32 checkpointNumber = signal->theData[4];
+
+ seizeRestartInfoRecord(riPtr);
+
+ riPtr.p->sriUserptr = userPtr;
+ riPtr.p->sriBlockref = userBlockref;
+ riPtr.p->sriState = OPENING_DATA_FILE;
+ riPtr.p->sriCheckpointVersion = checkpointNumber;
+ riPtr.p->sriFragid = fragId;
+ riPtr.p->sriTableId = tableId;
+
+ /* OPEN THE DATA FILE IN THE FOLLOWING FORM */
+ /* D5/DBTUP/T<TABID>/F<FRAGID>/S<CHECKPOINT_NUMBER>.DATA */
+ Uint32 fileType = 1; /* VERSION */
+ fileType = (fileType << 8) | 0; /* .DATA */
+ fileType = (fileType << 8) | 5; /* D5 */
+ fileType = (fileType << 8) | 0xff; /* DON'T USE P DIRECTORY LEVEL */
+ Uint32 fileFlag = 0; /* READ ONLY */
+
+ seizePendingFileOpenInfoRecord(pfoiPtr);
+ pfoiPtr.p->pfoOpenType = LCP_DATA_FILE_READ;
+ pfoiPtr.p->pfoRestartInfoP = riPtr.i;
+
+ signal->theData[0] = cownref;
+ signal->theData[1] = pfoiPtr.i;
+ signal->theData[2] = tableId;
+ signal->theData[3] = fragId;
+ signal->theData[4] = checkpointNumber;
+ signal->theData[5] = fileType;
+ signal->theData[6] = fileFlag;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+ return;
+}//Dbtup::execTUP_SRREQ()
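As a worked example (not part of the patch), the fileType word built inline above packs version, file suffix, D-directory level and P-level into one byte each before it is handed to NDBFS. The helper packFileType is invented; the two asserts just spell out the values produced by the data-file open here and the undo-log open later in this file.

    // Illustrative sketch only: byte packing of the fileType word.
    #include <cstdint>
    #include <cassert>

    static uint32_t packFileType(uint32_t version, uint32_t suffix,
                                 uint32_t dLevel, uint32_t pLevel)
    {
      uint32_t t = version;
      t = (t << 8) | suffix;
      t = (t << 8) | dLevel;
      t = (t << 8) | pLevel;
      return t;
    }

    int main()
    {
      // Data-file open: version 1, .DATA (0), D5, no P directory level.
      assert(packFileType(1, 0, 5, 0xff) == 0x010005FF);
      // Undo-log open: version 1, .LOCLOG (2), D6, no P directory level.
      assert(packFileType(1, 2, 6, 0xff) == 0x010206FF);
      return 0;
    }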
+
+void Dbtup::seizeRestartInfoRecord(RestartInfoRecordPtr& riPtr)
+{
+ riPtr.i = cfirstfreeSri;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+ cfirstfreeSri = riPtr.p->sriNextRec;
+ riPtr.p->sriNextRec = RNIL;
+}//Dbtup::seizeRestartInfoRecord()
+
+void Dbtup::rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ seizeDiskBufferSegmentRecord(dbsiPtr);
+ riPtr.p->sriDataBufferSegmentP = dbsiPtr.i;
+ Uint32 retPageRef = RNIL;
+ Uint32 noAllocPages = 1;
+ Uint32 noOfPagesAllocated;
+ {
+ /**
+ * Use low pages for 0-pages during SR
+ * bitmask of free pages is kept in c_sr_free_page_0
+ */
+ Uint32 tmp = c_sr_free_page_0;
+ for(Uint32 i = 1; i<(1+MAX_PARALLELL_TUP_SRREQ); i++){
+ if(tmp & (1 << i)){
+ retPageRef = i;
+ c_sr_free_page_0 = tmp & (~(1 << i));
+ break;
+ }
+ }
+ ndbrequire(retPageRef != RNIL);
+ }
+
+ dbsiPtr.p->pdxDataPage[0] = retPageRef;
+ dbsiPtr.p->pdxNumDataPages = 1;
+ dbsiPtr.p->pdxFilePage = 0;
+ rfrReadNextDataSegment(signal, riPtr, dbsiPtr);
+ dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_READ_PAGE_ZERO;
+}//Dbtup::rfrReadRestartInfoLab()
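For orientation only (not part of the patch): c_sr_free_page_0 above is a one-word bitmask where a set bit i means low page i is free for a parallel TUP_SRREQ; a page is claimed by clearing its bit and given back later by setting it again. The helpers below are invented and only mirror that claim/release protocol.

    // Illustrative sketch only: claim and release on a one-word free-page bitmask.
    #include <cstdint>
    #include <cassert>

    static const uint32_t NOT_FOUND = 0xffffffff;

    static uint32_t claimPage(uint32_t& mask, uint32_t maxParallel)
    {
      for (uint32_t i = 1; i <= maxParallel; i++) {
        if (mask & (1u << i)) {
          mask &= ~(1u << i);          // mark page i as in use
          return i;
        }
      }
      return NOT_FOUND;
    }

    static void releasePage(uint32_t& mask, uint32_t pageNo)
    {
      mask |= (1u << pageNo);          // mark the page as free again
    }

    int main()
    {
      uint32_t mask = 0x1e;            // pages 1..4 free
      uint32_t p = claimPage(mask, 4);
      assert(p == 1 && mask == 0x1c);
      releasePage(mask, p);
      assert(mask == 0x1e);
      return 0;
    }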
+
+/***************************************************************/
+/* THE RESTART INFORMATION IS NOW READ INTO THE DATA BUFFER */
+/* USE THE RESTART INFORMATION TO INITIATE THE RESTART RECORD */
+/***************************************************************/
+void
+Dbtup::rfrInitRestartInfoLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ Uint32 TzeroDataPage[64];
+ Uint32 Ti;
+ FragrecordPtr regFragPtr;
+ LocalLogInfoPtr lliPtr;
+ PagePtr pagePtr;
+ RestartInfoRecordPtr riPtr;
+ TablerecPtr regTabPtr;
+
+ riPtr.i = dbsiPtr.p->pdxRestartInfoP;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+
+ regTabPtr.i = riPtr.p->sriTableId;
+ ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
+
+ Uint32 fragId = riPtr.p->sriFragid;
+ getFragmentrec(regFragPtr, fragId, regTabPtr.p);
+ riPtr.p->sriFragP = regFragPtr.i;
+
+ /* ----- PAGE ALLOCATION --- */
+ /* ALLOCATE PAGES TO FRAGMENT, INSERT THEM INTO PAGE RANGE TABLE AND */
+ /* ALSO CONVERT THEM INTO EMPTY PAGES AND INSERT THEM INTO THE EMPTY LIST */
+ /* OF THE FRAGMENT. SET ALL LISTS OF FREE PAGES TO RNIL */
+
+ ndbrequire(cfirstfreerange != RNIL);
+ pagePtr.i = dbsiPtr.p->pdxDataPage[0];
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ for (Ti = 0; Ti < 63; Ti++) {
+ /***************************************************************/
+ // Save important content from page zero in a stack variable so
+ // that we can immediately release page zero.
+ /***************************************************************/
+ TzeroDataPage[Ti] = pagePtr.p->pageWord[Ti];
+ }//for
+ /************************************************************************/
+ /* NOW WE DON'T NEED THE RESTART INFO BUFFER PAGE ANYMORE */
+ /* LET'S REMOVE IT AND REUSE THE SEGMENT FOR REAL DATA PAGES           */
+ /* REMOVE ONE PAGE ONLY, PAGEP IS ALREADY SET TO THE RESTART INFO PAGE */
+ /************************************************************************/
+ {
+ ndbrequire(pagePtr.i > 0 && pagePtr.i <= MAX_PARALLELL_TUP_SRREQ);
+ c_sr_free_page_0 |= (1 << pagePtr.i);
+ }
+
+ Uint32 undoFileVersion = TzeroDataPage[ZSRI_UNDO_FILE_VER];
+ lliPtr.i = (undoFileVersion << 2) + (regTabPtr.i & 0x3);
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ riPtr.p->sriLocalLogInfoP = lliPtr.i;
+
+ ndbrequire(regFragPtr.p->fragTableId == regTabPtr.i);
+ ndbrequire(regFragPtr.p->fragmentId == fragId);
+
+ regFragPtr.p->fragStatus = SYSTEM_RESTART;
+
+ regFragPtr.p->noCopyPagesAlloc = TzeroDataPage[ZSRI_NO_COPY_PAGES_ALLOC];
+
+ riPtr.p->sriCurDataPageFromBuffer = 0;
+ riPtr.p->sriNumDataPages = TzeroDataPage[ZSRI_NO_OF_FRAG_PAGES_POS];
+
+ ndbrequire(riPtr.p->sriNumDataPages >= regFragPtr.p->noOfPages);
+ const Uint32 pageCount = riPtr.p->sriNumDataPages - regFragPtr.p->noOfPages;
+ if(pageCount > 0){
+ Uint32 noAllocPages = allocFragPages(regFragPtr.p, pageCount);
+ ndbrequire(noAllocPages == pageCount);
+ }//if
+ ndbrequire(getNoOfPages(regFragPtr.p) == riPtr.p->sriNumDataPages);
+
+/***************************************************************/
+// Set the variables on fragment record which might have been
+// affected by allocFragPages.
+/***************************************************************/
+
+ regFragPtr.p->emptyPrimPage = TzeroDataPage[ZSRI_EMPTY_PRIM_PAGE];
+ regFragPtr.p->thFreeFirst = TzeroDataPage[ZSRI_TH_FREE_FIRST];
+ regFragPtr.p->thFreeCopyFirst = TzeroDataPage[ZSRI_TH_FREE_COPY_FIRST];
+
+/***************************************************************/
+/* THE RESTART INFORMATION IS NOW READ INTO THE DATA BUFFER */
+/* USE THE RESTART INFORMATION TO INITIATE THE FRAGMENT */
+/***************************************************************/
+ /**
+ * IF THIS UNDO FILE IS NOT OPEN, IT WILL BE OPENED HERE AND THE EXECUTION
+ * WILL CONTINUE WHEN THE FSOPENCONF IS ENTERED.
+ * IF IT IS ALREADY IN USE, EXECUTION WILL CONTINUE VIA A
+ * CONTINUEB SIGNAL.
+ */
+ if (lliPtr.p->lliActiveLcp == 0) {
+ PendingFileOpenInfoPtr pfoiPtr;
+ ljam();
+/***************************************************************/
+/* OPEN THE UNDO FILE FOR READ */
+/* THE FILE HANDLE WILL BE SET IN THE LOCAL_LOG_INFO_REC */
+/* UPON FSOPENCONF */
+/***************************************************************/
+ cnoOfLocalLogInfo++;
+ /* F_LEVEL NOT USED */
+ Uint32 fileType = 1; /* VERSION */
+ fileType = (fileType << 8) | 2; /* .LOCLOG */
+ fileType = (fileType << 8) | 6; /* D6 */
+ fileType = (fileType << 8) | 0xff; /* DON'T USE P DIRECTORY LEVEL */
+ Uint32 fileFlag = 0; /* READ ONLY */
+
+ seizePendingFileOpenInfoRecord(pfoiPtr);
+ pfoiPtr.p->pfoOpenType = LCP_UNDO_FILE_READ;
+ pfoiPtr.p->pfoRestartInfoP = riPtr.i;
+
+ signal->theData[0] = cownref;
+ signal->theData[1] = pfoiPtr.i;
+ signal->theData[2] = lliPtr.i;
+ signal->theData[3] = 0xFFFFFFFF;
+ signal->theData[4] = undoFileVersion;
+ signal->theData[5] = fileType;
+ signal->theData[6] = fileFlag;
+ sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA);
+
+ lliPtr.p->lliPrevRecordId = 0;
+ lliPtr.p->lliActiveLcp = 1;
+ lliPtr.p->lliNumFragments = 1;
+ } else {
+ ljam();
+ signal->theData[0] = ZCONT_LOAD_DP;
+ signal->theData[1] = riPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ lliPtr.p->lliNumFragments++;
+ }//if
+ /* RETAIN THE HIGHEST AND LOWEST IDS OF THE LOG RECORD POSITIONS. WE HAVE TO EXECUTE THE */
+ /* UNDO LOG BETWEEN THE END AND START RECORDS FOR ALL RECORDS THAT INCLUDE FRAGMENTS OF  */
+ /* THE RIGHT CHECKPOINT VERSION. TO COMPLETE THE OPERATION WE HAVE TO RUN ALL LOGS THAT  */
+ /* HAVE A NUMBER OF LCP ELEMENTS GREATER THAN 0, I.E. THAT ARE INCLUDED.                 */
+ if (TzeroDataPage[ZSRI_UNDO_LOG_END_REC_ID] > lliPtr.p->lliPrevRecordId) {
+ ljam();
+ lliPtr.p->lliPrevRecordId = TzeroDataPage[ZSRI_UNDO_LOG_END_REC_ID];
+ lliPtr.p->lliEndPageId = TzeroDataPage[ZSRI_UNDO_LOG_END_PAGE_ID];
+ }//if
+ return;
+}//Dbtup::rfrInitRestartInfoLab()
+
+/***************************************************************/
+/* LOAD THE NEXT DATA PAGE SEGMENT INTO MEMORY */
+/***************************************************************/
+void Dbtup::rfrLoadDataPagesLab(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ FragrecordPtr regFragPtr;
+
+ if (riPtr.p->sriCurDataPageFromBuffer >= riPtr.p->sriNumDataPages) {
+ ljam();
+ rfrCompletedLab(signal, riPtr);
+ return;
+ }//if
+ Uint32 startPage = riPtr.p->sriCurDataPageFromBuffer;
+ Uint32 endPage;
+ if ((startPage + ZDB_SEGMENT_SIZE) < riPtr.p->sriNumDataPages) {
+ ljam();
+ endPage = startPage + ZDB_SEGMENT_SIZE;
+ } else {
+ ljam();
+ endPage = riPtr.p->sriNumDataPages;
+ }//if
+ regFragPtr.i = riPtr.p->sriFragP;
+ ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
+ ndbrequire((endPage - startPage) <= 16);
+ Uint32 i = 0;
+ for (Uint32 pageId = startPage; pageId < endPage; pageId++) {
+ ljam();
+ dbsiPtr.p->pdxDataPage[i] = getRealpid(regFragPtr.p, pageId);
+ i++;
+ }//for
+ dbsiPtr.p->pdxNumDataPages = endPage - startPage; /* SET THE NUMBER OF DATA PAGES */
+ riPtr.p->sriCurDataPageFromBuffer = endPage;
+ dbsiPtr.p->pdxFilePage = startPage + 1;
+ rfrReadNextDataSegment(signal, riPtr, dbsiPtr);
+ return;
+}//Dbtup::rfrLoadDataPagesLab()
+
+void Dbtup::rfrCompletedLab(Signal* signal, RestartInfoRecordPtr riPtr)
+{
+ PendingFileOpenInfoPtr pfoPtr;
+/* ---------------------------------------------------------------------- */
+/* CLOSE THE DATA FILE BEFORE SENDING TUP_SRCONF */
+/* ---------------------------------------------------------------------- */
+ seizePendingFileOpenInfoRecord(pfoPtr);
+ pfoPtr.p->pfoOpenType = LCP_DATA_FILE_READ;
+ pfoPtr.p->pfoCheckpointInfoP = riPtr.i;
+
+ signal->theData[0] = riPtr.p->sriDataFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = pfoPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+}//Dbtup::rfrCompletedLab()
+
+void Dbtup::rfrClosedDataFileLab(Signal* signal, Uint32 restartIndex)
+{
+ RestartInfoRecordPtr riPtr;
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ riPtr.i = restartIndex;
+ ptrCheckGuard(riPtr, cnoOfRestartInfoRec, restartInfoRecord);
+ riPtr.p->sriDataFileHandle = RNIL;
+ dbsiPtr.i = riPtr.p->sriDataBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ releaseDiskBufferSegmentRecord(dbsiPtr);
+ signal->theData[0] = riPtr.p->sriUserptr;
+ signal->theData[1] = riPtr.p->sriFragP;
+ sendSignal(riPtr.p->sriBlockref, GSN_TUP_SRCONF, signal, 2, JBB);
+ releaseRestartInfoRecord(riPtr);
+}//Dbtup::rfrClosedDataFileLab()
+
+/* ---------------------------------------------------------------- */
+/* ---------------------- EXECUTE LOCAL LOG ---------------------- */
+/* ---------------------------------------------------------------- */
+void Dbtup::execSTART_RECREQ(Signal* signal)
+{
+ ljamEntry();
+ clqhUserpointer = signal->theData[0];
+ clqhBlockref = signal->theData[1];
+
+ for (int i = 0; i < ZNO_CHECKPOINT_RECORDS; i++){
+ cSrUndoRecords[i] = 0;
+ }//for
+
+ if (cnoOfLocalLogInfo == 0) {
+ ljam();
+/* ---------------------------------------------------------------- */
+/* THERE WERE NO LOCAL LOGS TO EXECUTE IN THIS SYSTEM RESTART */
+/* ---------------------------------------------------------------- */
+ xlcRestartCompletedLab(signal);
+ return;
+ }//if
+ LocalLogInfoPtr lliPtr;
+ for (lliPtr.i = 0; lliPtr.i < 16; lliPtr.i++) {
+ ljam();
+ ptrAss(lliPtr, localLogInfo);
+ if (lliPtr.p->lliActiveLcp == 1) {
+ ljam();
+ signal->theData[0] = ZSTART_EXEC_UNDO_LOG;
+ signal->theData[1] = lliPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ }//if
+ }//for
+ return;
+}//Dbtup::execSTART_RECREQ()
+
+void Dbtup::closeExecUndoLogLab(Signal* signal, LocalLogInfoPtr lliPtr)
+{
+ PendingFileOpenInfoPtr pfoPtr;
+/* ---------------------------------------------------------------------- */
+/* CLOSE THE UNDO LOG BEFORE COMPLETION OF THE SYSTEM RESTART */
+/* ---------------------------------------------------------------------- */
+ seizePendingFileOpenInfoRecord(pfoPtr);
+ pfoPtr.p->pfoOpenType = LCP_UNDO_FILE_READ;
+ pfoPtr.p->pfoCheckpointInfoP = lliPtr.i;
+
+ signal->theData[0] = lliPtr.p->lliUndoFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = pfoPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
+ return;
+}//Dbtup::closeExecUndoLogLab()
+
+void Dbtup::endExecUndoLogLab(Signal* signal, Uint32 lliIndex)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ LocalLogInfoPtr lliPtr;
+
+ lliPtr.i = lliIndex;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+ lliPtr.p->lliUndoFileHandle = RNIL;
+ lliPtr.p->lliActiveLcp = 0;
+/* ---------------------------------------------------------------------- */
+/* WE HAVE NOW CLOSED THE LOG. WE WAIT FOR ALL LOCAL LOGS TO */
+/* COMPLETE LOG EXECUTION BEFORE SENDING THE RESPONSE TO LQH. */
+/* ---------------------------------------------------------------------- */
+ dbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ freeDiskBufferSegmentRecord(signal, dbsiPtr);
+ lliPtr.p->lliUndoBufferSegmentP = RNIL;
+ for (lliPtr.i = 0; lliPtr.i < 16; lliPtr.i++) {
+ ljam();
+ ptrAss(lliPtr, localLogInfo);
+ if (lliPtr.p->lliActiveLcp == 1) {
+ ljam();
+ return;
+ }//if
+ }//for
+ xlcRestartCompletedLab(signal);
+ return;
+}//Dbtup::endExecUndoLogLab()
+
+void Dbtup::xlcRestartCompletedLab(Signal* signal)
+{
+ cnoOfLocalLogInfo = 0;
+
+ signal->theData[0] = NDB_LE_UNDORecordsExecuted;
+ signal->theData[1] = DBTUP; // From block
+ signal->theData[2] = 0; // Total records executed
+ for (int i = 0; i < 10; i++) {
+ if (i < ZNO_CHECKPOINT_RECORDS){
+ signal->theData[i+3] = cSrUndoRecords[i];
+ signal->theData[2] += cSrUndoRecords[i];
+ } else {
+ signal->theData[i+3] = 0; // Unused data
+ }//if
+ }//for
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 12, JBB);
+
+/* ---------------------------------------------------------------------- */
+/* ALL LOCAL LOGS HAVE COMPLETED. WE HAVE COMPLETED OUR PART OF THE */
+/* SYSTEM RESTART. */
+/* ---------------------------------------------------------------------- */
+ signal->theData[0] = clqhUserpointer;
+ sendSignal(clqhBlockref, GSN_START_RECCONF, signal, 1, JBB);
+ return;
+}//Dbtup::xlcRestartCompletedLab()
+
+void Dbtup::startExecUndoLogLab(Signal* signal, Uint32 lliIndex)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ LocalLogInfoPtr lliPtr;
+
+/* ---------------------------------------------------------------------- */
+/* START EXECUTING THE LOG FOR THIS PART. WE BEGIN BY READING THE */
+/* LAST 16 PAGES. */
+/* ---------------------------------------------------------------------- */
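+/* THREE CASES ARE HANDLED BELOW DEPENDING ON THE LOG SIZE: MORE THAN TWO */
+/* UNDO BUFFER SEGMENTS OF PAGES, BETWEEN ONE AND TWO SEGMENTS, OR AT MOST*/
+/* ONE SEGMENT (IN WHICH CASE A SINGLE READ COVERS THE WHOLE LOG). */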
+ /* SET THE PREVIOUS RECORD TO THE LAST ONE BECAUSE THAT'S WHERE TO START */
+ lliPtr.i = lliIndex;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ allocRestartUndoBufferSegment(signal, dbsiPtr, lliPtr);
+ lliPtr.p->lliUndoBufferSegmentP = dbsiPtr.i;
+ dbsiPtr.p->pdxCheckpointInfoP = lliPtr.i;
+ if (lliPtr.p->lliEndPageId > ((2 * ZUB_SEGMENT_SIZE) - 1)) {
+ ljam();
+ dbsiPtr.p->pdxNumDataPages = ZUB_SEGMENT_SIZE;
+ dbsiPtr.p->pdxFilePage = lliPtr.p->lliEndPageId - (ZUB_SEGMENT_SIZE - 1);
+ } else if (lliPtr.p->lliEndPageId > (ZUB_SEGMENT_SIZE - 1)) {
+ ljam();
+ dbsiPtr.p->pdxNumDataPages = lliPtr.p->lliEndPageId - (ZUB_SEGMENT_SIZE - 1);
+ dbsiPtr.p->pdxFilePage = ZUB_SEGMENT_SIZE;
+ } else {
+ ljam();
+ dbsiPtr.p->pdxNumDataPages = lliPtr.p->lliEndPageId + 1;
+ dbsiPtr.p->pdxFilePage = 0;
+ rfrReadNextUndoSegment(signal, dbsiPtr, lliPtr);
+ return;
+ }//if
+ rfrReadFirstUndoSegment(signal, dbsiPtr, lliPtr);
+ return;
+}//Dbtup::startExecUndoLogLab()
+
+void Dbtup::rfrReadSecondUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ LocalLogInfoPtr lliPtr;
+ lliPtr.i = dbsiPtr.p->pdxCheckpointInfoP;
+ ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ dbsiPtr.p->pdxNumDataPages = ZUB_SEGMENT_SIZE;
+ dbsiPtr.p->pdxFilePage -= ZUB_SEGMENT_SIZE;
+ rfrReadNextUndoSegment(signal, dbsiPtr, lliPtr);
+ return;
+}//Dbtup::rfrReadSecondUndoLogLab()
+
+void Dbtup::readExecUndoLogLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr)
+{
+/* ---------------------------------------------------------------------- */
+/* THE NEXT UNDO LOG RECORD HAS NOT BEEN READ FROM DISK YET. WE WILL*/
+/* READ UP TO 8 PAGES BACKWARDS IN THE UNDO LOG FILE. WE WILL KEEP  */
+/* THE LAST 8 PAGES TO ENSURE THAT WE WILL BE ABLE TO READ THE NEXT */
+/* LOG RECORD EVEN IF IT SPANS UP TO 8 PAGES.                       */
+/* ---------------------------------------------------------------------- */
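+/* THE 16-SLOT pdxDataPage ARRAY ACTS AS A DOUBLE BUFFER: THE LOWER HALF   */
+/* RECEIVES THE NEXT (OLDER) SEGMENT FROM DISK WHILE THE UPPER HALF KEEPS  */
+/* THE PAGES ALREADY READ, SO A RECORD SPANNING SEGMENTS STAYS ADDRESSABLE.*/
+/* BELOW, THE HALVES ARE SWAPPED (OR COMPACTED FOR THE FINAL, PARTIAL      */
+/* SEGMENT) BEFORE THE NEXT READ IS ISSUED.                                */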
+ if (dbsiPtr.p->pdxFilePage >= ZUB_SEGMENT_SIZE) {
+ ljam();
+ for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ ljam();
+ Uint32 savePageId = dbsiPtr.p->pdxDataPage[i + ZUB_SEGMENT_SIZE];
+ dbsiPtr.p->pdxDataPage[i + ZUB_SEGMENT_SIZE] = dbsiPtr.p->pdxDataPage[i];
+ dbsiPtr.p->pdxDataPage[i] = savePageId;
+ }//for
+ dbsiPtr.p->pdxNumDataPages = ZUB_SEGMENT_SIZE;
+ dbsiPtr.p->pdxFilePage = dbsiPtr.p->pdxFilePage - ZUB_SEGMENT_SIZE;
+ } else {
+ ljam();
+ Uint32 dataPages[16];
+ ndbrequire(dbsiPtr.p->pdxFilePage > 0);
+ ndbrequire(dbsiPtr.p->pdxFilePage <= ZUB_SEGMENT_SIZE);
+ Uint32 i;
+ for (i = 0; i < dbsiPtr.p->pdxFilePage; i++) {
+ ljam();
+ dataPages[i] = dbsiPtr.p->pdxDataPage[i + ZUB_SEGMENT_SIZE];
+ }//for
+ for (i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ ljam();
+ dataPages[i + dbsiPtr.p->pdxFilePage] = dbsiPtr.p->pdxDataPage[i];
+ }//for
+ Uint32 limitLoop = ZUB_SEGMENT_SIZE + dbsiPtr.p->pdxFilePage;
+ for (i = 0; i < limitLoop; i++) {
+ ljam();
+ dbsiPtr.p->pdxDataPage[i] = dataPages[i];
+ }//for
+ dbsiPtr.p->pdxNumDataPages = dbsiPtr.p->pdxFilePage;
+ dbsiPtr.p->pdxFilePage = 0;
+ }//if
+ rfrReadNextUndoSegment(signal, dbsiPtr, lliPtr);
+ return;
+}//Dbtup::readExecUndoLogLab()
+
+void Dbtup::rfrReadNextDataSegment(Signal* signal, RestartInfoRecordPtr riPtr, DiskBufferSegmentInfoPtr dbsiPtr)
+{
+ dbsiPtr.p->pdxRestartInfoP = riPtr.i;
+ dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_READ;
+ ndbrequire(dbsiPtr.p->pdxNumDataPages <= 8);
+
+ signal->theData[0] = riPtr.p->sriDataFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = dbsiPtr.i;
+ signal->theData[3] = 2;
+ signal->theData[4] = ZBASE_ADDR_PAGE_WORD;
+ signal->theData[5] = dbsiPtr.p->pdxNumDataPages;
+ for (Uint32 i = 0; i < dbsiPtr.p->pdxNumDataPages; i++) {
+ ljam();
+ signal->theData[6 + i] = dbsiPtr.p->pdxDataPage[i];
+ }//for
+ signal->theData[6 + dbsiPtr.p->pdxNumDataPages] = dbsiPtr.p->pdxFilePage;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+}//Dbtup::rfrReadNextDataSegment()
+
+/* ---------------------------------------------------------------- */
+/* ------------------- RFR_READ_FIRST_UNDO_SEGMENT ---------------- */
+/* ---------------------------------------------------------------- */
+/* THIS ROUTINE READS IN THE FIRST UNDO SEGMENT INTO THE CURRENTLY */
+/* ACTIVE UNDO BUFFER SEGMENT */
+/* -----------------------------------------------------------------*/
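+/* NOTE: THE FIRST READ IS PLACED IN THE UPPER HALF OF THE DOUBLE BUFFER */
+/* (pdxDataPage[ZUB_SEGMENT_SIZE]), LEAVING THE LOWER HALF FREE FOR THE  */
+/* SEGMENT THAT PRECEDES IT IN THE FILE.                                 */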
+void Dbtup::rfrReadFirstUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr)
+{
+ dbsiPtr.p->pdxOperation = CHECKPOINT_UNDO_READ_FIRST;
+
+ signal->theData[0] = lliPtr.p->lliUndoFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = dbsiPtr.i;
+ signal->theData[3] = 1;
+ signal->theData[4] = ZBASE_ADDR_UNDO_WORD;
+ signal->theData[5] = dbsiPtr.p->pdxNumDataPages;
+ signal->theData[6] = dbsiPtr.p->pdxDataPage[ZUB_SEGMENT_SIZE];
+ signal->theData[7] = dbsiPtr.p->pdxFilePage;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dbtup::rfrReadFirstUndoSegment()
+
+/* ---------------------------------------------------------------- */
+/* ------------------- RFR_READ_NEXT_UNDO_SEGMENT ----------------- */
+/* ---------------------------------------------------------------- */
+/* THIS ROUTINE READS IN THE NEXT UNDO SEGMENT INTO THE CURRENTLY */
+/* ACTIVE UNDO BUFFER SEGMENT AND SWITCHES TO THE INACTIVE, READY ONE */
+/* -----------------------------------------------------------------*/
+void Dbtup::rfrReadNextUndoSegment(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr)
+{
+ dbsiPtr.p->pdxOperation = CHECKPOINT_UNDO_READ;
+
+ signal->theData[0] = lliPtr.p->lliUndoFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = dbsiPtr.i;
+ signal->theData[3] = 1;
+ signal->theData[4] = ZBASE_ADDR_UNDO_WORD;
+ signal->theData[5] = dbsiPtr.p->pdxNumDataPages;
+ signal->theData[6] = dbsiPtr.p->pdxDataPage[0];
+ signal->theData[7] = dbsiPtr.p->pdxFilePage;
+ sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+}//Dbtup::rfrReadNextUndoSegment()
+
+void Dbtup::xlcGetNextRecordLab(Signal* signal, DiskBufferSegmentInfoPtr dbsiPtr, LocalLogInfoPtr lliPtr)
+{
+ Uint32 loopCount = 0;
+/* ---------------------------------------------------------------------- */
+/* EXECUTE A NEW SET OF UNDO LOG RECORDS. */
+/* ---------------------------------------------------------------------- */
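+/* THE LOOP BELOW IS CAPPED AT 20 ITERATIONS PER INVOCATION; AFTER THAT A  */
+/* CONTINUEB SIGNAL IS SENT TO SELF AS A REAL-TIME BREAK SO THAT OTHER     */
+/* SIGNALS GET A CHANCE TO EXECUTE.                                        */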
+ XlcStruct xlcStruct;
+
+ xlcStruct.LliPtr = lliPtr;
+ xlcStruct.DbsiPtr = dbsiPtr;
+
+ do {
+ ljam();
+ loopCount++;
+ if (loopCount == 20) {
+ ljam();
+ signal->theData[0] = ZCONT_EXECUTE_LC;
+ signal->theData[1] = xlcStruct.LliPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }//if
+ if (xlcStruct.LliPtr.p->lliPrevRecordId == 0) {
+ ljam();
+ closeExecUndoLogLab(signal, xlcStruct.LliPtr);
+ return;
+ }//if
+ xlcStruct.PageId = xlcStruct.LliPtr.p->lliPrevRecordId >> ZUNDO_RECORD_ID_PAGE_INDEX;
+ xlcStruct.PageIndex = xlcStruct.LliPtr.p->lliPrevRecordId & ZUNDO_RECORD_ID_PAGE_INDEX_MASK;
+ if (xlcStruct.PageId < xlcStruct.DbsiPtr.p->pdxFilePage) {
+ ljam();
+ readExecUndoLogLab(signal, xlcStruct.DbsiPtr, xlcStruct.LliPtr);
+ return;
+ }//if
+ ndbrequire((xlcStruct.PageId - xlcStruct.DbsiPtr.p->pdxFilePage) < 16);
+ xlcStruct.UPPtr.i = xlcStruct.DbsiPtr.p->pdxDataPage[xlcStruct.PageId - xlcStruct.DbsiPtr.p->pdxFilePage];
+ ptrCheckGuard(xlcStruct.UPPtr, cnoOfUndoPage, undoPage);
+ xlcGetLogHeader(xlcStruct);
+ getFragmentrec(xlcStruct.FragPtr, xlcStruct.FragId, xlcStruct.TabPtr.p);
+ if (xlcStruct.FragPtr.i == RNIL) {
+ ljam();
+ continue;
+ }//if
+ if (xlcStruct.FragPtr.p->fragStatus != SYSTEM_RESTART) {
+ ljam();
+ continue;
+ }//if
+ ndbrequire(xlcStruct.LogRecordType < ZNO_CHECKPOINT_RECORDS);
+ cSrUndoRecords[xlcStruct.LogRecordType]++;
+ switch (xlcStruct.LogRecordType) {
+ case ZLCPR_TYPE_INSERT_TH:
+ ljam();
+ xlcInsertTh(xlcStruct);
+ break;
+ case ZLCPR_TYPE_DELETE_TH:
+ ljam();
+ xlcDeleteTh(xlcStruct);
+ break;
+ case ZLCPR_TYPE_UPDATE_TH:
+ ljam();
+ xlcUpdateTh(xlcStruct);
+ break;
+ case ZLCPR_TYPE_INSERT_TH_NO_DATA:
+ ljam();
+ xlcInsertTh(xlcStruct);
+ break;
+ case ZLCPR_ABORT_UPDATE:
+ ljam();
+ xlcAbortUpdate(signal, xlcStruct);
+ break;
+ case ZLCPR_ABORT_INSERT:
+ ljam();
+ xlcAbortInsert(signal, xlcStruct);
+ break;
+ case ZTABLE_DESCRIPTOR:
+ ljam();
+ xlcTableDescriptor(xlcStruct);
+ if (xlcStruct.LliPtr.p->lliNumFragments == 0) {
+ ljam();
+ closeExecUndoLogLab(signal, xlcStruct.LliPtr);
+ return;
+ }//if
+ break;
+ case ZLCPR_UNDO_LOG_PAGE_HEADER:
+ ljam();
+ xlcUndoLogPageHeader(xlcStruct);
+ break;
+ case ZINDICATE_NO_OP_ACTIVE:
+ ljam();
+ xlcIndicateNoOpActive(xlcStruct);
+ break;
+ case ZLCPR_TYPE_UPDATE_GCI:
+ ljam();
+ xlcUpdateGCI(xlcStruct);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+ } while (1);
+}//Dbtup::xlcGetNextRecordLab()
+
+/* ---------------------------------------------------------------- */
+/* ----------------- XLC_GET_LOG_HEADER ---------------------- */
+/* ---------------------------------------------------------------- */
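+/* EACH UNDO LOG RECORD STARTS WITH A FOUR-WORD HEADER:                    */
+/* [0] LOG RECORD TYPE [1] PREVIOUS RECORD ID [2] TABLE ID [3] FRAGMENT ID */
+/* THE FAST PATH BELOW READS ALL FOUR WORDS DIRECTLY WHEN THEY FIT ON ONE  */
+/* PAGE; OTHERWISE xlcGetLogWord HANDLES THE PAGE CROSSING.                */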
+void Dbtup::xlcGetLogHeader(XlcStruct& xlcStruct)
+{
+ Uint32 pIndex = xlcStruct.PageIndex;
+ Uint32 fragId;
+ Uint32 tableId;
+ Uint32 prevId;
+ if ((pIndex + 4) < ZWORDS_ON_PAGE) {
+ UndoPage* const regUndoPagePtr = xlcStruct.UPPtr.p;
+ ljam();
+ xlcStruct.LogRecordType = regUndoPagePtr->undoPageWord[pIndex];
+ prevId = regUndoPagePtr->undoPageWord[pIndex + 1];
+ tableId = regUndoPagePtr->undoPageWord[pIndex + 2];
+ fragId = regUndoPagePtr->undoPageWord[pIndex + 3];
+ xlcStruct.PageIndex = pIndex + 4;
+ } else {
+ ljam();
+ xlcStruct.LogRecordType = xlcGetLogWord(xlcStruct);
+ prevId = xlcGetLogWord(xlcStruct);
+ tableId = xlcGetLogWord(xlcStruct);
+ fragId = xlcGetLogWord(xlcStruct);
+ }//if
+ xlcStruct.LliPtr.p->lliPrevRecordId = prevId;
+ xlcStruct.FragId = fragId;
+ xlcStruct.TabPtr.i = tableId;
+ ptrCheckGuard(xlcStruct.TabPtr, cnoOfTablerec, tablerec);
+}//Dbtup::xlcGetLogHeader()
+
+/* ------------------------------------------------------------------- */
+/* ---------------------- XLC_GET_LOG_WORD --------------------------- */
+/* ------------------------------------------------------------------- */
+Uint32 Dbtup::xlcGetLogWord(XlcStruct& xlcStruct)
+{
+ Uint32 pIndex = xlcStruct.PageIndex;
+ ndbrequire(xlcStruct.UPPtr.p != NULL);
+ ndbrequire(pIndex < ZWORDS_ON_PAGE);
+ Uint32 logWord = xlcStruct.UPPtr.p->undoPageWord[pIndex];
+ pIndex++;
+ xlcStruct.PageIndex = pIndex;
+ if (pIndex == ZWORDS_ON_PAGE) {
+ ljam();
+ xlcStruct.PageIndex = ZUNDO_PAGE_HEADER_SIZE;
+ xlcStruct.PageId++;
+ if ((xlcStruct.PageId - xlcStruct.DbsiPtr.p->pdxFilePage) >= (2 * ZUB_SEGMENT_SIZE)) {
+ ljam();
+ xlcStruct.UPPtr.i = RNIL;
+ ptrNull(xlcStruct.UPPtr);
+ } else {
+ ljam();
+ Uint32 index = xlcStruct.PageId - xlcStruct.DbsiPtr.p->pdxFilePage;
+ ndbrequire(index < 16);
+ xlcStruct.UPPtr.i = xlcStruct.DbsiPtr.p->pdxDataPage[index];
+ ptrCheckGuard(xlcStruct.UPPtr, cnoOfUndoPage, undoPage);
+ }//if
+ }//if
+ return logWord;
+}//Dbtup::xlcGetLogWord()
+
+ /****************************************************/
+ /* INSERT A TUPLE HEADER, THE DATA IS THE TUPLE DATA */
+ /****************************************************/
+void Dbtup::xlcInsertTh(XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset;
+ getThAtPageSr(pagePtr.p, pageOffset);
+ ndbrequire(pageOffset == (ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1))));
+ if (xlcStruct.LogRecordType == ZLCPR_TYPE_INSERT_TH) {
+ ljam();
+ xlcCopyData(xlcStruct, pageOffset, regTabPtr->tupheadsize, pagePtr);
+ } else {
+ ndbrequire(xlcStruct.LogRecordType == ZLCPR_TYPE_INSERT_TH_NO_DATA);
+ ljam();
+ }//if
+/* ----------------------------------------*/
+/* INDICATE THAT NO OPERATIONS ACTIVE */
+/* ----------------------------------------*/
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ pagePtr.p->pageWord[pageOffset] = RNIL;
+}//Dbtup::xlcInsertTh()
+
+ /**********************************************/
+ /* DELETE A TUPLE HEADER - NO ADDITIONAL DATA */
+ /**********************************************/
+void Dbtup::xlcDeleteTh(XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1));
+ freeThSr(regTabPtr, pagePtr.p, pageOffset);
+}//Dbtup::xlcDeleteTh()
+
+ /*****************************************************/
+ /* UPDATE A TUPLE HEADER, THE DATA IS THE TUPLE DATA */
+ /*****************************************************/
+void Dbtup::xlcUpdateTh(XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1));
+ xlcCopyData(xlcStruct, pageOffset, regTabPtr->tupheadsize, pagePtr);
+/* ----------------------------------------*/
+/* INDICATE THAT NO OPERATIONS ACTIVE */
+/* ----------------------------------------*/
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ pagePtr.p->pageWord[pageOffset] = RNIL;
+}//Dbtup::xlcUpdateTh()
+
+ /**************************************************/
+ /* ABORT AN INSERT OPERATION - NO ADDITIONAL DATA */
+ /**************************************************/
+void Dbtup::xlcAbortInsert(Signal* signal, XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1));
+ freeTh(regFragPtr, regTabPtr, signal, pagePtr.p, pageOffset);
+}//Dbtup::xlcAbortInsert()
+
+ /*****************************************************/
+ /* COPY DATA FROM COPY TUPLE TO ORIGINAL TUPLE */
+ /*****************************************************/
+void Dbtup::xlcAbortUpdate(Signal* signal, XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+ Uint32 tuple_size = regTabPtr->tupheadsize;
+
+ Uint32 fragPageIdC = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndexC = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndexC & 1) == 0);
+ Uint32 TdestPageId = getRealpid(regFragPtr, fragPageIdC);
+ Uint32 TcmDestIndex = ZPAGE_HEADER_SIZE +
+ (tuple_size * (pageIndexC >> 1));
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ Uint32 TsourcePageId = getRealpid(regFragPtr, fragPageId);
+ Uint32 TcmSourceIndex = ZPAGE_HEADER_SIZE +
+ (tuple_size * (pageIndex >> 1));
+ Uint32 end_source = tuple_size + TcmSourceIndex;
+ Uint32 end_dest = tuple_size + TcmDestIndex;
+
+ void* Tdestination = (void*)&page[TdestPageId].pageWord[TcmDestIndex + 1];
+ const void* Tsource =
+ (void*)&page[TsourcePageId].pageWord[TcmSourceIndex + 1];
+
+ ndbrequire(TsourcePageId < cnoOfPage &&
+ TdestPageId < cnoOfPage &&
+ end_source <= ZWORDS_ON_PAGE &&
+ end_dest <= ZWORDS_ON_PAGE);
+ MEMCOPY_NO_WORDS(Tdestination, Tsource, (tuple_size - 1));
+
+ pagePtr.i = TsourcePageId;
+ ptrAss(pagePtr, page);
+ freeTh(regFragPtr, regTabPtr, signal, pagePtr.p, TcmSourceIndex);
+
+ pagePtr.i = TdestPageId;
+ ptrAss(pagePtr, page);
+ pagePtr.p->pageWord[TcmDestIndex] = RNIL;
+}//Dbtup::xlcAbortUpdate()
+
+ /*****************************/
+ /* RESTORE UPDATED GCI VALUE */
+ /*****************************/
+void Dbtup::xlcUpdateGCI(XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ Uint32 restoredGCI = xlcGetLogWord(xlcStruct);
+
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1));
+ Uint32 gciOffset = pageOffset + regTabPtr->tupGCPIndex;
+ ndbrequire((gciOffset < ZWORDS_ON_PAGE) &&
+ (regTabPtr->tupGCPIndex < regTabPtr->tupheadsize));
+ pagePtr.p->pageWord[gciOffset] = restoredGCI;
+}//Dbtup::xlcUpdateGCI()
+
+ /*****************************************************/
+ /* READ TABLE DESCRIPTOR FROM UNDO LOG */
+ /*****************************************************/
+void Dbtup::xlcTableDescriptor(XlcStruct& xlcStruct)
+{
+ xlcStruct.LliPtr.p->lliNumFragments--;
+ xlcStruct.FragPtr.p->fragStatus = ACTIVE;
+}//Dbtup::xlcTableDescriptor()
+
+ /********************************************************/
+ /* UPDATE PAGE STATE AND NEXT POINTER IN PAGE */
+ /********************************************************/
+void Dbtup::xlcUndoLogPageHeader(XlcStruct& xlcStruct)
+{
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ PagePtr xlcPagep;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ xlcPagep.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(xlcPagep, cnoOfPage, page);
+ Uint32 logWord = xlcGetLogWord(xlcStruct);
+ ndbrequire(logWord != 0);
+ ndbrequire(logWord <= ZAC_MM_FREE_COPY);
+
+ xlcPagep.p->pageWord[ZPAGE_STATE_POS] = logWord;
+ xlcPagep.p->pageWord[ZPAGE_NEXT_POS] = xlcGetLogWord(xlcStruct);
+}//Dbtup::xlcUndoLogPageHeader()
+
+ /********************************************************/
+ /* INDICATE THAT NO OPERATIONS ACTIVE */
+ /********************************************************/
+void Dbtup::xlcIndicateNoOpActive(XlcStruct& xlcStruct)
+{
+ PagePtr pagePtr;
+ Fragrecord* const regFragPtr = xlcStruct.FragPtr.p;
+ Tablerec* const regTabPtr = xlcStruct.TabPtr.p;
+
+ Uint32 fragPageId = xlcGetLogWord(xlcStruct);
+ Uint32 pageIndex = xlcGetLogWord(xlcStruct);
+ ndbrequire((pageIndex & 1) == 0);
+ pagePtr.i = getRealpid(regFragPtr, fragPageId);
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (regTabPtr->tupheadsize * (pageIndex >> 1));
+/* ----------------------------------------*/
+/* INDICATE THAT NO OPERATIONS ACTIVE */
+/* ----------------------------------------*/
+ ndbrequire(pageOffset < ZWORDS_ON_PAGE);
+ pagePtr.p->pageWord[pageOffset] = RNIL;
+}//Dbtup::xlcIndicateNoOpActive()
+
+ /********************************************************/
+ /* THIS IS THE COMMON ROUTINE TO COPY DATA FROM THE */
+ /* UNDO BUFFER TO THE DATA PAGES. IT USES THE */
+ /* XLC_REQUEST_SEGMENT SUB TO GET MORE DATA WHEN NEEDED */
+ /********************************************************/
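+ /* NOTE: THE COPY STARTS AT WORD 1 OF THE TUPLE HEADER; WORD 0         */
+ /* (PRESUMABLY THE OPERATION POINTER) IS SET TO RNIL BY THE CALLERS.   */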
+void Dbtup::xlcCopyData(XlcStruct& xlcStruct, Uint32 pageOffset, Uint32 noOfWords, PagePtr pagePtr)
+{
+ ndbrequire((pageOffset + noOfWords - 1) < ZWORDS_ON_PAGE);
+ for (Uint32 i = 1; i < noOfWords; i++) {
+ ljam();
+ pagePtr.p->pageWord[pageOffset + i] = xlcGetLogWord(xlcStruct);
+ }//for
+}//Dbtup::xlcCopyData()
+
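+/* SEIZES TWO UNDO PAGE SEGMENTS FROM THE FREE LIST AND CHAINS THEM INTO A */
+/* DOUBLE BUFFER OF 2 * ZUB_SEGMENT_SIZE PAGES FOR UNDO LOG EXECUTION.     */
+/* WHEN THE FREE SEGMENT COUNT DROPS TO ZMIN_PAGE_LIMIT_TUP_COMMITREQ,     */
+/* GSN_TUP_COM_BLOCK IS SENT TO DBLQH, PRESUMABLY TO THROTTLE COMMITS.     */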
+void Dbtup::allocRestartUndoBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr, LocalLogInfoPtr lliPtr)
+{
+ UndoPagePtr undoPagePtr;
+
+ ndbrequire(cfirstfreeUndoSeg != RNIL);
+ if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
+ EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_BLOCK, signal, 1);
+ ljamEntry();
+ }//if
+ cnoFreeUndoSeg--;
+ ndbrequire(cnoFreeUndoSeg >= 0);
+ undoPagePtr.i = cfirstfreeUndoSeg;
+ ptrCheckGuard(undoPagePtr, cnoOfUndoPage, undoPage);
+ cfirstfreeUndoSeg = undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
+ undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
+ seizeDiskBufferSegmentRecord(dbsiPtr);
+ dbsiPtr.p->pdxBuffertype = UNDO_RESTART_PAGES;
+ dbsiPtr.p->pdxUndoBufferSet[0] = undoPagePtr.i;
+ Uint32 i;
+ for (i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + i;
+ }//for
+
+ ndbrequire(cfirstfreeUndoSeg != RNIL);
+ if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
+ EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_BLOCK, signal, 1);
+ ljamEntry();
+ }//if
+ cnoFreeUndoSeg--;
+ ndbrequire(cnoFreeUndoSeg >= 0);
+ undoPagePtr.i = cfirstfreeUndoSeg;
+ ptrCheckGuard(undoPagePtr, cnoOfUndoPage, undoPage);
+ cfirstfreeUndoSeg = undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
+ undoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
+ dbsiPtr.p->pdxUndoBufferSet[1] = undoPagePtr.i;
+// lliPtr.p->lliUndoPage = undoPagePtr.i;
+ for (i = ZUB_SEGMENT_SIZE; i < (2 * ZUB_SEGMENT_SIZE); i++) {
+ dbsiPtr.p->pdxDataPage[i] = undoPagePtr.i + (i - ZUB_SEGMENT_SIZE);
+ }//for
+ return;
+}//Dbtup::allocRestartUndoBufferSegment()
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
new file mode 100644
index 00000000000..642ba270760
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
@@ -0,0 +1,212 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(22000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(22000 + __LINE__); }
+
+/* **************************************************************** */
+/* *********** TABLE DESCRIPTOR MEMORY MANAGER ******************** */
+/* **************************************************************** */
+/* This module is used to allocate and deallocate table descriptor */
+/* memory attached to fragments (it could be allocated per table   */
+/* instead). It performs its task using a buddy algorithm.         */
+/* **************************************************************** */
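+/* Rough example of the buddy scheme below (illustrative numbers only):   */
+/* a request of 70 words is rounded up to 80 (a multiple of 16 words);    */
+/* the first non-empty free list holding chunks of at least that size,    */
+/* e.g. 128 words, supplies a chunk; 80 words are handed out and the      */
+/* remaining 48 words are split into power-of-two chunks (32 + 16) and    */
+/* returned to the matching free lists by freeTabDescr().                 */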
+
+Uint32
+Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset)
+{
+ // belongs to configure.in
+ unsigned sizeOfPointer = sizeof(CHARSET_INFO*);
+ ndbrequire((sizeOfPointer & 0x3) == 0);
+ sizeOfPointer = (sizeOfPointer >> 2);
+ // do in layout order and return offsets (see DbtupMeta.cpp)
+ Uint32 allocSize = 0;
+ // magically aligned to 8 bytes
+ offset[0] = allocSize += ZTD_SIZE;
+ offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer;
+ offset[4] = allocSize += regTabPtr->noOfKeyAttr;
+ offset[5] = allocSize += regTabPtr->noOfAttributeGroups;
+ allocSize += regTabPtr->noOfAttr * ZAD_SIZE;
+ allocSize += ZTD_TRAILER_SIZE;
+ // return number of words
+ return allocSize;
+}
+
+Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
+{
+ Uint32 reference = RNIL;
+ Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset);
+/* ---------------------------------------------------------------- */
+/* ALWAYS ALLOCATE A MULTIPLE OF 16 WORDS */
+/* ---------------------------------------------------------------- */
+ allocSize = (((allocSize - 1) >> 4) + 1) << 4;
+ Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */
+ for (Uint32 i = list; i < 16; i++) {
+ ljam();
+ if (cfreeTdList[i] != RNIL) {
+ ljam();
+ reference = cfreeTdList[i];
+ removeTdArea(reference, i); /* REMOVE THE AREA FROM THE FREELIST */
+ Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */
+ if (retNo >= ZTD_FREE_SIZE) {
+ ljam();
+ Uint32 retRef = reference + allocSize; /* SET THE RETURN POINTER */
+ retNo = itdaMergeTabDescr(retRef, retNo); /* MERGE WITH POSSIBLE RIGHT NEIGHBOURS */
+ freeTabDescr(retRef, retNo); /* RETURN UNUSED TD SPACE TO THE TD AREA */
+ } else {
+ ljam();
+ allocSize = 1 << i;
+ }//if
+ break;
+ }//if
+ }//for
+ if (reference == RNIL) {
+ ljam();
+ terrorCode = ZMEM_NOTABDESCR_ERROR;
+ return RNIL;
+ } else {
+ ljam();
+ setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
+ setTabDescrWord(reference + ZTD_DATASIZE, allocSize);
+
+ /* INITIALIZE THE TRAILER RECORD WITH TYPE AND SIZE */
+ /* THE TRAILER IS USED TO SIMPLIFY MERGE OF FREE AREAS */
+
+ setTabDescrWord(reference + ZTD_HEADER, ZTD_TYPE_NORMAL);
+ setTabDescrWord((reference + allocSize) - ZTD_TR_SIZE, allocSize);
+ return reference;
+ }//if
+}//Dbtup::allocTabDescr()
+
+void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo)
+{
+ while (retNo >= ZTD_FREE_SIZE) {
+ ljam();
+ Uint32 list = nextHigherTwoLog(retNo);
+ list--; /* RETURN TO NEXT LOWER LIST */
+ Uint32 sizeOfChunk = 1 << list;
+ insertTdArea(sizeOfChunk, retRef, list);
+ retRef += sizeOfChunk;
+ retNo -= sizeOfChunk;
+ }//while
+}//Dbtup::freeTabDescr()
+
+Uint32
+Dbtup::getTabDescrWord(Uint32 index)
+{
+ ndbrequire(index < cnoOfTabDescrRec);
+ return tableDescriptor[index].tabDescr;
+}//Dbtup::getTabDescrWord()
+
+void
+Dbtup::setTabDescrWord(Uint32 index, Uint32 word)
+{
+ ndbrequire(index < cnoOfTabDescrRec);
+ tableDescriptor[index].tabDescr = word;
+}//Dbtup::setTabDescrWord()
+
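+/* Free chunks are kept in 16 size-indexed free lists (cfreeTdList).     */
+/* Each free chunk carries ZTD_FL_HEADER, ZTD_FL_NEXT, ZTD_FL_PREV and   */
+/* ZTD_FL_SIZE words at its start and a ZTD_TR_TYPE/ZTD_TR_SIZE trailer  */
+/* at its end, so adjacent areas can be identified and merged later.     */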
+void Dbtup::insertTdArea(Uint32 sizeOfChunk, Uint32 tabDesRef, Uint32 list)
+{
+ ndbrequire(list < 16);
+ setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE);
+ setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]);
+ if (cfreeTdList[list] != RNIL) {
+ ljam(); /* LIST NOT EMPTY, LINK THE OLD HEAD BACK TO THE NEW ONE */
+ setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef);
+ }//if
+ cfreeTdList[list] = tabDesRef; /* RELINK THE LIST */
+
+ setTabDescrWord(tabDesRef + ZTD_FL_PREV, RNIL);
+ setTabDescrWord(tabDesRef + ZTD_FL_SIZE, 1 << list);
+ setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_FREE);
+ setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_SIZE, 1 << list);
+}//Dbtup::insertTdArea()
+
+/* ---------------------------------------------------------------- */
+/* ----------------------- MERGE_TAB_DESCR ------------------------ */
+/* ---------------------------------------------------------------- */
+/* INPUT: TAB_DESCR_PTR POINTING AT THE CURRENT CHUNK */
+/* */
+/* SHORTNAME: MTD */
+/* -----------------------------------------------------------------*/
+Uint32 Dbtup::itdaMergeTabDescr(Uint32 retRef, Uint32 retNo)
+{
+ /* THE PART TO MERGE MUST BE THE SAME SIZE AS THE INSERTED PART.             */
+ /* THIS IS TRUE EITHER IF ONE PART HAS THE SAME SIZE OR IF THE SUM OF BOTH   */
+ /* PARTS TOGETHER HAS THE SAME SIZE AS THE PART TO BE INSERTED.              */
+ /* FIND THE SIZES OF THE PARTS TO THE RIGHT OF THE PART TO BE REINSERTED.    */
+ while ((retRef + retNo) < cnoOfTabDescrRec) {
+ ljam();
+ Uint32 tabDesRef = retRef + retNo;
+ Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER);
+ if (headerWord == ZTD_TYPE_FREE) {
+ ljam();
+ Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE);
+
+ retNo += sizeOfMergedPart;
+ Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
+ removeTdArea(tabDesRef, list);
+ } else {
+ ljam();
+ return retNo;
+ }//if
+ }//while
+ ndbrequire((retRef + retNo) == cnoOfTabDescrRec);
+ return retNo;
+}//Dbtup::itdaMergeTabDescr()
+
+/* ---------------------------------------------------------------- */
+/* ------------------------ REMOVE_TD_AREA ------------------------ */
+/* ---------------------------------------------------------------- */
+/* */
+/* THIS ROUTINE REMOVES A TD CHUNK FROM THE POOL OF TD RECORDS */
+/* */
+/* INPUT: TLIST LIST TO USE */
+/* TAB_DESCR_PTR POINTS TO THE CHUNK TO BE REMOVED */
+/* */
+/* SHORTNAME: RMTA */
+/* -----------------------------------------------------------------*/
+void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list)
+{
+ ndbrequire(list < 16);
+ Uint32 tabDescrNextPtr = getTabDescrWord(tabDesRef + ZTD_FL_NEXT);
+ Uint32 tabDescrPrevPtr = getTabDescrWord(tabDesRef + ZTD_FL_PREV);
+
+ setTabDescrWord(tabDesRef + ZTD_HEADER, ZTD_TYPE_NORMAL);
+ setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
+
+ if (tabDesRef == cfreeTdList[list]) {
+ ljam();
+ cfreeTdList[list] = tabDescrNextPtr; /* RELINK THE LIST */
+ }//if
+ if (tabDescrNextPtr != RNIL) {
+ ljam();
+ setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr);
+ }//if
+ if (tabDescrPrevPtr != RNIL) {
+ ljam();
+ setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr);
+ }//if
+}//Dbtup::removeTdArea()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
new file mode 100644
index 00000000000..476a4b5724b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -0,0 +1,1150 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+#include <AttributeDescriptor.hpp>
+#include "AttributeOffset.hpp"
+#include <AttributeHeader.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/TuxMaint.hpp>
+
+#define ljam() { jamLine(7000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(7000 + __LINE__); }
+
+/* **************************************************************** */
+/* ---------------------------------------------------------------- */
+/* ----------------------- TRIGGER HANDLING ----------------------- */
+/* ---------------------------------------------------------------- */
+/* **************************************************************** */
+
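+// The mapping implemented below, in brief:
+//   SUBSCRIPTION / SUBSCRIPTION_BEFORE + TA_DETACHED -> subscription*Triggers
+//   SECONDARY_INDEX + TA_AFTER -> after*Triggers
+//   ORDERED_INDEX + TA_CUSTOM -> tuxCustomTriggers
+//   READ_ONLY_CONSTRAINT (update) + TA_AFTER -> constraintUpdateTriggers
+// Any other combination yields NULL.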
+ArrayList<Dbtup::TupTriggerData>*
+Dbtup::findTriggerList(Tablerec* table,
+ TriggerType::Value ttype,
+ TriggerActionTime::Value ttime,
+ TriggerEvent::Value tevent)
+{
+ ArrayList<TupTriggerData>* tlist = NULL;
+ switch (ttype) {
+ case TriggerType::SUBSCRIPTION:
+ case TriggerType::SUBSCRIPTION_BEFORE:
+ switch (tevent) {
+ case TriggerEvent::TE_INSERT:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionInsertTriggers;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionUpdateTriggers;
+ break;
+ case TriggerEvent::TE_DELETE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_DETACHED)
+ tlist = &table->subscriptionDeleteTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::SECONDARY_INDEX:
+ switch (tevent) {
+ case TriggerEvent::TE_INSERT:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterInsertTriggers;
+ break;
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterUpdateTriggers;
+ break;
+ case TriggerEvent::TE_DELETE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->afterDeleteTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::ORDERED_INDEX:
+ switch (tevent) {
+ case TriggerEvent::TE_CUSTOM:
+ ljam();
+ if (ttime == TriggerActionTime::TA_CUSTOM)
+ tlist = &table->tuxCustomTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ case TriggerType::READ_ONLY_CONSTRAINT:
+ switch (tevent) {
+ case TriggerEvent::TE_UPDATE:
+ ljam();
+ if (ttime == TriggerActionTime::TA_AFTER)
+ tlist = &table->constraintUpdateTriggers;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return tlist;
+}
+
+// Trigger signals
+void
+Dbtup::execCREATE_TRIG_REQ(Signal* signal)
+{
+ ljamEntry();
+ BlockReference senderRef = signal->getSendersBlockRef();
+ const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
+ const CreateTrigReq* const req = &reqCopy;
+
+ // Find table
+ TablerecPtr tabPtr;
+ tabPtr.i = req->getTableId();
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ // Create trigger and associate it with the table
+ if (createTrigger(tabPtr.p, req)) {
+ ljam();
+ // Send conf
+ CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
+ conf->setUserRef(reference());
+ conf->setConnectionPtr(req->getConnectionPtr());
+ conf->setRequestType(req->getRequestType());
+ conf->setTableId(req->getTableId());
+ conf->setIndexId(req->getIndexId());
+ conf->setTriggerId(req->getTriggerId());
+ conf->setTriggerInfo(req->getTriggerInfo());
+ sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
+ signal, CreateTrigConf::SignalLength, JBB);
+ } else {
+ ljam();
+ // Send ref
+ CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
+ ref->setUserRef(reference());
+ ref->setConnectionPtr(req->getConnectionPtr());
+ ref->setRequestType(req->getRequestType());
+ ref->setTableId(req->getTableId());
+ ref->setIndexId(req->getIndexId());
+ ref->setTriggerId(req->getTriggerId());
+ ref->setTriggerInfo(req->getTriggerInfo());
+ ref->setErrorCode(CreateTrigRef::TooManyTriggers);
+ sendSignal(senderRef, GSN_CREATE_TRIG_REF,
+ signal, CreateTrigRef::SignalLength, JBB);
+ }
+}//Dbtup::execCREATE_TRIG_REQ()
+
+void
+Dbtup::execDROP_TRIG_REQ(Signal* signal)
+{
+ ljamEntry();
+ BlockReference senderRef = signal->getSendersBlockRef();
+ const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
+ const DropTrigReq* const req = &reqCopy;
+
+ // Find table
+ TablerecPtr tabPtr;
+ tabPtr.i = req->getTableId();
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ // Drop trigger
+ Uint32 r = dropTrigger(tabPtr.p, req);
+ if (r == 0){
+ // Send conf
+ DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
+ conf->setUserRef(senderRef);
+ conf->setConnectionPtr(req->getConnectionPtr());
+ conf->setRequestType(req->getRequestType());
+ conf->setTableId(req->getTableId());
+ conf->setIndexId(req->getIndexId());
+ conf->setTriggerId(req->getTriggerId());
+ sendSignal(senderRef, GSN_DROP_TRIG_CONF,
+ signal, DropTrigConf::SignalLength, JBB);
+ } else {
+ // Send ref
+ DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend();
+ ref->setUserRef(senderRef);
+ ref->setConnectionPtr(req->getConnectionPtr());
+ ref->setRequestType(req->getRequestType());
+ ref->setTableId(req->getTableId());
+ ref->setIndexId(req->getIndexId());
+ ref->setTriggerId(req->getTriggerId());
+ ref->setErrorCode((DropTrigRef::ErrorCode)r);
+ ref->setErrorLine(__LINE__);
+ ref->setErrorNode(refToNode(reference()));
+ sendSignal(senderRef, GSN_DROP_TRIG_REF,
+ signal, DropTrigRef::SignalLength, JBB);
+ }
+}//Dbtup::execDROP_TRIG_REQ()
+
+/* ---------------------------------------------------------------- */
+/* ------------------------- createTrigger ------------------------ */
+/* */
+/* Creates a new trigger record by fetching one from the trigger */
+/* pool and associates it with the given table. */
+/* Trigger type can be one of secondary_index, subscription, */
+/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
+/* api_trigger(NYI) or sql_trigger(NYI). */
+/* Note that this method only checks for total number of allowed */
+/* triggers. Checking the number of allowed triggers per table is */
+/* done by TRIX. */
+/* */
+/* ---------------------------------------------------------------- */
+bool
+Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
+{
+ if (ERROR_INSERTED(4003)) {
+ CLEAR_ERROR_INSERT_VALUE;
+ return false;
+ }
+ TriggerType::Value ttype = req->getTriggerType();
+ TriggerActionTime::Value ttime = req->getTriggerActionTime();
+ TriggerEvent::Value tevent = req->getTriggerEvent();
+
+ ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
+ ndbrequire(tlist != NULL);
+
+ TriggerPtr tptr;
+ if (!tlist->seize(tptr))
+ return false;
+
+ // Set trigger id
+ tptr.p->triggerId = req->getTriggerId();
+
+ // ndbout_c("Create TupTrigger %u = %u %u %u %u", tptr.p->triggerId, table, ttype, ttime, tevent);
+
+ // Set index id
+ tptr.p->indexId = req->getIndexId();
+
+ // Set trigger type etc
+ tptr.p->triggerType = ttype;
+ tptr.p->triggerActionTime = ttime;
+ tptr.p->triggerEvent = tevent;
+
+ tptr.p->sendBeforeValues = true;
+ if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) &&
+ ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) ||
+ (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) {
+ ljam();
+ tptr.p->sendBeforeValues = false;
+ }
+ tptr.p->sendOnlyChangedAttributes = false;
+ if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
+ (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
+ (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) {
+ ljam();
+ tptr.p->sendOnlyChangedAttributes = true;
+ }
+
+ // Set monitor all
+ tptr.p->monitorAllAttributes = req->getMonitorAllAttributes();
+ tptr.p->monitorReplicas = req->getMonitorReplicas();
+ tptr.p->m_receiverBlock = refToBlock(req->getReceiverRef());
+
+ tptr.p->attributeMask.clear();
+ if (tptr.p->monitorAllAttributes) {
+ ljam();
+ for(Uint32 i = 0; i < table->noOfAttr; i++) {
+ if (!primaryKey(table, i)) {
+ ljam();
+ tptr.p->attributeMask.set(i);
+ }
+ }
+ } else {
+ // Set attribute mask
+ ljam();
+ tptr.p->attributeMask = req->getAttributeMask();
+ }
+ return true;
+}//Dbtup::createTrigger()
+
+bool
+Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
+{
+ Uint32 attrDescriptorStart = regTabPtr->tabDescriptor;
+ Uint32 attrDescriptor = getTabDescrWord(attrDescriptorStart + (attrId * ZAD_SIZE));
+ return (bool)AttributeDescriptor::getPrimaryKey(attrDescriptor);
+}//Dbtup::primaryKey()
+
+/* ---------------------------------------------------------------- */
+/* -------------------------- dropTrigger ------------------------- */
+/* */
+/* Deletes a trigger record by disassociating it with the given */
+/* table and returning it to the trigger pool. */
+/* Trigger type can be one of secondary_index, subscription, */
+/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */
+/* api_trigger(NYI) or sql_trigger(NYI). */
+/* */
+/* ---------------------------------------------------------------- */
+Uint32
+Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
+{
+ Uint32 triggerId = req->getTriggerId();
+
+ TriggerType::Value ttype = req->getTriggerType();
+ TriggerActionTime::Value ttime = req->getTriggerActionTime();
+ TriggerEvent::Value tevent = req->getTriggerEvent();
+
+ // ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
+
+ ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
+ ndbrequire(tlist != NULL);
+
+ Ptr<TupTriggerData> ptr;
+ for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
+ ljam();
+ if (ptr.p->triggerId == triggerId) {
+ ljam();
+ tlist->release(ptr.i);
+ return 0;
+ }
+ }
+ return DropTrigRef::TriggerNotFound;
+}//Dbtup::dropTrigger()
+
+/* ---------------------------------------------------------------- */
+/* -------------- checkImmediateTriggersAfterOp ------------------ */
+/* */
+/* Called after an insert, delete, or update operation takes */
+/* place. Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes immediate triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
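+/* NOTE: WHEN THE COORDINATOR TC REFERENCE POINTS AT DBLQH THE REQUEST WAS */
+/* PRESUMABLY GENERATED INTERNALLY (CF. THE NODE RESTART NOTE IN           */
+/* executeTrigger()) AND IMMEDIATE TRIGGERS ARE SKIPPED BELOW.             */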
+void Dbtup::checkImmediateTriggersAfterInsert(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterInsertTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->afterInsertTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterInsert()
+
+void Dbtup::checkImmediateTriggersAfterUpdate(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->afterUpdateTriggers,
+ regOperPtr);
+ }//if
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
+ ljam();
+ fireImmediateTriggers(signal,
+ regTablePtr->constraintUpdateTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterUpdate()
+
+void Dbtup::checkImmediateTriggersAfterDelete(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ if(refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ return;
+ }
+
+ if ((regOperPtr->primaryReplica) &&
+ (!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
+ ljam();
+ executeTriggers(signal,
+ regTablePtr->afterDeleteTriggers,
+ regOperPtr);
+ }//if
+}//Dbtup::checkImmediateTriggersAfterDelete()
+
+#if 0
+/* ---------------------------------------------------------------- */
+/* --------------------- checkDeferredTriggers -------------------- */
+/* */
+/* Called before commit after an insert, delete, or update */
+/* operation. Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes deferred triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
+void Dbtup::checkDeferredTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ ljam();
+ // NYI
+}//Dbtup::checkDeferredTriggers()
+#endif
+
+/* ---------------------------------------------------------------- */
+/* --------------------- checkDetachedTriggers -------------------- */
+/* */
+/* Called at commit after an insert, delete, or update operation. */
+/* Fetches before tuple for deletes and updates and */
+/* after tuple for inserts and updates. */
+/* Executes detached triggers by sending FIRETRIGORD */
+/* */
+/* ---------------------------------------------------------------- */
+void Dbtup::checkDetachedTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTablePtr)
+{
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ if (regTablePtr->subscriptionInsertTriggers.isEmpty()) {
+ // Table has no active triggers monitoring inserts at commit
+ ljam();
+ return;
+ }//if
+
+ // If any fired immediate insert trigger then fetch after tuple
+ fireDetachedTriggers(signal,
+ regTablePtr->subscriptionInsertTriggers,
+ regOperPtr);
+ break;
+ case(ZDELETE):
+ ljam();
+ if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) {
+ // Table has no active triggers monitoring deletes at commit
+ ljam();
+ return;
+ }//if
+
+ // Execute any after delete triggers by sending
+ // FIRETRIGORD with the before tuple
+ executeTriggers(signal,
+ regTablePtr->subscriptionDeleteTriggers,
+ regOperPtr);
+ break;
+ case(ZUPDATE):
+ ljam();
+ if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) {
+ // Table has no active triggers monitoring updates at commit
+ ljam();
+ return;
+ }//if
+
+ // If any fired immediate update trigger then fetch after tuple
+ // and send two FIRETRIGORD one with before tuple and one with after tuple
+ fireDetachedTriggers(signal,
+ regTablePtr->subscriptionUpdateTriggers,
+ regOperPtr);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::checkDetachedTriggers()
+
+void
+Dbtup::fireImmediateTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireImmediateTriggers()
+
+#if 0
+void
+Dbtup::fireDeferredTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask)) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireDeferredTriggers()
+#endif
+
+void
+Dbtup::fireDetachedTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* const regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ if ((trigPtr.p->monitorReplicas || regOperPtr->primaryReplica) &&
+ (trigPtr.p->monitorAllAttributes ||
+ trigPtr.p->attributeMask.overlaps(regOperPtr->changeMask))) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ }//if
+ triggerList.next(trigPtr);
+ }//while
+}//Dbtup::fireDetachedTriggers()
+
+void Dbtup::executeTriggers(Signal* signal,
+ ArrayList<TupTriggerData>& triggerList,
+ Operationrec* regOperPtr)
+{
+ TriggerPtr trigPtr;
+ triggerList.first(trigPtr);
+ while (trigPtr.i != RNIL) {
+ ljam();
+ executeTrigger(signal,
+ trigPtr.p,
+ regOperPtr);
+ triggerList.next(trigPtr);
+
+ }//while
+}//Dbtup::executeTriggers()
+
+void Dbtup::executeTrigger(Signal* signal,
+ TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr)
+{
+
+ /**
+ * The block below does not work together with GREP.
+ * I have 2 db nodes (2 replicas) -> one node group.
+ * I want to have FIRETRIG_ORD sent to all SumaParticipants,
+ * from all nodes in the node group described above. However,
+ * only one of the nodes in the node group actually sends the
+ * FIRE_TRIG_ORD, and the other node enters this "hack" below.
+ * I don't really know what the code snippet below does, but it
+ * does not work with GREP the way Lars and I want it.
+ * We need to have triggers fired from both the primary and the
+ * backup replica, not only the primary as it is now.
+ *
+ * Note: In Suma, I have changed triggers to be created with
+ * setMonitorReplicas(true).
+ * /Johan
+ *
+ * See RT 709
+ */
+ // XXX quick fix to NR, should fix in LQHKEYREQ instead
+ /*
+ if (refToBlock(regOperPtr->coordinatorTC) == DBLQH) {
+ jam();
+ return;
+ }
+ */
+ BlockReference ref = trigPtr->m_receiverBlock;
+ Uint32* const keyBuffer = &cinBuffer[0];
+ Uint32* const mainBuffer = &coutBuffer[0];
+ Uint32* const copyBuffer = &clogMemBuffer[0];
+
+ Uint32 noPrimKey, noMainWords, noCopyWords;
+
+ if (ref == BACKUP) {
+ ljam();
+ /*
+ In order for the implementation of BACKUP to work even when changing
+ primaries in the middle of the backup we need to set the trigger on
+    all replicas. This check determines whether this is the node where this
+    trigger should be fired. The check should preferably have been put
+    completely in the BACKUP block, but it was about five times simpler
+    to put it here and it is also much faster for the backup (with only a
+    small overhead for everybody else).
+ */
+ signal->theData[0] = trigPtr->triggerId;
+ signal->theData[1] = regOperPtr->fragId;
+ EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
+ ljamEntry();
+ if (signal->theData[0] == 0) {
+ ljam();
+ return;
+ }//if
+ }//if
+ if (!readTriggerInfo(trigPtr,
+ regOperPtr,
+ keyBuffer,
+ noPrimKey,
+ mainBuffer,
+ noMainWords,
+ copyBuffer,
+ noCopyWords)) {
+ ljam();
+ return;
+ }//if
+//--------------------------------------------------------------------
+// Now all data for this trigger has been read. It is now time to send
+// the trigger information consisting of two or three sets of TRIG_
+// ATTRINFO signals and one FIRE_TRIG_ORD signal.
+// We start by setting common header info for all TRIG_ATTRINFO signals.
+//--------------------------------------------------------------------
+ bool executeDirect;
+ TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
+ trigAttrInfo->setConnectionPtr(regOperPtr->tcOpIndex);
+ trigAttrInfo->setTriggerId(trigPtr->triggerId);
+
+ switch(trigPtr->triggerType) {
+ case (TriggerType::SECONDARY_INDEX):
+ ljam();
+ ref = regOperPtr->coordinatorTC;
+ executeDirect = false;
+ break;
+ case (TriggerType::SUBSCRIPTION):
+ case (TriggerType::SUBSCRIPTION_BEFORE):
+ ljam();
+ // Since only backup uses subscription triggers we send to backup directly for now
+ ref = trigPtr->m_receiverBlock;
+ executeDirect = true;
+ break;
+ case (TriggerType::READ_ONLY_CONSTRAINT):
+ terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION;
+ // XXX should return status and abort the rest
+ return;
+ default:
+ ndbrequire(false);
+ executeDirect= false; // remove warning
+ }//switch
+
+ regOperPtr->noFiredTriggers++;
+
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY);
+ sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref);
+
+ Uint32 noAfter = 0;
+ Uint32 noBefore = 0;
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ // Send AttrInfo signals with new attribute values
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noAfter = noMainWords;
+ break;
+ case(ZDELETE):
+ if (trigPtr->sendBeforeValues) {
+ ljam();
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noBefore = noMainWords;
+ }//if
+ break;
+ case(ZUPDATE):
+ ljam();
+ if (trigPtr->sendBeforeValues) {
+ ljam();
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
+ sendTrigAttrInfo(signal, copyBuffer, noCopyWords, executeDirect, ref);
+ noBefore = noCopyWords;
+ }//if
+ trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
+ sendTrigAttrInfo(signal, mainBuffer, noMainWords, executeDirect, ref);
+ noAfter = noMainWords;
+ break;
+ default:
+ ndbrequire(false);
+ }//switch
+ sendFireTrigOrd(signal,
+ regOperPtr,
+ trigPtr,
+ noPrimKey,
+ noBefore,
+ noAfter);
+}//Dbtup::executeTrigger()
+
+Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
+ Uint32 noOfAttributes,
+ Uint32* inBuffer)
+{
+ Uint32 bufIndx = 0;
+ for (Uint32 i = 0; i < noOfAttributes; i++) {
+ ljam();
+ if (attributeMask.get(i)) {
+ ljam();
+ AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
+ }//if
+ }//for
+ return bufIndx;
+}//Dbtup::setAttrIds()
+
+bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
+ Operationrec* const regOperPtr,
+ Uint32* const keyBuffer,
+ Uint32& noPrimKey,
+ Uint32* const mainBuffer,
+ Uint32& noMainWords,
+ Uint32* const copyBuffer,
+ Uint32& noCopyWords)
+{
+ noCopyWords = 0;
+ noMainWords = 0;
+ Uint32 readBuffer[MAX_ATTRIBUTES_IN_TABLE];
+ PagePtr pagep;
+
+//---------------------------------------------------------------------------
+// Set-up variables needed by readAttributes operPtr.p, tabptr.p
+//---------------------------------------------------------------------------
+ operPtr.p = regOperPtr;
+ tabptr.i = regOperPtr->tableRef;
+ ptrCheckGuard(tabptr, cnoOfTablerec, tablerec);
+ Tablerec* const regTabPtr = tabptr.p;
+//--------------------------------------------------------------------
+// Initialise pagep and tuple offset for read of main tuple
+//--------------------------------------------------------------------
+ Uint32 tupheadoffset = regOperPtr->pageOffset;
+ pagep.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagep, cnoOfPage, page);
+
+//--------------------------------------------------------------------
+// Read Primary Key Values
+//--------------------------------------------------------------------
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &tableDescriptor[regTabPtr->readKeyArray].tabDescr,
+ regTabPtr->noOfKeyAttr,
+ keyBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+ ndbrequire(ret != -1);
+ noPrimKey= ret;
+
+ Uint32 numAttrsToRead;
+ if ((regOperPtr->optype == ZUPDATE) &&
+ (trigPtr->sendOnlyChangedAttributes)) {
+ ljam();
+//--------------------------------------------------------------------
+// Update that sends only changed information
+//--------------------------------------------------------------------
+ Bitmask<MAXNROFATTRIBUTESINWORDS> attributeMask;
+ attributeMask = trigPtr->attributeMask;
+ attributeMask.bitAND(regOperPtr->changeMask);
+ numAttrsToRead = setAttrIds(attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
+
+ } else if ((regOperPtr->optype == ZDELETE) &&
+ (!trigPtr->sendBeforeValues)) {
+ ljam();
+//--------------------------------------------------------------------
+// Delete without sending before values only read Primary Key
+//--------------------------------------------------------------------
+ return true;
+ } else {
+ ljam();
+//--------------------------------------------------------------------
+// All others send all attributes that are monitored
+//--------------------------------------------------------------------
+ numAttrsToRead = setAttrIds(trigPtr->attributeMask, regTabPtr->noOfAttr, &readBuffer[0]);
+ }//if
+ ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE);
+//--------------------------------------------------------------------
+// Read Main tuple values
+//--------------------------------------------------------------------
+ if ((regOperPtr->optype != ZDELETE) ||
+ (trigPtr->sendBeforeValues)) {
+ ljam();
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ mainBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+ ndbrequire(ret != -1);
+ noMainWords= ret;
+ } else {
+ ljam();
+ noMainWords = 0;
+ }//if
+//--------------------------------------------------------------------
+// Read Copy tuple values for UPDATE's
+//--------------------------------------------------------------------
+// Initialise pagep and tuple offset for read of copy tuple
+//--------------------------------------------------------------------
+ if ((regOperPtr->optype == ZUPDATE) &&
+ (trigPtr->sendBeforeValues)) {
+ ljam();
+
+ tupheadoffset = regOperPtr->pageOffsetC;
+ pagep.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagep, cnoOfPage, page);
+
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ copyBuffer,
+ ZATTR_BUFFER_SIZE,
+ false);
+
+ ndbrequire(ret != -1);
+ noCopyWords = ret;
+ if ((noMainWords == noCopyWords) &&
+ (memcmp(mainBuffer, copyBuffer, noMainWords << 2) == 0)) {
+//--------------------------------------------------------------------
+// Although a trigger was fired it was not necessary since the old
+// value and the new value were exactly the same
+//--------------------------------------------------------------------
+ ljam();
+ return false;
+ }//if
+ }//if
+ return true;
+}//Dbtup::readTriggerInfo()
+
+void Dbtup::sendTrigAttrInfo(Signal* signal,
+ Uint32* data,
+ Uint32 dataLen,
+ bool executeDirect,
+ BlockReference receiverReference)
+{
+ TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend();
+ Uint32 sigLen;
+ Uint32 dataIndex = 0;
+ do {
+ sigLen = dataLen - dataIndex;
+ if (sigLen > TrigAttrInfo::DataLength) {
+ ljam();
+ sigLen = TrigAttrInfo::DataLength;
+ }//if
+ MEMCOPY_NO_WORDS(trigAttrInfo->getData(),
+ data + dataIndex,
+ sigLen);
+ if (executeDirect) {
+ ljam();
+ EXECUTE_DIRECT(receiverReference,
+ GSN_TRIG_ATTRINFO,
+ signal,
+ TrigAttrInfo::StaticLength + sigLen);
+ ljamEntry();
+ } else {
+ ljam();
+ sendSignal(receiverReference,
+ GSN_TRIG_ATTRINFO,
+ signal,
+ TrigAttrInfo::StaticLength + sigLen,
+ JBB);
+ }//if
+ dataIndex += sigLen;
+ } while (dataLen != dataIndex);
+}//Dbtup::sendTrigAttrInfo()
+
+void Dbtup::sendFireTrigOrd(Signal* signal,
+ Operationrec * const regOperPtr,
+ TupTriggerData* const trigPtr,
+ Uint32 noPrimKeyWords,
+ Uint32 noBeforeValueWords,
+ Uint32 noAfterValueWords)
+{
+ FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend();
+
+ fireTrigOrd->setConnectionPtr(regOperPtr->tcOpIndex);
+ fireTrigOrd->setTriggerId(trigPtr->triggerId);
+
+ switch(regOperPtr->optype) {
+ case(ZINSERT):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
+ break;
+ case(ZDELETE):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
+ break;
+ case(ZUPDATE):
+ ljam();
+ fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+
+ fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords);
+ fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords);
+ fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords);
+
+ switch(trigPtr->triggerType) {
+ case (TriggerType::SECONDARY_INDEX):
+ ljam();
+ sendSignal(regOperPtr->coordinatorTC, GSN_FIRE_TRIG_ORD,
+ signal, FireTrigOrd::SignalLength, JBB);
+ break;
+ case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
+ ljam();
+ // Since only backup uses subscription triggers we
+ // send to backup directly for now
+ fireTrigOrd->setGCI(regOperPtr->gci);
+ fireTrigOrd->setHashValue(regOperPtr->hashValue);
+ EXECUTE_DIRECT(trigPtr->m_receiverBlock,
+ GSN_FIRE_TRIG_ORD,
+ signal,
+ FireTrigOrd::SignalWithHashValueLength);
+ break;
+ case (TriggerType::SUBSCRIPTION):
+ ljam();
+ // Since only backup uses subscription triggers we
+ // send to backup directly for now
+ fireTrigOrd->setGCI(regOperPtr->gci);
+ EXECUTE_DIRECT(trigPtr->m_receiverBlock,
+ GSN_FIRE_TRIG_ORD,
+ signal,
+ FireTrigOrd::SignalWithGCILength);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }//switch
+}//Dbtup::sendFireTrigOrd()
+
+/*
+ * Ordered index triggers.
+ *
+ * Insert: add entry to index
+ * Update: add entry to index, delay remove until commit
+ * Delete: do nothing, delay remove until commit
+ * Commit: remove entry delayed from update and delete
+ * Abort : remove entry added by insert and update
+ *
+ * See Notes.txt for the details.
+ */
+
+int
+Dbtup::executeTuxInsertTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpAdd;
+ // loop over index list
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ if (req->errorCode != 0) {
+ ljam();
+ terrorCode = req->errorCode;
+ return -1;
+ }
+ triggerList.next(triggerPtr);
+ }
+ return 0;
+}
+
+int
+Dbtup::executeTuxUpdateTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ Uint32 tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpAdd;
+ // loop over index list
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ if (req->errorCode != 0) {
+ ljam();
+ terrorCode = req->errorCode;
+ return -1;
+ }
+ triggerList.next(triggerPtr);
+ }
+ return 0;
+}
+
+int
+Dbtup::executeTuxDeleteTriggers(Signal* signal,
+ Operationrec* const regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ // do nothing
+ return 0;
+}
+
+void
+Dbtup::executeTuxCommitTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ // get version
+ // XXX could add prevTupVersion to Operationrec
+ Uint32 tupVersion;
+ if (regOperPtr->optype == ZINSERT) {
+ if (! regOperPtr->deleteInsertFlag)
+ return;
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
+ ndbrequire(tupVersion != regOperPtr->tupVersion);
+ } else if (regOperPtr->optype == ZUPDATE) {
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageIdC;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffsetC + 1];
+ ndbrequire(tupVersion != regOperPtr->tupVersion);
+ } else if (regOperPtr->optype == ZDELETE) {
+ if (regOperPtr->deleteInsertFlag)
+ return;
+ ljam();
+ PagePtr pagePtr;
+ pagePtr.i = regOperPtr->realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ tupVersion = pagePtr.p->pageWord[regOperPtr->pageOffset + 1];
+ ndbrequire(tupVersion == regOperPtr->tupVersion);
+ } else {
+ ndbrequire(false);
+ tupVersion= 0; // remove warning
+ }
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpRemove;
+ // loop over index list
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ // commit must succeed
+ ndbrequire(req->errorCode == 0);
+ triggerList.next(triggerPtr);
+ }
+}
+
+void
+Dbtup::executeTuxAbortTriggers(Signal* signal,
+ Operationrec* regOperPtr,
+ Tablerec* const regTabPtr)
+{
+ TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
+ // get version
+ Uint32 tupVersion;
+ if (regOperPtr->optype == ZINSERT) {
+ ljam();
+ tupVersion = regOperPtr->tupVersion;
+ } else if (regOperPtr->optype == ZUPDATE) {
+ ljam();
+ tupVersion = regOperPtr->tupVersion;
+ } else if (regOperPtr->optype == ZDELETE) {
+ ljam();
+ return;
+ } else {
+ ndbrequire(false);
+ tupVersion= 0; // remove warning
+ }
+ // fill in constant part
+ req->tableId = regOperPtr->tableRef;
+ req->fragId = regOperPtr->fragId;
+ req->pageId = regOperPtr->realPageId;
+ req->pageOffset = regOperPtr->pageOffset;
+ req->tupVersion = tupVersion;
+ req->opInfo = TuxMaintReq::OpRemove;
+ // loop over index list
+ const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
+ TriggerPtr triggerPtr;
+ triggerList.first(triggerPtr);
+ while (triggerPtr.i != RNIL) {
+ ljam();
+ req->indexId = triggerPtr.p->indexId;
+ req->errorCode = RNIL;
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+ signal, TuxMaintReq::SignalLength);
+ ljamEntry();
+ // abort must succeed
+ ndbrequire(req->errorCode == 0);
+ triggerList.next(triggerPtr);
+ }
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp
new file mode 100644
index 00000000000..869f399583f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupUndoLog.cpp
@@ -0,0 +1,284 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <RefConvert.hpp>
+#include <ndb_limits.h>
+#include <pc.hpp>
+
+#define ljam() { jamLine(12000 + __LINE__); }
+#define ljamEntry() { jamEntryLine(12000 + __LINE__); }
+
+void Dbtup::cprAddData(Signal* signal,
+ Fragrecord* const regFragPtr,
+ Uint32 pageIndex,
+ Uint32 noOfWords,
+ Uint32 startOffset)
+{
+ UndoPagePtr undoPagePtr;
+ PagePtr pagePtr;
+ LocalLogInfoPtr regLliPtr;
+
+ regLliPtr.i = regFragPtr->checkpointVersion;
+ ptrCheckGuard(regLliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ pagePtr.i = pageIndex;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ undoPagePtr.i = regLliPtr.p->lliUndoPage;
+ ptrCheckGuard(undoPagePtr, cnoOfUndoPage, undoPage);
+
+ startOffset++;
+ noOfWords--;
+ if ((regLliPtr.p->lliUndoWord + noOfWords) < ZWORDS_ON_PAGE) {
+ ljam();
+ MEMCOPY_NO_WORDS(&undoPagePtr.p->undoPageWord[regLliPtr.p->lliUndoWord],
+ &pagePtr.p->pageWord[startOffset],
+ noOfWords);
+ regLliPtr.p->lliUndoWord += noOfWords;
+ } else {
+ for (Uint32 i = 0; i < noOfWords; i++) {
+ ljam();
+ Uint32 undoWord = pagePtr.p->pageWord[startOffset + i];
+ cprAddUndoLogWord(signal, regLliPtr.p, undoWord);
+ }//for
+ }//if
+}//Dbtup::cprAddData()
+
+void Dbtup::cprAddLogHeader(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Uint32 recordType,
+ Uint32 tableId,
+ Uint32 fragId)
+{
+ Uint32 prevRecId = lliPtr->lliPrevRecordId;
+ lliPtr->lliPrevRecordId = lliPtr->lliUndoWord + (lliPtr->lliLogFilePage << ZUNDO_RECORD_ID_PAGE_INDEX);
+ cprAddUndoLogWord(signal, lliPtr, recordType);
+ cprAddUndoLogWord(signal, lliPtr, prevRecId);
+ cprAddUndoLogWord(signal, lliPtr, tableId);
+ cprAddUndoLogWord(signal, lliPtr, fragId);
+}//Dbtup::cprAddLogHeader()
+
+void Dbtup::cprAddGCIUpdate(Signal* signal,
+ Uint32 prevGCI,
+ Fragrecord* const regFragPtr)
+{
+ LocalLogInfoPtr regLliPtr;
+ regLliPtr.i = regFragPtr->checkpointVersion;
+ ptrCheckGuard(regLliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ cprAddUndoLogWord(signal, regLliPtr.p, prevGCI);
+}//Dbtup::cprAddGCIUpdate()
+
+void Dbtup::cprAddUndoLogPageHeader(Signal* signal,
+ Page* const regPagePtr,
+ Fragrecord* const regFragPtr)
+{
+ UndoPagePtr regUndoPagePtr;
+ LocalLogInfoPtr regLliPtr;
+
+ regLliPtr.i = regFragPtr->checkpointVersion;
+ ptrCheckGuard(regLliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ Uint32 prevRecId = regLliPtr.p->lliPrevRecordId;
+ Uint32 lliWord = regLliPtr.p->lliUndoWord;
+ regLliPtr.p->lliPrevRecordId = lliWord +
+ (regLliPtr.p->lliLogFilePage << ZUNDO_RECORD_ID_PAGE_INDEX);
+ if ((lliWord + 7) < ZWORDS_ON_PAGE) {
+ ljam();
+ regUndoPagePtr.i = regLliPtr.p->lliUndoPage;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+
+ regUndoPagePtr.p->undoPageWord[lliWord] = ZLCPR_UNDO_LOG_PAGE_HEADER;
+ regUndoPagePtr.p->undoPageWord[lliWord + 1] = prevRecId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 2] = regFragPtr->fragTableId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 3] = regFragPtr->fragmentId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 4] = regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS];
+ regUndoPagePtr.p->undoPageWord[lliWord + 5] = regPagePtr->pageWord[ZPAGE_STATE_POS];
+ regUndoPagePtr.p->undoPageWord[lliWord + 6] = regPagePtr->pageWord[ZPAGE_NEXT_POS];
+ regLliPtr.p->lliUndoWord = lliWord + 7;
+ } else {
+ ljam();
+ cprAddUndoLogWord(signal, regLliPtr.p, ZLCPR_UNDO_LOG_PAGE_HEADER);
+ cprAddUndoLogWord(signal, regLliPtr.p, prevRecId);
+ cprAddUndoLogWord(signal, regLliPtr.p, regFragPtr->fragTableId);
+ cprAddUndoLogWord(signal, regLliPtr.p, regFragPtr->fragmentId);
+ cprAddUndoLogWord(signal, regLliPtr.p, regPagePtr->pageWord[ZPAGE_FRAG_PAGE_ID_POS]);
+ cprAddUndoLogWord(signal, regLliPtr.p, regPagePtr->pageWord[ZPAGE_STATE_POS]);
+ cprAddUndoLogWord(signal, regLliPtr.p, regPagePtr->pageWord[ZPAGE_NEXT_POS]);
+ }//if
+}//Dbtup::cprAddUndoLogPageHeader()
+
+void Dbtup::cprAddUndoLogRecord(Signal* signal,
+ Uint32 recordType,
+ Uint32 pageId,
+ Uint32 pageIndex,
+ Uint32 tableId,
+ Uint32 fragId,
+ Uint32 localLogIndex)
+{
+ LocalLogInfoPtr regLliPtr;
+ UndoPagePtr regUndoPagePtr;
+
+ regLliPtr.i = localLogIndex;
+ ptrCheckGuard(regLliPtr, cnoOfParallellUndoFiles, localLogInfo);
+
+ Uint32 prevRecId = regLliPtr.p->lliPrevRecordId;
+ Uint32 lliWord = regLliPtr.p->lliUndoWord;
+
+ regLliPtr.p->lliPrevRecordId = lliWord +
+ (regLliPtr.p->lliLogFilePage << ZUNDO_RECORD_ID_PAGE_INDEX);
+ if ((lliWord + 6) < ZWORDS_ON_PAGE) {
+ ljam();
+ regUndoPagePtr.i = regLliPtr.p->lliUndoPage;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+ regUndoPagePtr.p->undoPageWord[lliWord] = recordType;
+ regUndoPagePtr.p->undoPageWord[lliWord + 1] = prevRecId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 2] = tableId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 3] = fragId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 4] = pageId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 5] = pageIndex;
+
+ regLliPtr.p->lliUndoWord = lliWord + 6;
+ } else {
+ ljam();
+ cprAddUndoLogWord(signal, regLliPtr.p, recordType);
+ cprAddUndoLogWord(signal, regLliPtr.p, prevRecId);
+ cprAddUndoLogWord(signal, regLliPtr.p, tableId);
+ cprAddUndoLogWord(signal, regLliPtr.p, fragId);
+ cprAddUndoLogWord(signal, regLliPtr.p, pageId);
+ cprAddUndoLogWord(signal, regLliPtr.p, pageIndex);
+ }//if
+}//Dbtup::cprAddUndoLogRecord()
+
+void Dbtup::cprAddAbortUpdate(Signal* signal,
+ LocalLogInfo* const lliPtr,
+ Operationrec* const regOperPtr)
+{
+ Uint32 lliWord = lliPtr->lliUndoWord;
+ if ((lliWord + 4) < ZWORDS_ON_PAGE) {
+ ljam();
+ UndoPagePtr regUndoPagePtr;
+ regUndoPagePtr.i = lliPtr->lliUndoPage;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+
+ regUndoPagePtr.p->undoPageWord[lliWord] = regOperPtr->fragPageId;
+ regUndoPagePtr.p->undoPageWord[lliWord + 1] = regOperPtr->pageIndex;
+ regUndoPagePtr.p->undoPageWord[lliWord + 2] = regOperPtr->fragPageIdC;
+ regUndoPagePtr.p->undoPageWord[lliWord + 3] = regOperPtr->pageIndexC;
+ lliPtr->lliUndoWord = lliWord + 4;
+ } else {
+ ljam();
+ cprAddUndoLogWord(signal, lliPtr, regOperPtr->fragPageId);
+ cprAddUndoLogWord(signal, lliPtr, regOperPtr->pageIndex);
+ cprAddUndoLogWord(signal, lliPtr, regOperPtr->fragPageIdC);
+ cprAddUndoLogWord(signal, lliPtr, regOperPtr->pageIndexC);
+ }//if
+}//Dbtup::cprAddAbortUpdate()
+
+void Dbtup::cprAddUndoLogWord(Signal* signal, LocalLogInfo* const lliPtr, Uint32 undoWord)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+ UndoPagePtr regUndoPagePtr;
+
+ ljam();
+ regUndoPagePtr.i = lliPtr->lliUndoPage;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+ ndbrequire(lliPtr->lliUndoWord < ZWORDS_ON_PAGE);
+ regUndoPagePtr.p->undoPageWord[lliPtr->lliUndoWord] = undoWord;
+
+ lliPtr->lliUndoWord++;
+ if (lliPtr->lliUndoWord == ZWORDS_ON_PAGE) {
+ ljam();
+ lliPtr->lliUndoWord = ZUNDO_PAGE_HEADER_SIZE;
+ lliPtr->lliUndoPage++;
+ if (clblPageCounter > 0) {
+ ljam();
+ clblPageCounter--;
+ }//if
+ dbsiPtr.i = lliPtr->lliUndoBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ dbsiPtr.p->pdxNumDataPages++;
+ ndbrequire(dbsiPtr.p->pdxNumDataPages < 16);
+ lliPtr->lliLogFilePage++;
+ if (dbsiPtr.p->pdxNumDataPages == ZUB_SEGMENT_SIZE) {
+ ljam();
+ lcpWriteUndoSegment(signal, lliPtr, false);
+ }//if
+ }//if
+}//Dbtup::cprAddUndoLogWord()
+
+void Dbtup::lcpWriteUndoSegment(Signal* signal, LocalLogInfo* const lliPtr, bool flushFlag)
+{
+ DiskBufferSegmentInfoPtr dbsiPtr;
+
+ dbsiPtr.i = lliPtr->lliUndoBufferSegmentP;
+ ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
+ Uint32 flags = 1;
+ lliPtr->lliUndoPagesToDiskWithoutSynch += dbsiPtr.p->pdxNumDataPages;
+ if ((lliPtr->lliUndoPagesToDiskWithoutSynch > MAX_PAGES_WITHOUT_SYNCH) ||
+ (flushFlag)) {
+ ljam();
+/* ---------------------------------------------------------------- */
+// To avoid synching chunks that are too large we synch after writing
+// a certain number of data pages (e.g. 2 MBytes).
+/* ---------------------------------------------------------------- */
+ lliPtr->lliUndoPagesToDiskWithoutSynch = 0;
+ flags |= 0x10; //Set synch flag unconditionally
+ }//if
+ dbsiPtr.p->pdxOperation = CHECKPOINT_UNDO_WRITE;
+ signal->theData[0] = lliPtr->lliUndoFileHandle;
+ signal->theData[1] = cownref;
+ signal->theData[2] = dbsiPtr.i;
+ signal->theData[3] = flags;
+ signal->theData[4] = ZBASE_ADDR_UNDO_WORD;
+ signal->theData[5] = dbsiPtr.p->pdxNumDataPages;
+ signal->theData[6] = dbsiPtr.p->pdxDataPage[0];
+ signal->theData[7] = dbsiPtr.p->pdxFilePage;
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ DiskBufferSegmentInfoPtr newDbsiPtr;
+ UndoPagePtr newUndoPagePtr;
+
+ seizeUndoBufferSegment(signal, newUndoPagePtr);
+ seizeDiskBufferSegmentRecord(newDbsiPtr);
+ newDbsiPtr.p->pdxBuffertype = UNDO_PAGES;
+ for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
+ newDbsiPtr.p->pdxDataPage[i] = newUndoPagePtr.i + i;
+ }//for
+ newDbsiPtr.p->pdxFilePage = lliPtr->lliLogFilePage;
+ lliPtr->lliUndoPage = newUndoPagePtr.i;
+ lliPtr->lliUndoBufferSegmentP = newDbsiPtr.i;
+}//Dbtup::lcpWriteUndoSegment()
+
+void Dbtup::seizeUndoBufferSegment(Signal* signal, UndoPagePtr& regUndoPagePtr)
+{
+ if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
+ EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_BLOCK, signal, 1);
+ ljamEntry();
+ }//if
+ cnoFreeUndoSeg--;
+ ndbrequire(cnoFreeUndoSeg >= 0);
+ ndbrequire(cfirstfreeUndoSeg != RNIL);
+ regUndoPagePtr.i = cfirstfreeUndoSeg;
+ ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
+ cfirstfreeUndoSeg = regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
+ regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
+}//Dbtup::seizeUndoBufferSegment()
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Makefile.am b/storage/ndb/src/kernel/blocks/dbtup/Makefile.am
new file mode 100644
index 00000000000..e51410e6be3
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/Makefile.am
@@ -0,0 +1,41 @@
+noinst_LIBRARIES = libdbtup.a
+
+libdbtup_a_SOURCES = \
+ DbtupExecQuery.cpp \
+ DbtupBuffer.cpp \
+ DbtupRoutines.cpp \
+ DbtupCommit.cpp \
+ DbtupFixAlloc.cpp \
+ DbtupTrigger.cpp \
+ DbtupAbort.cpp \
+ DbtupLCP.cpp \
+ DbtupUndoLog.cpp \
+ DbtupPageMap.cpp \
+ DbtupPagMan.cpp \
+ DbtupStoredProcDef.cpp \
+ DbtupMeta.cpp \
+ DbtupTabDesMan.cpp \
+ DbtupGen.cpp \
+ DbtupSystemRestart.cpp \
+ DbtupIndex.cpp \
+ DbtupDebug.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtup.dsp
+
+libdbtup.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtup_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Notes.txt b/storage/ndb/src/kernel/blocks/dbtup/Notes.txt
new file mode 100644
index 00000000000..9d47c591fe8
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtup/Notes.txt
@@ -0,0 +1,183 @@
+Operations, tuples, versions
+============================
+
+Operation types.
+
+INSERT insert new original tuple, or insert after delete
+UPDATE update
+DELETE delete
+
+Following need not be considered here.
+
+READ does not change tuples or versions
+WRITE turns into INSERT or UPDATE in LQH
+
+We use more specific names in some cases:
+
+first/INSERT initial insert of new tuple
+delete/INSERT INSERT preceded by DELETE
+DELETE/last DELETE as last operation
+DELETE/insert DELETE followed by INSERT
+
+Tuple + op Can be followed by
+-------------- ------------------
+does not exist first/INSERT
+tuple exists UPDATE DELETE
+INSERT UPDATE DELETE
+UPDATE UPDATE DELETE
+DELETE delete/INSERT
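+
+(Editorial illustration, not part of the original notes: the transition
+table above restated as a small C++ check. ZINSERT, ZUPDATE and ZDELETE
+are the op type codes used in DbtupTrigger.cpp; the enum and helper name
+are hypothetical.)
+
+  // Sketch: which operation types may follow a given tuple state
+  // (left column of the table above).
+  enum SketchTupleState { NoTuple, TupleExists, AfterInsert, AfterUpdate, AfterDelete };
+
+  static bool sketchMayFollow(SketchTupleState state, Uint32 nextOp)
+  {
+    switch (state) {
+    case NoTuple:     return nextOp == ZINSERT;   // first/INSERT
+    case AfterDelete: return nextOp == ZINSERT;   // delete/INSERT
+    default:          return nextOp == ZUPDATE || nextOp == ZDELETE;
+    }
+  }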
+
+Operations on same tuple are kept in doubly linked list until
+commit or abort. The links at both ends are RNIL i.e. the list
+is not circular. The links are:
+
+nextActiveOp the operation BEFORE this one, in event order
+prevActiveOp the operation AFTER this one, in event order
+
+Operations are done on the "original tuple" i.e. the tuple is
+modified in place. If an operation is about to write over data
+in original tuple, it first copies the tuple to a "copy tuple".
+
+Operation Copy tuple
+--------- ----------
+first/INSERT no
+delete/INSERT yes (this is in effect an update)
+UPDATE yes
+DELETE no
+
+The operation points to the tuples via:
+
+realPageId page i-value of original tuple
+pageOffset word offset of original tuple on the page
+realPageIdC page i-value of copy tuple or RNIL if no copy exists
+pageOffsetC word offset of copy tuple on the page
+
+The original tuple and the copy tuple (if any) point back to
+the operation via word 0. In copy tuple this pointer is never
+changed. In original tuple however it always points to the LATEST
+existing operation i.e. the one with prevActiveOp == RNIL.
+Thus word 0 of original tuple is changed on 2 occasions:
+
+- when a new operation is added to the list
+- when commit or abort removes the latest operation
+
+Note that commit/abort of operations occurs in random order.
+The list is adjusted accordingly.
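+
+(Editorial sketch, not part of the original notes: how the list and tuple
+word 0 are adjusted when a new operation is added. Field names follow the
+text above; getOperation() is a hypothetical accessor for an operation
+record given its i-value.)
+
+  // Sketch: link a new operation in as the LATEST one and repoint word 0
+  // of the original tuple at it.
+  void sketchAddOperation(Uint32 newOpPtrI, Operationrec* newOp, Uint32* word0)
+  {
+    Uint32 oldLatest = *word0;        // previous LATEST operation, or RNIL
+    newOp->nextActiveOp = oldLatest;  // the operation BEFORE this one
+    newOp->prevActiveOp = RNIL;       // nothing comes after the latest op
+    if (oldLatest != RNIL)
+      getOperation(oldLatest)->prevActiveOp = newOpPtrI;
+    *word0 = newOpPtrI;               // original tuple now points at the new latest
+  }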
+
+Versions
+--------
+
+Tuple version is stored in tuple word 1. A new original tuple
+gets version 0. The version is incremented by each new operation
+which makes a copy tuple. Version number wraps around at 15 bits.
+
+When a copy tuple is made, the version in original tuple is copied
+to copy tuple as part of tuple data. This takes place before
+the version in original tuple is updated.
+
+Each operation record contains tuple version called tupVersion.
+
+- at insert of new original tuple, tupVersion is set to 0
+
+- if tuple already exists, the FIRST operation (in event order)
+ reads tupVersion from tuple word 1. If the operation is
+ not DELETE, the version is incremented
+
+- subsequent operation reads tupVersion from the operation
+ BEFORE it (nextActiveOp). If this subsequent operation is
+ not DELETE, the version is incremented
+
+When the operation writes the tuple it sets word 1 to tupVersion.
+In detail, per operation type, where INSERT is divided into
+insert of new original tuple and insert after delete:
+
+Operation Copy Increment Set version in original
+--------- ---- --------- -----------------------
+first/INSERT no no yes, to 0
+delete/INSERT yes yes yes
+UPDATE yes yes yes
+DELETE no no no
+
+Thus an existing version is incremented if and only if
+a copy tuple is made.
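+
+(Editorial sketch: the increment-and-wrap rule above in code form. The
+mask assumes the 15-bit version field mentioned here and declared as
+m_tupVersion : 15 in Dbtux.hpp.)
+
+  // Sketch: next tuple version; incremented only when a copy tuple is made,
+  // wrapping around at 15 bits.
+  static Uint32 sketchNextVersion(Uint32 tupVersion, bool makesCopyTuple)
+  {
+    return makesCopyTuple ? ((tupVersion + 1) & 0x7FFF) : tupVersion;
+  }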
+
+Ordered index maintenance
+-------------------------
+
+Each index entry has logical tuple address and tuple version.
+Index entries are added during prepare phase (when each operation
+is executed) and removed during commit or abort phase.
+
+Access to correct tuple version (original or copy) is required
+in TUX which reads index key values 1) to check that at least one
+is not null 2) to do tree search 3) to set min/max prefixes.
+See "Read attributes" below.
+
+An additional complication is that commit/abort of operations
+arrives in random order. So we cannot, for example, detect
+DELETE/insert by looking at prevActiveOp.
+
+Phase Op Action Version in
+----- -- ------ ----------
+prepare INSERT add op and original
+prepare UPDATE add op and original
+prepare DELETE none -
+
+commit first/INSERT none -
+commit delete/INSERT remove copy tuple 1)
+commit UPDATE remove copy tuple 1)
+commit DELETE/last remove op and original
+commit DELETE/insert none -
+
+abort INSERT remove op
+abort UPDATE remove op
+abort DELETE none -
+
+1) alternatively, store prevTupVersion in operation record.
+
+Read attributes, query status
+-----------------------------
+
+TUP_READ_ATTRS signal (or equivalent direct call) reads attribute
+values. Input is logical address of original tuple and tuple
+version. The steps are:
+
+- Translate logical address to physical address of original tuple.
+
+- If version of original tuple in word 1 is right, stop.
+
+- Otherwise word 0 points to LATEST not yet deleted operation.
+ Walk through operation list via nextActiveOp.
+
+- If an operation on the list has realPageIdC == RNIL, skip it.
+
+- Otherwise find copy tuple via realPageIdC, pageOffsetC.
+ If the version of the copy tuple in word 1 is right, stop.
+
+- Call readAttributes() on the tuple found (original or copy).
+
+In short, the version must exist in some not yet deleted tuple,
+either in original or in some copy.
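+
+(Editorial sketch of the search above, not part of the original notes.
+getOperation() and tupleAt() are hypothetical stand-ins for whatever the
+real TUP code uses to reach an operation record or a tuple word.)
+
+  // Sketch: find the tuple (original or some copy) holding 'wantedVersion'.
+  // Returns false if no not-yet-deleted tuple carries that version.
+  bool sketchFindVersion(Uint32* origTuple, Uint32 wantedVersion,
+                         Uint32*& foundTuple)
+  {
+    if (origTuple[1] == wantedVersion) {        // word 1 holds the version
+      foundTuple = origTuple;
+      return true;
+    }
+    Uint32 opPtrI = origTuple[0];               // word 0 points to LATEST operation
+    while (opPtrI != RNIL) {
+      Operationrec* op = getOperation(opPtrI);  // hypothetical accessor
+      if (op->realPageIdC != RNIL) {            // operation made a copy tuple
+        Uint32* copyTuple = tupleAt(op->realPageIdC, op->pageOffsetC);
+        if (copyTuple[1] == wantedVersion) {
+          foundTuple = copyTuple;               // readAttributes() runs on this one
+          return true;
+        }
+      }
+      opPtrI = op->nextActiveOp;                // walk towards older operations
+    }
+    return false;
+  }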
+
+Note that this must work during all phases since index code
+needs to read index key attributes from correct tuple version in
+each add/remove operation.
+
+TUP_QUERY_TH signal (or equivalent direct call) does same search
+for tuple version. It is called from index scan and returns info
+used to decide if the scan can see the tuple.
+
+This signal may also be called during any phase since commit/abort
+of all operations is not done in one time-slice.
+
+Commit and abort
+----------------
+
+[ hairy stuff ]
+
+Problems
+--------
+
+Current abort code can destroy a tuple version too early. This
+happens in test case "ticuur" (insert-commit-update-update-rollback),
+if abort of first update arrives before abort of second update.
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
new file mode 100644
index 00000000000..5c12472a0f7
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -0,0 +1,1291 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBTUX_H
+#define DBTUX_H
+
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+#include <AttributeDescriptor.hpp>
+#include <AttributeHeader.hpp>
+#include <ArrayPool.hpp>
+#include <DataBuffer.hpp>
+#include <md5_hash.hpp>
+
+// big brother
+#include <Dbtup.hpp>
+
+// signal classes
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/TuxContinueB.hpp>
+#include <signaldata/TupFrag.hpp>
+#include <signaldata/AlterIndx.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/TuxMaint.hpp>
+#include <signaldata/AccScan.hpp>
+#include <signaldata/TuxBound.hpp>
+#include <signaldata/NextScan.hpp>
+#include <signaldata/AccLock.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+
+// debug
+#ifdef VM_TRACE
+#include <NdbOut.hpp>
+#include <OutputStream.hpp>
+#endif
+
+// jams
+#undef jam
+#undef jamEntry
+#ifdef DBTUX_GEN_CPP
+#define jam() jamLine(10000 + __LINE__)
+#define jamEntry() jamEntryLine(10000 + __LINE__)
+#endif
+#ifdef DBTUX_META_CPP
+#define jam() jamLine(20000 + __LINE__)
+#define jamEntry() jamEntryLine(20000 + __LINE__)
+#endif
+#ifdef DBTUX_MAINT_CPP
+#define jam() jamLine(30000 + __LINE__)
+#define jamEntry() jamEntryLine(30000 + __LINE__)
+#endif
+#ifdef DBTUX_NODE_CPP
+#define jam() jamLine(40000 + __LINE__)
+#define jamEntry() jamEntryLine(40000 + __LINE__)
+#endif
+#ifdef DBTUX_TREE_CPP
+#define jam() jamLine(50000 + __LINE__)
+#define jamEntry() jamEntryLine(50000 + __LINE__)
+#endif
+#ifdef DBTUX_SCAN_CPP
+#define jam() jamLine(60000 + __LINE__)
+#define jamEntry() jamEntryLine(60000 + __LINE__)
+#endif
+#ifdef DBTUX_SEARCH_CPP
+#define jam() jamLine(70000 + __LINE__)
+#define jamEntry() jamEntryLine(70000 + __LINE__)
+#endif
+#ifdef DBTUX_CMP_CPP
+#define jam() jamLine(80000 + __LINE__)
+#define jamEntry() jamEntryLine(80000 + __LINE__)
+#endif
+#ifdef DBTUX_DEBUG_CPP
+#define jam() jamLine(90000 + __LINE__)
+#define jamEntry() jamEntryLine(90000 + __LINE__)
+#endif
+#ifndef jam
+#define jam() jamLine(__LINE__)
+#define jamEntry() jamEntryLine(__LINE__)
+#endif
+
+#undef max
+#undef min
+
+class Configuration;
+
+class Dbtux : public SimulatedBlock {
+public:
+ Dbtux(const Configuration& conf);
+ virtual ~Dbtux();
+
+ // pointer to TUP instance in this thread
+ Dbtup* c_tup;
+
+private:
+ // sizes are in words (Uint32)
+ STATIC_CONST( MaxIndexFragments = 2 * MAX_FRAG_PER_NODE );
+ STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
+ STATIC_CONST( MaxAttrDataSize = 2048 );
+public:
+ STATIC_CONST( DescPageSize = 256 );
+private:
+ STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE );
+ STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
+ STATIC_CONST( ScanBoundSegmentSize = 7 );
+ STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
+ BLOCK_DEFINES(Dbtux);
+
+ // forward declarations
+ struct DescEnt;
+
+ /*
+ * Pointer to array of Uint32.
+ */
+ struct Data {
+ private:
+ Uint32* m_data;
+ public:
+ Data();
+ Data(Uint32* data);
+ Data& operator=(Uint32* data);
+ operator Uint32*() const;
+ Data& operator+=(size_t n);
+ AttributeHeader& ah() const;
+ };
+ friend class Data;
+
+ /*
+ * Pointer to array of constant Uint32.
+ */
+ struct ConstData;
+ friend struct ConstData;
+ struct ConstData {
+ private:
+ const Uint32* m_data;
+ public:
+ ConstData();
+ ConstData(const Uint32* data);
+ ConstData& operator=(const Uint32* data);
+ operator const Uint32*() const;
+ ConstData& operator+=(size_t n);
+ const AttributeHeader& ah() const;
+ // non-const pointer can be cast to const pointer
+ ConstData(Data data);
+ ConstData& operator=(Data data);
+ };
+
+ // AttributeHeader size is assumed to be 1 word
+ STATIC_CONST( AttributeHeaderSize = 1 );
+
+ /*
+ * Logical tuple address, "local key". Identifies table tuples.
+ */
+ typedef Uint32 TupAddr;
+ STATIC_CONST( NullTupAddr = (Uint32)-1 );
+
+ /*
+ * Physical tuple address in TUP. Provides fast access to table tuple
+ * or index node. Valid within the db node and across timeslices.
+ * Not valid between db nodes or across restarts.
+ *
+ * To avoid wasting an Uint16 the pageid is split in two.
+ */
+ struct TupLoc {
+ private:
+ Uint16 m_pageId1; // page i-value (big-endian)
+ Uint16 m_pageId2;
+ Uint16 m_pageOffset; // page offset in words
+ public:
+ TupLoc();
+ TupLoc(Uint32 pageId, Uint16 pageOffset);
+ Uint32 getPageId() const;
+ void setPageId(Uint32 pageId);
+ Uint32 getPageOffset() const;
+ void setPageOffset(Uint32 pageOffset);
+ bool operator==(const TupLoc& loc) const;
+ bool operator!=(const TupLoc& loc) const;
+ };
+
+ /*
+ * There is no const member NullTupLoc since the compiler may not be
+ * able to optimize it to TupLoc() constants. Instead null values are
+ * constructed on the stack with TupLoc().
+ */
+#define NullTupLoc TupLoc()
+
+ // tree definitions
+
+ /*
+ * Tree entry. Points to a tuple in primary table via physical
+ * address of "original" tuple and tuple version.
+ *
+ * ZTUP_VERSION_BITS must be 15 (or less).
+ */
+ struct TreeEnt;
+ friend struct TreeEnt;
+ struct TreeEnt {
+ TupLoc m_tupLoc; // address of original tuple
+ unsigned m_tupVersion : 15; // version
+ unsigned m_fragBit : 1; // which duplicated table fragment
+ TreeEnt();
+ // methods
+ bool eq(const TreeEnt ent) const;
+ int cmp(const TreeEnt ent) const;
+ };
+ STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 );
+ static const TreeEnt NullTreeEnt;
+
+ /*
+ * Tree node has 1) fixed part 2) a prefix of index key data for min
+ * entry 3) max and min entries 4) rest of entries 5) one extra entry
+ * used as work space.
+ *
+ * struct TreeNode part 1, size 6 words
+ * min prefix part 2, size TreeHead::m_prefSize
+ * max entry part 3
+ * min entry part 3
+ * rest of entries part 4
+ * work entry part 5
+ *
+ * There are 3 links to other nodes: left child, right child, parent.
+ * Occupancy (number of entries) is at least 1 except temporarily when
+ * a node is about to be removed.
+ */
+ struct TreeNode;
+ friend struct TreeNode;
+ struct TreeNode {
+ TupLoc m_link[3]; // link to 0-left child 1-right child 2-parent
+ unsigned m_side : 2; // we are 0-left child 1-right child 2-root
+ unsigned m_balance : 2; // balance -1, 0, +1 plus 1 for Solaris CC
+ unsigned pad1 : 4;
+ Uint8 m_occup; // current number of entries
+ Uint32 m_nodeScan; // list of scans at this node
+ TreeNode();
+ };
+ STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
+
+ /*
+ * Tree node "access size" was for an early version with signal
+ * interface to TUP. It is now used only to compute sizes.
+ */
+ enum AccSize {
+ AccNone = 0,
+ AccHead = 1, // part 1
+ AccPref = 2, // parts 1-3
+ AccFull = 3 // parts 1-5
+ };
+
+ /*
+ * Tree header. There is one in each fragment. Contains tree
+ * parameters and address of root node.
+ */
+ struct TreeHead;
+ friend struct TreeHead;
+ struct TreeHead {
+ Uint8 m_nodeSize; // words in tree node
+ Uint8 m_prefSize; // words in min prefix
+ Uint8 m_minOccup; // min entries in internal node
+ Uint8 m_maxOccup; // max entries in node
+ TupLoc m_root; // root node
+ TreeHead();
+ // methods
+ unsigned getSize(AccSize acc) const;
+ Data getPref(TreeNode* node) const;
+ TreeEnt* getEntList(TreeNode* node) const;
+ };
+
+ /*
+ * Tree position. Specifies node, position within node (from 0 to
+ * m_occup), and whether the position is at an existing entry or
+ * before one (if any). Position m_occup points past the node and is
+ * also represented by position 0 of next node. Includes direction
+ * used by scan.
+ */
+ struct TreePos;
+ friend struct TreePos;
+ struct TreePos {
+ TupLoc m_loc; // physical node address
+ Uint16 m_pos; // position 0 to m_occup
+ Uint8 m_match; // at an existing entry
+ Uint8 m_dir; // see scanNext()
+ TreePos();
+ };
+
+ // packed metadata
+
+ /*
+ * Descriptor page. The "hot" metadata for an index is stored as
+ * a contiguous array of words on some page.
+ */
+ struct DescPage;
+ friend struct DescPage;
+ struct DescPage {
+ Uint32 m_nextPage;
+ Uint32 m_numFree; // number of free words
+ union {
+ Uint32 m_data[DescPageSize];
+ Uint32 nextPool;
+ };
+ DescPage();
+ };
+ typedef Ptr<DescPage> DescPagePtr;
+ ArrayPool<DescPage> c_descPagePool;
+ Uint32 c_descPageList;
+
+ /*
+ * Header for index metadata. Size must be multiple of word size.
+ */
+ struct DescHead {
+ unsigned m_indexId : 24;
+ unsigned pad1 : 8;
+ };
+ STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 );
+
+ /*
+ * Attribute metadata. Size must be multiple of word size.
+ *
+ * Prefix comparison of char data must use strxfrm and binary
+ * comparison. The charset is currently unused.
+ */
+ struct DescAttr {
+ Uint32 m_attrDesc; // standard AttributeDescriptor
+ Uint16 m_primaryAttrId;
+ unsigned m_typeId : 6;
+ unsigned m_charset : 10;
+ };
+ STATIC_CONST( DescAttrSize = sizeof(DescAttr) >> 2 );
+
+ /*
+ * Complete metadata for one index. The array of attributes has
+ * variable size.
+ */
+ struct DescEnt;
+ friend struct DescEnt;
+ struct DescEnt {
+ DescHead m_descHead;
+ DescAttr m_descAttr[1]; // variable size data
+ };
+
+ // range scan
+
+ /*
+ * Scan bounds are stored in linked list of segments.
+ */
+ typedef DataBuffer<ScanBoundSegmentSize> ScanBound;
+ typedef DataBuffer<ScanBoundSegmentSize>::ConstDataBufferIterator ScanBoundIterator;
+ typedef DataBuffer<ScanBoundSegmentSize>::DataBufferPool ScanBoundPool;
+ ScanBoundPool c_scanBoundPool;
+
+ /*
+ * Scan operation.
+ *
+ * Tuples are locked one at a time. The current lock op is set to
+ * RNIL as soon as the lock is obtained and passed to LQH. We must
+ * however remember all locks which LQH has not returned for unlocking
+ * since they must be aborted by us when the scan is closed.
+ *
+ * Scan state describes the entry we are interested in. There is
+ * a separate lock wait flag. It may be for current entry or it may
+ * be for an entry we were moved away from. In any case nothing
+ * happens with current entry before lock wait flag is cleared.
+ *
+ * An unfinished scan is always linked to some tree node, and has
+ * current position and direction (see comments at scanNext). There
+ * is also a copy of latest entry found.
+ */
+ struct ScanOp;
+ friend struct ScanOp;
+ struct ScanOp {
+ enum {
+ Undef = 0,
+ First = 1, // before first entry
+ Current = 2, // at current before locking
+ Blocked = 3, // at current waiting for ACC lock
+ Locked = 4, // at current and locked or no lock needed
+ Next = 5, // looking for next entry
+ Last = 6, // after last entry
+ Aborting = 7, // lock wait at scan close
+ Invalid = 9 // cannot return REF to LQH currently
+ };
+ Uint16 m_state;
+ Uint16 m_lockwait;
+ Uint32 m_userPtr; // scanptr.i in LQH
+ Uint32 m_userRef;
+ Uint32 m_tableId;
+ Uint32 m_indexId;
+ Uint32 m_fragId;
+ Uint32 m_fragPtrI;
+ Uint32 m_transId1;
+ Uint32 m_transId2;
+ Uint32 m_savePointId;
+ // lock waited for or obtained and not yet passed to LQH
+ Uint32 m_accLockOp;
+ Uint8 m_readCommitted; // no locking
+ Uint8 m_lockMode;
+ Uint8 m_descending;
+ ScanBound m_boundMin;
+ ScanBound m_boundMax;
+ ScanBound* m_bound[2]; // pointers to above 2
+ Uint16 m_boundCnt[2]; // number of bounds in each
+ TreePos m_scanPos; // position
+ TreeEnt m_scanEnt; // latest entry found
+ Uint32 m_nodeScan; // next scan at node (single-linked)
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ /*
+ * Locks obtained and passed to LQH but not yet returned by LQH.
+ * The max was increased from 16 to 992 (default 64). Record max
+ * ever used in this scan. TODO fix quadratic behaviour
+ */
+ Uint32 m_maxAccLockOps;
+ Uint32 m_accLockOps[MaxAccLockOps];
+ ScanOp(ScanBoundPool& scanBoundPool);
+ };
+ typedef Ptr<ScanOp> ScanOpPtr;
+ ArrayPool<ScanOp> c_scanOpPool;
+
+ // indexes and fragments
+
+ /*
+ * Ordered index. Top level data structure. The primary table (table
+ * being indexed) lives in TUP.
+ */
+ struct Index;
+ friend struct Index;
+ struct Index {
+ enum State {
+ NotDefined = 0,
+ Defining = 1,
+ Online = 2, // triggers activated and build done
+ Dropping = 9
+ };
+ State m_state;
+ DictTabInfo::TableType m_tableType;
+ Uint32 m_tableId;
+ Uint16 unused;
+ Uint16 m_numFrags;
+ Uint32 m_fragId[MaxIndexFragments];
+ Uint32 m_fragPtrI[MaxIndexFragments];
+ Uint32 m_descPage; // descriptor page
+ Uint16 m_descOff; // offset within the page
+ Uint16 m_numAttrs;
+ bool m_storeNullKey;
+ union {
+ Uint32 nextPool;
+ };
+ Index();
+ };
+ typedef Ptr<Index> IndexPtr;
+ ArrayPool<Index> c_indexPool;
+
+ /*
+ * Fragment of an index, as known to DIH/TC. Represents the two
+ * duplicate fragments known to LQH/ACC/TUP. Includes tree header.
+ * There are no maintenance operation records yet.
+ */
+ struct Frag;
+ friend struct Frag;
+ struct Frag {
+ Uint32 m_tableId; // copy from index level
+ Uint32 m_indexId;
+ Uint16 unused;
+ Uint16 m_fragId;
+ Uint32 m_descPage; // copy from index level
+ Uint16 m_descOff;
+ Uint16 m_numAttrs;
+ bool m_storeNullKey;
+ TreeHead m_tree;
+ TupLoc m_freeLoc; // list of free index nodes
+ DLList<ScanOp> m_scanList; // current scans on this fragment
+ Uint32 m_tupIndexFragPtrI;
+ Uint32 m_tupTableFragPtrI[2];
+ Uint32 m_accTableFragPtrI[2];
+ union {
+ Uint32 nextPool;
+ };
+ Frag(ArrayPool<ScanOp>& scanOpPool);
+ };
+ typedef Ptr<Frag> FragPtr;
+ ArrayPool<Frag> c_fragPool;
+
+ /*
+ * Fragment metadata operation.
+ */
+ struct FragOp {
+ Uint32 m_userPtr;
+ Uint32 m_userRef;
+ Uint32 m_indexId;
+ Uint32 m_fragId;
+ Uint32 m_fragPtrI;
+ Uint32 m_fragNo; // fragment number starting at zero
+ Uint32 m_numAttrsRecvd;
+ union {
+ Uint32 nextPool;
+ };
+ FragOp();
+ };
+ typedef Ptr<FragOp> FragOpPtr;
+ ArrayPool<FragOp> c_fragOpPool;
+
+ // node handles
+
+ /*
+ * A node handle is a reference to a tree node in TUP. It is used to
+ * operate on the node. Node handles are allocated on the stack.
+ */
+ struct NodeHandle;
+ friend struct NodeHandle;
+ struct NodeHandle {
+ Frag& m_frag; // fragment using the node
+ TupLoc m_loc; // physical node address
+ TreeNode* m_node; // pointer to node storage
+ NodeHandle(Frag& frag);
+ NodeHandle(const NodeHandle& node);
+ NodeHandle& operator=(const NodeHandle& node);
+ // check if unassigned
+ bool isNull();
+ // getters
+ TupLoc getLink(unsigned i);
+ unsigned getChilds(); // cannot spell
+ unsigned getSide();
+ unsigned getOccup();
+ int getBalance();
+ Uint32 getNodeScan();
+ // setters
+ void setLink(unsigned i, TupLoc loc);
+ void setSide(unsigned i);
+ void setOccup(unsigned n);
+ void setBalance(int b);
+ void setNodeScan(Uint32 scanPtrI);
+ // access other parts of the node
+ Data getPref();
+ TreeEnt getEnt(unsigned pos);
+ TreeEnt getMinMax(unsigned i);
+ // for ndbrequire and ndbassert
+ void progError(int line, int cause, const char* file);
+ };
+
+ // methods
+
+ /*
+ * DbtuxGen.cpp
+ */
+ void execCONTINUEB(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execREAD_CONFIG_REQ(Signal* signal);
+ // utils
+ void setKeyAttrs(const Frag& frag);
+ void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData);
+ void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize);
+ void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
+
+ /*
+ * DbtuxMeta.cpp
+ */
+ void execTUXFRAGREQ(Signal* signal);
+ void execTUX_ADD_ATTRREQ(Signal* signal);
+ void execALTER_INDX_REQ(Signal* signal);
+ void execDROP_TAB_REQ(Signal* signal);
+ bool allocDescEnt(IndexPtr indexPtr);
+ void freeDescEnt(IndexPtr indexPtr);
+ void abortAddFragOp(Signal* signal);
+ void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
+
+ /*
+ * DbtuxMaint.cpp
+ */
+ void execTUX_MAINT_REQ(Signal* signal);
+
+ /*
+ * DbtuxNode.cpp
+ */
+ int allocNode(Signal* signal, NodeHandle& node);
+ void selectNode(NodeHandle& node, TupLoc loc);
+ void insertNode(NodeHandle& node);
+ void deleteNode(NodeHandle& node);
+ void setNodePref(NodeHandle& node);
+ // node operations
+ void nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList);
+ void nodePushUpScans(NodeHandle& node, unsigned pos);
+ void nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& en, Uint32* scanList);
+ void nodePopDownScans(NodeHandle& node, unsigned pos);
+ void nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList);
+ void nodePushDownScans(NodeHandle& node, unsigned pos);
+ void nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList);
+ void nodePopUpScans(NodeHandle& node, unsigned pos);
+ void nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i);
+ // scans linked to node
+ void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList);
+ void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList);
+ void moveScanList(NodeHandle& node, unsigned pos);
+ void linkScan(NodeHandle& node, ScanOpPtr scanPtr);
+ void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr);
+ bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr);
+
+ /*
+ * DbtuxTree.cpp
+ */
+ // add entry
+ void treeAdd(Frag& frag, TreePos treePos, TreeEnt ent);
+ void treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent);
+ void treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i);
+ void treeAddRebalance(Frag& frag, NodeHandle node, unsigned i);
+ // remove entry
+ void treeRemove(Frag& frag, TreePos treePos);
+ void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos);
+ void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i);
+ void treeRemoveLeaf(Frag& frag, NodeHandle node);
+ void treeRemoveNode(Frag& frag, NodeHandle node);
+ void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i);
+ // rotate
+ void treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i);
+ void treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i);
+
+ /*
+ * DbtuxScan.cpp
+ */
+ void execACC_SCANREQ(Signal* signal);
+ void execTUX_BOUND_INFO(Signal* signal);
+ void execNEXT_SCANREQ(Signal* signal);
+ void execACC_CHECK_SCAN(Signal* signal);
+ void execACCKEYCONF(Signal* signal);
+ void execACCKEYREF(Signal* signal);
+ void execACC_ABORTCONF(Signal* signal);
+ void scanFirst(ScanOpPtr scanPtr);
+ void scanNext(ScanOpPtr scanPtr, bool fromMaintReq);
+ bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
+ void scanClose(Signal* signal, ScanOpPtr scanPtr);
+ void addAccLockOp(ScanOp& scan, Uint32 accLockOp);
+ void removeAccLockOp(ScanOp& scan, Uint32 accLockOp);
+ void releaseScanOp(ScanOpPtr& scanPtr);
+
+ /*
+ * DbtuxSearch.cpp
+ */
+ void searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos);
+ void searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+ void searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+
+ /*
+ * DbtuxCmp.cpp
+ */
+ int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
+ int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
+
+ /*
+ * DbtuxDebug.cpp
+ */
+ void execDUMP_STATE_ORD(Signal* signal);
+#ifdef VM_TRACE
+ struct PrintPar {
+ char m_path[100]; // LR prefix
+ unsigned m_side; // expected side
+ TupLoc m_parent; // expected parent address
+ int m_depth; // returned depth
+ unsigned m_occup; // returned occupancy
+ TreeEnt m_minmax[2]; // returned subtree min and max
+ bool m_ok; // returned status
+ PrintPar();
+ };
+ void printTree(Signal* signal, Frag& frag, NdbOut& out);
+ void printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par);
+ friend class NdbOut& operator<<(NdbOut&, const TupLoc&);
+ friend class NdbOut& operator<<(NdbOut&, const TreeEnt&);
+ friend class NdbOut& operator<<(NdbOut&, const TreeNode&);
+ friend class NdbOut& operator<<(NdbOut&, const TreeHead&);
+ friend class NdbOut& operator<<(NdbOut&, const TreePos&);
+ friend class NdbOut& operator<<(NdbOut&, const DescAttr&);
+ friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
+ friend class NdbOut& operator<<(NdbOut&, const Index&);
+ friend class NdbOut& operator<<(NdbOut&, const Frag&);
+ friend class NdbOut& operator<<(NdbOut&, const FragOp&);
+ friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
+ FILE* debugFile;
+ NdbOut debugOut;
+ unsigned debugFlags;
+ enum {
+ DebugMeta = 1, // log create and drop index
+ DebugMaint = 2, // log maintenance ops
+ DebugTree = 4, // log and check tree after each op
+ DebugScan = 8 // log scans
+ };
+ STATIC_CONST( DataFillByte = 0xa2 );
+ STATIC_CONST( NodeFillByte = 0xa4 );
+#endif
+
+ // start up info
+ Uint32 c_internalStartPhase;
+ Uint32 c_typeOfStart;
+
+ /*
+ * Global data set at operation start. Unpacked from index metadata.
+ * Not passed as parameter to methods. Invalid across timeslices.
+ *
+ * TODO inline all into index metadata
+ */
+
+ // index key attr ids with sizes in AttributeHeader format
+ Data c_keyAttrs;
+
+ // pointers to index key comparison functions
+ NdbSqlUtil::Cmp** c_sqlCmp;
+
+ /*
+ * Other buffers used during the operation.
+ */
+
+ // buffer for search key data with headers
+ Data c_searchKey;
+
+ // buffer for current entry key data with headers
+ Data c_entryKey;
+
+ // buffer for scan bounds and keyinfo (primary key)
+ Data c_dataBuffer;
+
+ // inlined utils
+ DescEnt& getDescEnt(Uint32 descPage, Uint32 descOff);
+ Uint32 getTupAddr(const Frag& frag, TreeEnt ent);
+ static unsigned min(unsigned x, unsigned y);
+ static unsigned max(unsigned x, unsigned y);
+};
+
+// Dbtux::Data
+
+inline
+Dbtux::Data::Data() :
+ m_data(0)
+{
+}
+
+inline
+Dbtux::Data::Data(Uint32* data) :
+ m_data(data)
+{
+}
+
+inline Dbtux::Data&
+Dbtux::Data::operator=(Uint32* data)
+{
+ m_data = data;
+ return *this;
+}
+
+inline
+Dbtux::Data::operator Uint32*() const
+{
+ return m_data;
+}
+
+inline Dbtux::Data&
+Dbtux::Data::operator+=(size_t n)
+{
+ m_data += n;
+ return *this;
+}
+
+inline AttributeHeader&
+Dbtux::Data::ah() const
+{
+ return *reinterpret_cast<AttributeHeader*>(m_data);
+}
+
+// Dbtux::ConstData
+
+inline
+Dbtux::ConstData::ConstData() :
+ m_data(0)
+{
+}
+
+inline
+Dbtux::ConstData::ConstData(const Uint32* data) :
+ m_data(data)
+{
+}
+
+inline Dbtux::ConstData&
+Dbtux::ConstData::operator=(const Uint32* data)
+{
+ m_data = data;
+ return *this;
+}
+
+inline
+Dbtux::ConstData::operator const Uint32*() const
+{
+ return m_data;
+}
+
+inline Dbtux::ConstData&
+Dbtux::ConstData::operator+=(size_t n)
+{
+ m_data += n;
+ return *this;
+}
+
+inline const AttributeHeader&
+Dbtux::ConstData::ah() const
+{
+ return *reinterpret_cast<const AttributeHeader*>(m_data);
+}
+
+inline
+Dbtux::ConstData::ConstData(Data data) :
+ m_data(static_cast<Uint32*>(data))
+{
+}
+
+inline Dbtux::ConstData&
+Dbtux::ConstData::operator=(Data data)
+{
+ m_data = static_cast<Uint32*>(data);
+ return *this;
+}
+
+// Dbtux::TupLoc
+
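+// The 32-bit page id is stored as two 16-bit halves (m_pageId1 high, m_pageId2 low).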
+inline
+Dbtux::TupLoc::TupLoc() :
+ m_pageId1(RNIL >> 16),
+ m_pageId2(RNIL & 0xFFFF),
+ m_pageOffset(0)
+{
+}
+
+inline
+Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) :
+ m_pageId1(pageId >> 16),
+ m_pageId2(pageId & 0xFFFF),
+ m_pageOffset(pageOffset)
+{
+}
+
+inline Uint32
+Dbtux::TupLoc::getPageId() const
+{
+ return (m_pageId1 << 16) | m_pageId2;
+}
+
+inline void
+Dbtux::TupLoc::setPageId(Uint32 pageId)
+{
+ m_pageId1 = (pageId >> 16);
+ m_pageId2 = (pageId & 0xFFFF);
+}
+
+inline Uint32
+Dbtux::TupLoc::getPageOffset() const
+{
+ return (Uint32)m_pageOffset;
+}
+
+inline void
+Dbtux::TupLoc::setPageOffset(Uint32 pageOffset)
+{
+ m_pageOffset = (Uint16)pageOffset;
+}
+
+inline bool
+Dbtux::TupLoc::operator==(const TupLoc& loc) const
+{
+ return
+ m_pageId1 == loc.m_pageId1 &&
+ m_pageId2 == loc.m_pageId2 &&
+ m_pageOffset == loc.m_pageOffset;
+}
+
+inline bool
+Dbtux::TupLoc::operator!=(const TupLoc& loc) const
+{
+ return ! (*this == loc);
+}
+
+// Dbtux::TreeEnt
+
+inline
+Dbtux::TreeEnt::TreeEnt() :
+ m_tupLoc(),
+ m_tupVersion(0),
+ m_fragBit(0)
+{
+}
+
+inline bool
+Dbtux::TreeEnt::eq(const TreeEnt ent) const
+{
+ return
+ m_tupLoc == ent.m_tupLoc &&
+ m_tupVersion == ent.m_tupVersion &&
+ m_fragBit == ent.m_fragBit;
+}
+
+inline int
+Dbtux::TreeEnt::cmp(const TreeEnt ent) const
+{
+ if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId())
+ return -1;
+ if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId())
+ return +1;
+ if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset())
+ return -1;
+ if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset())
+ return +1;
+ if (m_tupVersion < ent.m_tupVersion)
+ return -1;
+ if (m_tupVersion > ent.m_tupVersion)
+ return +1;
+ if (m_fragBit < ent.m_fragBit)
+ return -1;
+ if (m_fragBit > ent.m_fragBit)
+ return +1;
+ return 0;
+}
+
+// Dbtux::TreeNode
+
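+// m_balance is stored biased by +1 (see NodeHandle::getBalance/setBalance).
+// A side value of 2 means "no parent side"; the root keeps this value.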
+inline
+Dbtux::TreeNode::TreeNode() :
+ m_side(2),
+ m_balance(0 + 1),
+ pad1(0),
+ m_occup(0),
+ m_nodeScan(RNIL)
+{
+ m_link[0] = NullTupLoc;
+ m_link[1] = NullTupLoc;
+ m_link[2] = NullTupLoc;
+}
+
+// Dbtux::TreeHead
+
+inline
+Dbtux::TreeHead::TreeHead() :
+ m_nodeSize(0),
+ m_prefSize(0),
+ m_minOccup(0),
+ m_maxOccup(0),
+ m_root()
+{
+}
+
+inline unsigned
+Dbtux::TreeHead::getSize(AccSize acc) const
+{
+ switch (acc) {
+ case AccNone:
+ return 0;
+ case AccHead:
+ return NodeHeadSize;
+ case AccPref:
+ return NodeHeadSize + m_prefSize + 2 * TreeEntSize;
+ case AccFull:
+ return m_nodeSize;
+ }
+ return 0;
+}
+
+inline Dbtux::Data
+Dbtux::TreeHead::getPref(TreeNode* node) const
+{
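+ // node sizes and offsets are in 32-bit words, so pointer arithmetic is done on Uint32*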
+ Uint32* ptr = (Uint32*)node + NodeHeadSize;
+ return ptr;
+}
+
+inline Dbtux::TreeEnt*
+Dbtux::TreeHead::getEntList(TreeNode* node) const
+{
+ Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize;
+ return (TreeEnt*)ptr;
+}
+
+// Dbtux::TreePos
+
+inline
+Dbtux::TreePos::TreePos() :
+ m_loc(),
+ m_pos(ZNIL),
+ m_match(false),
+ m_dir(255)
+{
+}
+
+// Dbtux::DescPage
+
+inline
+Dbtux::DescPage::DescPage() :
+ m_nextPage(RNIL),
+ m_numFree(ZNIL)
+{
+ for (unsigned i = 0; i < DescPageSize; i++) {
+#ifdef VM_TRACE
+ m_data[i] = 0x13571357;
+#else
+ m_data[i] = 0;
+#endif
+ }
+}
+
+// Dbtux::ScanOp
+
+inline
+Dbtux::ScanOp::ScanOp(ScanBoundPool& scanBoundPool) :
+ m_state(Undef),
+ m_lockwait(false),
+ m_userPtr(RNIL),
+ m_userRef(RNIL),
+ m_tableId(RNIL),
+ m_indexId(RNIL),
+ m_fragPtrI(RNIL),
+ m_transId1(0),
+ m_transId2(0),
+ m_savePointId(0),
+ m_accLockOp(RNIL),
+ m_readCommitted(0),
+ m_lockMode(0),
+ m_descending(0),
+ m_boundMin(scanBoundPool),
+ m_boundMax(scanBoundPool),
+ m_scanPos(),
+ m_scanEnt(),
+ m_nodeScan(RNIL),
+ m_maxAccLockOps(0)
+{
+ m_bound[0] = &m_boundMin;
+ m_bound[1] = &m_boundMax;
+ m_boundCnt[0] = 0;
+ m_boundCnt[1] = 0;
+#ifdef VM_TRACE
+ for (unsigned i = 0; i < MaxAccLockOps; i++) {
+ m_accLockOps[i] = 0x1f1f1f1f;
+ }
+#endif
+}
+
+// Dbtux::Index
+
+inline
+Dbtux::Index::Index() :
+ m_state(NotDefined),
+ m_tableType(DictTabInfo::UndefTableType),
+ m_tableId(RNIL),
+ m_numFrags(0),
+ m_descPage(RNIL),
+ m_descOff(0),
+ m_numAttrs(0),
+ m_storeNullKey(false)
+{
+ for (unsigned i = 0; i < MaxIndexFragments; i++) {
+ m_fragId[i] = ZNIL;
+ m_fragPtrI[i] = RNIL;
+ };
+}
+
+// Dbtux::Frag
+
+inline
+Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
+ m_tableId(RNIL),
+ m_indexId(RNIL),
+ m_fragId(ZNIL),
+ m_descPage(RNIL),
+ m_descOff(0),
+ m_numAttrs(ZNIL),
+ m_storeNullKey(false),
+ m_tree(),
+ m_freeLoc(),
+ m_scanList(scanOpPool),
+ m_tupIndexFragPtrI(RNIL)
+{
+ m_tupTableFragPtrI[0] = RNIL;
+ m_tupTableFragPtrI[1] = RNIL;
+ m_accTableFragPtrI[0] = RNIL;
+ m_accTableFragPtrI[1] = RNIL;
+}
+
+// Dbtux::FragOp
+
+inline
+Dbtux::FragOp::FragOp() :
+ m_userPtr(RNIL),
+ m_userRef(RNIL),
+ m_indexId(RNIL),
+ m_fragId(ZNIL),
+ m_fragPtrI(RNIL),
+ m_fragNo(ZNIL),
+ m_numAttrsRecvd(ZNIL)
+{
+}
+
+// Dbtux::NodeHandle
+
+inline
+Dbtux::NodeHandle::NodeHandle(Frag& frag) :
+ m_frag(frag),
+ m_loc(),
+ m_node(0)
+{
+}
+
+inline
+Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) :
+ m_frag(node.m_frag),
+ m_loc(node.m_loc),
+ m_node(node.m_node)
+{
+}
+
+inline Dbtux::NodeHandle&
+Dbtux::NodeHandle::operator=(const NodeHandle& node)
+{
+ ndbassert(&m_frag == &node.m_frag);
+ m_loc = node.m_loc;
+ m_node = node.m_node;
+ return *this;
+}
+
+inline bool
+Dbtux::NodeHandle::isNull()
+{
+ return m_node == 0;
+}
+
+inline Dbtux::TupLoc
+Dbtux::NodeHandle::getLink(unsigned i)
+{
+ ndbrequire(i <= 2);
+ return m_node->m_link[i];
+}
+
+inline unsigned
+Dbtux::NodeHandle::getChilds()
+{
+ return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc);
+}
+
+inline unsigned
+Dbtux::NodeHandle::getSide()
+{
+ return m_node->m_side;
+}
+
+inline unsigned
+Dbtux::NodeHandle::getOccup()
+{
+ return m_node->m_occup;
+}
+
+inline int
+Dbtux::NodeHandle::getBalance()
+{
+ return (int)m_node->m_balance - 1;
+}
+
+inline Uint32
+Dbtux::NodeHandle::getNodeScan()
+{
+ return m_node->m_nodeScan;
+}
+
+inline void
+Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc)
+{
+ ndbrequire(i <= 2);
+ m_node->m_link[i] = loc;
+}
+
+inline void
+Dbtux::NodeHandle::setSide(unsigned i)
+{
+ ndbrequire(i <= 2);
+ m_node->m_side = i;
+}
+
+inline void
+Dbtux::NodeHandle::setOccup(unsigned n)
+{
+ TreeHead& tree = m_frag.m_tree;
+ ndbrequire(n <= tree.m_maxOccup);
+ m_node->m_occup = n;
+}
+
+inline void
+Dbtux::NodeHandle::setBalance(int b)
+{
+ ndbrequire(abs(b) <= 1);
+ m_node->m_balance = (unsigned)(b + 1);
+}
+
+inline void
+Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI)
+{
+ m_node->m_nodeScan = scanPtrI;
+}
+
+inline Dbtux::Data
+Dbtux::NodeHandle::getPref()
+{
+ TreeHead& tree = m_frag.m_tree;
+ return tree.getPref(m_node);
+}
+
+inline Dbtux::TreeEnt
+Dbtux::NodeHandle::getEnt(unsigned pos)
+{
+ TreeHead& tree = m_frag.m_tree;
+ TreeEnt* entList = tree.getEntList(m_node);
+ const unsigned occup = m_node->m_occup;
+ ndbrequire(pos < occup);
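+ // logical position pos maps to array index (1 + pos) % occup, so the last logical entry sits at index 0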
+ return entList[(1 + pos) % occup];
+}
+
+inline Dbtux::TreeEnt
+Dbtux::NodeHandle::getMinMax(unsigned i)
+{
+ const unsigned occup = m_node->m_occup;
+ ndbrequire(i <= 1 && occup != 0);
+ return getEnt(i == 0 ? 0 : occup - 1);
+}
+
+// parameters for methods
+
+#ifdef VM_TRACE
+inline
+Dbtux::PrintPar::PrintPar() :
+ // caller fills in
+ m_path(),
+ m_side(255),
+ m_parent(),
+ // default return values
+ m_depth(0),
+ m_occup(0),
+ m_ok(true)
+{
+}
+#endif
+
+// utils
+
+inline Dbtux::DescEnt&
+Dbtux::getDescEnt(Uint32 descPage, Uint32 descOff)
+{
+ DescPagePtr pagePtr;
+ pagePtr.i = descPage;
+ c_descPagePool.getPtr(pagePtr);
+ ndbrequire(descOff < DescPageSize);
+ DescEnt* descEnt = (DescEnt*)&pagePtr.p->m_data[descOff];
+ return *descEnt;
+}
+
+inline Uint32
+Dbtux::getTupAddr(const Frag& frag, TreeEnt ent)
+{
+ const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
+ const TupLoc tupLoc = ent.m_tupLoc;
+ Uint32 tupAddr = NullTupAddr;
+ c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupAddr);
+ jamEntry();
+ return tupAddr;
+}
+
+inline unsigned
+Dbtux::min(unsigned x, unsigned y)
+{
+ return x < y ? x : y;
+}
+
+inline unsigned
+Dbtux::max(unsigned x, unsigned y)
+{
+ return x > y ? x : y;
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
new file mode 100644
index 00000000000..cf815b14c1a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
@@ -0,0 +1,175 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_CMP_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Search key vs node prefix or entry.
+ *
+ * The comparison starts at given attribute position. The position is
+ * updated by number of equal initial attributes found. The entry data
+ * may be partial in which case CmpUnknown may be returned.
+ *
+ * The attributes are normalized and have variable size given in words.
+ */
+int
+Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
+{
+ const unsigned numAttrs = frag.m_numAttrs;
+ const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
+ // skip to right position in search key only
+ for (unsigned i = 0; i < start; i++) {
+ jam();
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
+ }
+ // number of words of entry data left
+ unsigned len2 = maxlen;
+ int ret = 0;
+ while (start < numAttrs) {
+ if (len2 <= AttributeHeaderSize) {
+ jam();
+ ret = NdbSqlUtil::CmpUnknown;
+ break;
+ }
+ len2 -= AttributeHeaderSize;
+ if (! searchKey.ah().isNULL()) {
+ if (! entryData.ah().isNULL()) {
+ jam();
+ // verify attribute id
+ const DescAttr& descAttr = descEnt.m_descAttr[start];
+ ndbrequire(searchKey.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ // sizes
+ const unsigned size1 = searchKey.ah().getDataSize();
+ const unsigned size2 = min(entryData.ah().getDataSize(), len2);
+ len2 -= size2;
+ // compare
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
+ const Uint32* const p1 = &searchKey[AttributeHeaderSize];
+ const Uint32* const p2 = &entryData[AttributeHeaderSize];
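+ // the entry data is complete (not truncated to a node prefix) only when maxlen is MaxAttrDataSize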
+ const bool full = (maxlen == MaxAttrDataSize);
+ ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full);
+ if (ret != 0) {
+ jam();
+ break;
+ }
+ } else {
+ jam();
+ // not NULL > NULL
+ ret = +1;
+ break;
+ }
+ } else {
+ if (! entryData.ah().isNULL()) {
+ jam();
+ // NULL < not NULL
+ ret = -1;
+ break;
+ }
+ }
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
+ entryData += AttributeHeaderSize + entryData.ah().getDataSize();
+ start++;
+ }
+ return ret;
+}
+
+/*
+ * Scan bound vs node prefix or entry.
+ *
+ * Compare lower or upper bound and index entry data. The entry data
+ * may be partial in which case CmpUnknown may be returned. Otherwise
+ * returns -1 if the bound is to the left of the entry and +1 if the
+ * bound is to the right of the entry.
+ *
+ * The routine is similar to cmpSearchKey, but 0 is never returned.
+ * Suppose all attributes compare equal. Recall that all bounds except
+ * possibly the last one are non-strict. Use the given bound direction
+ * (0-lower 1-upper) and strictness of last bound to return -1 or +1.
+ *
+ * The following example illustrates this. We are at (a=2, b=3).
+ *
+ * idir bounds strict return
+ * 0 a >= 2 and b >= 3 no -1
+ * 0 a >= 2 and b > 3 yes +1
+ * 1 a <= 2 and b <= 3 no +1
+ * 1 a <= 2 and b < 3 yes -1
+ *
+ * The attributes are normalized and have variable size given in words.
+ */
+int
+Dbtux::cmpScanBound(const Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen)
+{
+ const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
+ // direction 0-lower 1-upper
+ ndbrequire(idir <= 1);
+ // number of words of data left
+ unsigned len2 = maxlen;
+ // in case of no bounds, init last type to something non-strict
+ unsigned type = 4;
+ while (boundCount != 0) {
+ if (len2 <= AttributeHeaderSize) {
+ jam();
+ return NdbSqlUtil::CmpUnknown;
+ }
+ len2 -= AttributeHeaderSize;
+ // get and skip bound type (it is used after the loop)
+ type = boundInfo[0];
+ boundInfo += 1;
+ if (! boundInfo.ah().isNULL()) {
+ if (! entryData.ah().isNULL()) {
+ jam();
+ // verify attribute id
+ const Uint32 index = boundInfo.ah().getAttributeId();
+ ndbrequire(index < frag.m_numAttrs);
+ const DescAttr& descAttr = descEnt.m_descAttr[index];
+ ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ // sizes
+ const unsigned size1 = boundInfo.ah().getDataSize();
+ const unsigned size2 = min(entryData.ah().getDataSize(), len2);
+ len2 -= size2;
+ // compare
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index];
+ const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
+ const Uint32* const p2 = &entryData[AttributeHeaderSize];
+ const bool full = (maxlen == MaxAttrDataSize);
+ int ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full);
+ if (ret != 0) {
+ jam();
+ return ret;
+ }
+ } else {
+ jam();
+ // not NULL > NULL
+ return +1;
+ }
+ } else {
+ jam();
+ if (! entryData.ah().isNULL()) {
+ jam();
+ // NULL < not NULL
+ return -1;
+ }
+ }
+ boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
+ entryData += AttributeHeaderSize + entryData.ah().getDataSize();
+ boundCount -= 1;
+ }
+ // all attributes were equal
+ const int strict = (type & 0x1);
+ return (idir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1));
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
new file mode 100644
index 00000000000..ed29dc57915
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -0,0 +1,443 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_DEBUG_CPP
+#include "Dbtux.hpp"
+
+/*
+ * 12001 log file 0-close 1-open 2-append 3-append to signal log
+ * 12002 log flags 1-meta 2-maint 4-tree 8-scan
+ */
+void
+Dbtux::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+#ifdef VM_TRACE
+ if (signal->theData[0] == DumpStateOrd::TuxLogToFile) {
+ unsigned flag = signal->theData[1];
+ const char* const tuxlog = "tux.log";
+ FILE* slFile = globalSignalLoggers.getOutputStream();
+ if (flag <= 3) {
+ if (debugFile != 0) {
+ if (debugFile != slFile)
+ fclose(debugFile);
+ debugFile = 0;
+ debugOut = *new NdbOut(*new NullOutputStream());
+ }
+ if (flag == 1)
+ debugFile = fopen(tuxlog, "w");
+ if (flag == 2)
+ debugFile = fopen(tuxlog, "a");
+ if (flag == 3)
+ debugFile = slFile;
+ if (debugFile != 0)
+ debugOut = *new NdbOut(*new FileOutputStream(debugFile));
+ }
+ return;
+ }
+ if (signal->theData[0] == DumpStateOrd::TuxSetLogFlags) {
+ debugFlags = signal->theData[1];
+ return;
+ }
+ if (signal->theData[0] == DumpStateOrd::TuxMetaDataJunk) {
+ // read table definition
+ Uint32 tableId = signal->theData[1];
+ Uint32 tableVersion = signal->theData[2];
+ int ret;
+ MetaData md(this);
+ MetaData::Table table;
+ MetaData::Attribute attribute;
+ infoEvent("md: read table %u %u", tableId, tableVersion);
+ if ((ret = md.lock(false)) < 0) {
+ infoEvent("md.lock error %d", ret);
+ return;
+ }
+ if ((ret = md.getTable(table, tableId, tableVersion)) < 0) {
+ infoEvent("md.getTable error %d", ret);
+ // lock is released by destructor
+ return;
+ }
+ infoEvent("md: %s type=%d attrs=%u", table.tableName, table.tableType, table.noOfAttributes);
+ for (Uint32 i = 0; i < table.noOfAttributes; i++) {
+ if ((ret = md.getAttribute(attribute, table, i)) < 0) {
+ infoEvent("mg.getAttribute %u error %d", i, ret);
+ // lock is released by destructor
+ return;
+ }
+ infoEvent("md: %d %s", attribute.attributeId, attribute.attributeName);
+ }
+ if ((ret = md.unlock(false)) < 0) {
+ infoEvent("md.unlock error %d", ret);
+ return;
+ }
+ return;
+ }
+#endif
+}
+
+#ifdef VM_TRACE
+
+void
+Dbtux::printTree(Signal* signal, Frag& frag, NdbOut& out)
+{
+ TreeHead& tree = frag.m_tree;
+ PrintPar par;
+ strcpy(par.m_path, ".");
+ par.m_side = 2;
+ par.m_parent = NullTupLoc;
+ printNode(frag, out, tree.m_root, par);
+ out.m_out->flush();
+ if (! par.m_ok) {
+ if (debugFile == 0) {
+ signal->theData[0] = 12001;
+ signal->theData[1] = 1;
+ execDUMP_STATE_ORD(signal);
+ if (debugFile != 0) {
+ printTree(signal, frag, debugOut);
+ }
+ }
+ ndbrequire(false);
+ }
+}
+
+void
+Dbtux::printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par)
+{
+ if (loc == NullTupLoc) {
+ par.m_depth = 0;
+ return;
+ }
+ TreeHead& tree = frag.m_tree;
+ NodeHandle node(frag);
+ selectNode(node, loc);
+ out << par.m_path << " " << node << endl;
+ // check children
+ PrintPar cpar[2];
+ ndbrequire(strlen(par.m_path) + 1 < sizeof(par.m_path));
+ for (unsigned i = 0; i <= 1; i++) {
+ sprintf(cpar[i].m_path, "%s%c", par.m_path, "LR"[i]);
+ cpar[i].m_side = i;
+ cpar[i].m_depth = 0;
+ cpar[i].m_parent = loc;
+ printNode(frag, out, node.getLink(i), cpar[i]);
+ if (! cpar[i].m_ok) {
+ par.m_ok = false;
+ }
+ }
+ static const char* const sep = " *** ";
+ // check child-parent links
+ if (node.getLink(2) != par.m_parent) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "parent loc " << hex << node.getLink(2);
+ out << " should be " << hex << par.m_parent << endl;
+ }
+ if (node.getSide() != par.m_side) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "side " << dec << node.getSide();
+ out << " should be " << dec << par.m_side << endl;
+ }
+ // check balance
+ const int balance = -cpar[0].m_depth + cpar[1].m_depth;
+ if (node.getBalance() != balance) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "balance " << node.getBalance();
+ out << " should be " << balance << endl;
+ }
+ if (abs(node.getBalance()) > 1) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "balance " << node.getBalance() << " is invalid" << endl;
+ }
+ // check occupancy
+ if (node.getOccup() == 0 || node.getOccup() > tree.m_maxOccup) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "occupancy " << node.getOccup();
+ out << " zero or greater than max " << tree.m_maxOccup << endl;
+ }
+ // check for occupancy of interior node
+ if (node.getChilds() == 2 && node.getOccup() < tree.m_minOccup) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "occupancy " << node.getOccup() << " of interior node";
+ out << " less than min " << tree.m_minOccup << endl;
+ }
+#ifdef dbtux_totally_groks_t_trees
+ // check missed semi-leaf/leaf merge
+ for (unsigned i = 0; i <= 1; i++) {
+ if (node.getLink(i) != NullTupLoc &&
+ node.getLink(1 - i) == NullTupLoc &&
+ // our semi-leaf seems to satisfy interior minOccup condition
+ node.getOccup() < tree.m_minOccup) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "missed merge with child " << i << endl;
+ }
+ }
+#endif
+ // check inline prefix
+ { ConstData data1 = node.getPref();
+ Uint32 data2[MaxPrefSize];
+ memset(data2, DataFillByte, MaxPrefSize << 2);
+ readKeyAttrs(frag, node.getMinMax(0), 0, c_searchKey);
+ copyAttrs(frag, c_searchKey, data2, tree.m_prefSize);
+ for (unsigned n = 0; n < tree.m_prefSize; n++) {
+ if (data1[n] != data2[n]) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << "inline prefix mismatch word " << n;
+ out << " value " << hex << data1[n];
+ out << " should be " << hex << data2[n] << endl;
+ break;
+ }
+ }
+ }
+ // check ordering within node
+ for (unsigned j = 1; j < node.getOccup(); j++) {
+ const TreeEnt ent1 = node.getEnt(j - 1);
+ const TreeEnt ent2 = node.getEnt(j);
+ unsigned start = 0;
+ readKeyAttrs(frag, ent1, start, c_searchKey);
+ readKeyAttrs(frag, ent2, start, c_entryKey);
+ int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
+ if (ret == 0)
+ ret = ent1.cmp(ent2);
+ if (ret != -1) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << " disorder within node at pos " << j << endl;
+ }
+ }
+ // check ordering wrt subtrees
+ for (unsigned i = 0; i <= 1; i++) {
+ if (node.getLink(i) == NullTupLoc)
+ continue;
+ const TreeEnt ent1 = cpar[i].m_minmax[1 - i];
+ const TreeEnt ent2 = node.getMinMax(i);
+ unsigned start = 0;
+ readKeyAttrs(frag, ent1, start, c_searchKey);
+ readKeyAttrs(frag, ent2, start, c_entryKey);
+ int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
+ if (ret == 0)
+ ret = ent1.cmp(ent2);
+ if (ret != (i == 0 ? -1 : +1)) {
+ par.m_ok = false;
+ out << par.m_path << sep;
+ out << " disorder wrt subtree " << i << endl;
+ }
+ }
+ // return values
+ par.m_depth = 1 + max(cpar[0].m_depth, cpar[1].m_depth);
+ par.m_occup = node.getOccup();
+ for (unsigned i = 0; i <= 1; i++) {
+ if (node.getLink(i) == NullTupLoc)
+ par.m_minmax[i] = node.getMinMax(i);
+ else
+ par.m_minmax[i] = cpar[i].m_minmax[i];
+ }
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::TupLoc& loc)
+{
+ if (loc == Dbtux::NullTupLoc) {
+ out << "null";
+ } else {
+ out << dec << loc.getPageId();
+ out << "." << dec << loc.getPageOffset();
+ }
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::TreeEnt& ent)
+{
+ out << dec << ent.m_fragBit;
+ out << "-" << ent.m_tupLoc;
+ out << "-" << dec << ent.m_tupVersion;
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::TreeNode& node)
+{
+ out << "[TreeNode " << hex << &node;
+ out << " [left " << node.m_link[0] << "]";
+ out << " [right " << node.m_link[1] << "]";
+ out << " [up " << node.m_link[2] << "]";
+ out << " [side " << dec << node.m_side << "]";
+ out << " [occup " << dec << node.m_occup << "]";
+ out << " [balance " << dec << (int)node.m_balance - 1 << "]";
+ out << " [nodeScan " << hex << node.m_nodeScan << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::TreeHead& tree)
+{
+ out << "[TreeHead " << hex << &tree;
+ out << " [nodeSize " << dec << tree.m_nodeSize << "]";
+ out << " [prefSize " << dec << tree.m_prefSize << "]";
+ out << " [minOccup " << dec << tree.m_minOccup << "]";
+ out << " [maxOccup " << dec << tree.m_maxOccup << "]";
+ out << " [AccHead " << dec << tree.getSize(Dbtux::AccHead) << "]";
+ out << " [AccPref " << dec << tree.getSize(Dbtux::AccPref) << "]";
+ out << " [AccFull " << dec << tree.getSize(Dbtux::AccFull) << "]";
+ out << " [root " << hex << tree.m_root << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::TreePos& pos)
+{
+ out << "[TreePos " << hex << &pos;
+ out << " [loc " << pos.m_loc << "]";
+ out << " [pos " << dec << pos.m_pos << "]";
+ out << " [match " << dec << pos.m_match << "]";
+ out << " [dir " << dec << pos.m_dir << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::DescAttr& descAttr)
+{
+ out << "[DescAttr " << hex << &descAttr;
+ out << " [attrDesc " << hex << descAttr.m_attrDesc;
+ out << " [primaryAttrId " << dec << descAttr.m_primaryAttrId << "]";
+ out << " [typeId " << dec << descAttr.m_typeId << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::ScanOp& scan)
+{
+ out << "[ScanOp " << hex << &scan;
+ out << " [state " << dec << scan.m_state << "]";
+ out << " [lockwait " << dec << scan.m_lockwait << "]";
+ out << " [indexId " << dec << scan.m_indexId << "]";
+ out << " [fragId " << dec << scan.m_fragId << "]";
+ out << " [transId " << hex << scan.m_transId1 << " " << scan.m_transId2 << "]";
+ out << " [savePointId " << dec << scan.m_savePointId << "]";
+ out << " [accLockOp " << hex << scan.m_accLockOp << "]";
+ out << " [accLockOps";
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
+ if (scan.m_accLockOps[i] != RNIL)
+ out << " " << hex << scan.m_accLockOps[i];
+ }
+ out << "]";
+ out << " [readCommitted " << dec << scan.m_readCommitted << "]";
+ out << " [lockMode " << dec << scan.m_lockMode << "]";
+ out << " [descending " << dec << scan.m_descending << "]";
+ out << " [pos " << scan.m_scanPos << "]";
+ out << " [ent " << scan.m_scanEnt << "]";
+ for (unsigned i = 0; i <= 1; i++) {
+ out << " [bound " << dec << i;
+ Dbtux::ScanBound& bound = *scan.m_bound[i];
+ Dbtux::ScanBoundIterator iter;
+ bound.first(iter);
+ for (unsigned j = 0; j < bound.getSize(); j++) {
+ out << " " << hex << *iter.data;
+ bound.next(iter);
+ }
+ out << "]";
+ }
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::Index& index)
+{
+ out << "[Index " << hex << &index;
+ out << " [tableId " << dec << index.m_tableId << "]";
+ out << " [numFrags " << dec << index.m_numFrags << "]";
+ for (unsigned i = 0; i < index.m_numFrags; i++) {
+ out << " [frag " << dec << i << " ";
+ // dangerous and wrong
+ Dbtux* tux = (Dbtux*)globalData.getBlock(DBTUX);
+ const Dbtux::Frag& frag = *tux->c_fragPool.getPtr(index.m_fragPtrI[i]);
+ out << frag;
+ out << "]";
+ }
+ out << " [descPage " << hex << index.m_descPage << "]";
+ out << " [descOff " << dec << index.m_descOff << "]";
+ out << " [numAttrs " << dec << index.m_numAttrs << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::Frag& frag)
+{
+ out << "[Frag " << hex << &frag;
+ out << " [tableId " << dec << frag.m_tableId << "]";
+ out << " [indexId " << dec << frag.m_indexId << "]";
+ out << " [fragId " << dec << frag.m_fragId << "]";
+ out << " [descPage " << hex << frag.m_descPage << "]";
+ out << " [descOff " << dec << frag.m_descOff << "]";
+ out << " [numAttrs " << dec << frag.m_numAttrs << "]";
+ out << " [tree " << frag.m_tree << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::FragOp& fragOp)
+{
+ out << "[FragOp " << hex << &fragOp;
+ out << " [userPtr " << dec << fragOp.m_userPtr << "]";
+ out << " [indexId " << dec << fragOp.m_indexId << "]";
+ out << " [fragId " << dec << fragOp.m_fragId << "]";
+ out << " [fragNo " << dec << fragOp.m_fragNo << "]";
+ out << " numAttrsRecvd " << dec << fragOp.m_numAttrsRecvd << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
+operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
+{
+ const Dbtux::Frag& frag = node.m_frag;
+ const Dbtux::TreeHead& tree = frag.m_tree;
+ out << "[NodeHandle " << hex << &node;
+ out << " [loc " << node.m_loc << "]";
+ out << " [node " << *node.m_node << "]";
+ const Uint32* data;
+ out << " [pref";
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize;
+ for (unsigned j = 0; j < tree.m_prefSize; j++)
+ out << " " << hex << data[j];
+ out << "]";
+ out << " [entList";
+ unsigned numpos = node.m_node->m_occup;
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize;
+ const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data;
+ // print entries in logical order
+ for (unsigned pos = 1; pos <= numpos; pos++)
+ out << " " << entList[pos % numpos];
+ out << "]";
+ out << "]";
+ return out;
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
new file mode 100644
index 00000000000..5640fdf2899
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -0,0 +1,317 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_GEN_CPP
+#include "Dbtux.hpp"
+
+Dbtux::Dbtux(const Configuration& conf) :
+ SimulatedBlock(DBTUX, conf),
+ c_tup(0),
+ c_descPageList(RNIL),
+#ifdef VM_TRACE
+ debugFile(0),
+ debugOut(*new NullOutputStream()),
+ debugFlags(0),
+#endif
+ c_internalStartPhase(0),
+ c_typeOfStart(NodeState::ST_ILLEGAL_TYPE),
+ c_dataBuffer(0)
+{
+ BLOCK_CONSTRUCTOR(Dbtux);
+ // verify size assumptions (also when release-compiled)
+ ndbrequire(
+ (sizeof(TreeEnt) & 0x3) == 0 &&
+ (sizeof(TreeNode) & 0x3) == 0 &&
+ (sizeof(DescHead) & 0x3) == 0 &&
+ (sizeof(DescAttr) & 0x3) == 0
+ );
+ /*
+ * DbtuxGen.cpp
+ */
+ addRecSignal(GSN_CONTINUEB, &Dbtux::execCONTINUEB);
+ addRecSignal(GSN_STTOR, &Dbtux::execSTTOR);
+ addRecSignal(GSN_READ_CONFIG_REQ, &Dbtux::execREAD_CONFIG_REQ, true);
+ /*
+ * DbtuxMeta.cpp
+ */
+ addRecSignal(GSN_TUXFRAGREQ, &Dbtux::execTUXFRAGREQ);
+ addRecSignal(GSN_TUX_ADD_ATTRREQ, &Dbtux::execTUX_ADD_ATTRREQ);
+ addRecSignal(GSN_ALTER_INDX_REQ, &Dbtux::execALTER_INDX_REQ);
+ addRecSignal(GSN_DROP_TAB_REQ, &Dbtux::execDROP_TAB_REQ);
+ /*
+ * DbtuxMaint.cpp
+ */
+ addRecSignal(GSN_TUX_MAINT_REQ, &Dbtux::execTUX_MAINT_REQ);
+ /*
+ * DbtuxScan.cpp
+ */
+ addRecSignal(GSN_ACC_SCANREQ, &Dbtux::execACC_SCANREQ);
+ addRecSignal(GSN_TUX_BOUND_INFO, &Dbtux::execTUX_BOUND_INFO);
+ addRecSignal(GSN_NEXT_SCANREQ, &Dbtux::execNEXT_SCANREQ);
+ addRecSignal(GSN_ACC_CHECK_SCAN, &Dbtux::execACC_CHECK_SCAN);
+ addRecSignal(GSN_ACCKEYCONF, &Dbtux::execACCKEYCONF);
+ addRecSignal(GSN_ACCKEYREF, &Dbtux::execACCKEYREF);
+ addRecSignal(GSN_ACC_ABORTCONF, &Dbtux::execACC_ABORTCONF);
+ /*
+ * DbtuxDebug.cpp
+ */
+ addRecSignal(GSN_DUMP_STATE_ORD, &Dbtux::execDUMP_STATE_ORD);
+}
+
+Dbtux::~Dbtux()
+{
+}
+
+void
+Dbtux::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ const Uint32* data = signal->getDataPtr();
+ switch (data[0]) {
+ case TuxContinueB::DropIndex: // currently unused
+ {
+ IndexPtr indexPtr;
+ c_indexPool.getPtr(indexPtr, data[1]);
+ dropIndex(signal, indexPtr, data[2], data[3]);
+ }
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}
+
+/*
+ * STTOR is sent to one block at a time. In NDBCNTR it triggers
+ * NDB_STTOR to the "old" blocks. STTOR carries start phase (SP) and
+ * NDB_STTOR carries internal start phase (ISP).
+ *
+ * SP ISP activities
+ * 1 none
+ * 2 1
+ * 3 2 recover metadata, activate indexes
+ * 4 3 recover data
+ * 5 4-6
+ * 6 skip
+ * 7 skip
+ * 8 7 build non-logged indexes on SR
+ *
+ * DBTUX catches type of start (IS, SR, NR, INR) at SP 3 and updates
+ * internal start phase at SP 7. These are used to prevent index
+ * maintenance operations caused by redo log at SR.
+ */
+void
+Dbtux::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ Uint32 startPhase = signal->theData[1];
+ switch (startPhase) {
+ case 1:
+ jam();
+ CLEAR_ERROR_INSERT_VALUE;
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ break;
+ case 3:
+ jam();
+ c_typeOfStart = signal->theData[7];
+ break;
+ case 7:
+ c_internalStartPhase = 6;
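+ // fall through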
+ default:
+ jam();
+ break;
+ }
+ signal->theData[0] = 0; // garbage
+ signal->theData[1] = 0; // garbage
+ signal->theData[2] = 0; // garbage
+ signal->theData[3] = 1;
+ signal->theData[4] = 3; // for c_typeOfStart
+ signal->theData[5] = 7; // for c_internalStartPhase
+ signal->theData[6] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB);
+}
+
+void
+Dbtux::execREAD_CONFIG_REQ(Signal* signal)
+{
+ jamEntry();
+
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ Uint32 nIndex;
+ Uint32 nFragment;
+ Uint32 nAttribute;
+ Uint32 nScanOp;
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_INDEX, &nIndex));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_FRAGMENT, &nFragment));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
+
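+ // number of descriptor pages needed, rounded up to whole pages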
+ const Uint32 nDescPage = (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1) / DescPageSize;
+ const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
+
+ c_indexPool.setSize(nIndex);
+ c_fragPool.setSize(nFragment);
+ c_descPagePool.setSize(nDescPage);
+ c_fragOpPool.setSize(MaxIndexFragments);
+ c_scanOpPool.setSize(nScanOp);
+ c_scanBoundPool.setSize(nScanBoundWords);
+ /*
+ * Index id is physical array index. We seize and initialize all
+ * index records now.
+ */
+ IndexPtr indexPtr;
+ while (1) {
+ jam();
+ refresh_watch_dog();
+ c_indexPool.seize(indexPtr);
+ if (indexPtr.i == RNIL) {
+ jam();
+ break;
+ }
+ new (indexPtr.p) Index();
+ }
+ // allocate buffers
+ c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes);
+ c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes);
+ c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
+ c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);
+ c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1);
+ // ack
+ ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
+ ReadConfigConf::SignalLength, JBB);
+}
+
+// utils
+
+void
+Dbtux::setKeyAttrs(const Frag& frag)
+{
+ Data keyAttrs = c_keyAttrs; // global
+ NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global
+ const unsigned numAttrs = frag.m_numAttrs;
+ const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
+ for (unsigned i = 0; i < numAttrs; i++) {
+ jam();
+ const DescAttr& descAttr = descEnt.m_descAttr[i];
+ Uint32 size = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
+ // set attr id and fixed size
+ keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size);
+ keyAttrs += 1;
+ // set comparison method pointer
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
+ ndbrequire(sqlType.m_cmp != 0);
+ *(sqlCmp++) = sqlType.m_cmp;
+ }
+}
+
+void
+Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData)
+{
+ ConstData keyAttrs = c_keyAttrs; // global
+ const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
+ const TupLoc tupLoc = ent.m_tupLoc;
+ const Uint32 tupVersion = ent.m_tupVersion;
+ ndbrequire(start < frag.m_numAttrs);
+ const Uint32 numAttrs = frag.m_numAttrs - start;
+ // skip to start position in keyAttrs only
+ keyAttrs += start;
+ int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupVersion, keyAttrs, numAttrs, keyData);
+ jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
+#ifdef VM_TRACE
+ if (debugFlags & (DebugMaint | DebugScan)) {
+ debugOut << "readKeyAttrs:" << endl;
+ ConstData data = keyData;
+ Uint32 totalSize = 0;
+ for (Uint32 i = start; i < frag.m_numAttrs; i++) {
+ Uint32 attrId = data.ah().getAttributeId();
+ Uint32 dataSize = data.ah().getDataSize();
+ debugOut << i << " attrId=" << attrId << " size=" << dataSize;
+ data += 1;
+ for (Uint32 j = 0; j < dataSize; j++) {
+ debugOut << " " << hex << data[0];
+ data += 1;
+ }
+ debugOut << endl;
+ totalSize += 1 + dataSize;
+ }
+ ndbassert((int)totalSize == ret);
+ }
+#endif
+}
+
+void
+Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize)
+{
+ const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
+ const TupLoc tupLoc = ent.m_tupLoc;
+ int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), pkData, true);
+ jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
+ pkSize = ret;
+}
+
+/*
+ * Copy attribute data with headers. Input is all index key data.
+ * Copies whatever fits.
+ */
+void
+Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2)
+{
+ unsigned n = frag.m_numAttrs;
+ unsigned len2 = maxlen2;
+ while (n != 0) {
+ jam();
+ const unsigned dataSize = data1.ah().getDataSize();
+ // copy header
+ if (len2 == 0)
+ return;
+ data2[0] = data1[0];
+ data1 += 1;
+ data2 += 1;
+ len2 -= 1;
+ // copy data
+ for (unsigned i = 0; i < dataSize; i++) {
+ if (len2 == 0)
+ return;
+ data2[i] = data1[i];
+ len2 -= 1;
+ }
+ data1 += dataSize;
+ data2 += dataSize;
+ n -= 1;
+ }
+#ifdef VM_TRACE
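+ // in debug builds, fill the unused tail of the output buffer with the DataFillByte pattern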
+ memset(data2, DataFillByte, len2 << 2);
+#endif
+}
+
+BLOCK_FUNCTIONS(Dbtux)
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
new file mode 100644
index 00000000000..4b568badc67
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
@@ -0,0 +1,183 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_MAINT_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Maintain index.
+ */
+
+void
+Dbtux::execTUX_MAINT_REQ(Signal* signal)
+{
+ jamEntry();
+ TuxMaintReq* const sig = (TuxMaintReq*)signal->getDataPtrSend();
+ // ignore requests from redo log
+ if (c_internalStartPhase < 6 &&
+ c_typeOfStart != NodeState::ST_NODE_RESTART &&
+ c_typeOfStart != NodeState::ST_INITIAL_NODE_RESTART) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ TupLoc tupLoc(sig->pageId, sig->pageOffset);
+ debugOut << "opInfo=" << hex << sig->opInfo;
+ debugOut << " tableId=" << dec << sig->tableId;
+ debugOut << " indexId=" << dec << sig->indexId;
+ debugOut << " fragId=" << dec << sig->fragId;
+ debugOut << " tupLoc=" << tupLoc;
+ debugOut << " tupVersion=" << dec << sig->tupVersion;
+ debugOut << " -- ignored at ISP=" << dec << c_internalStartPhase;
+ debugOut << " TOS=" << dec << c_typeOfStart;
+ debugOut << endl;
+ }
+#endif
+ sig->errorCode = 0;
+ return;
+ }
+ TuxMaintReq reqCopy = *sig;
+ TuxMaintReq* const req = &reqCopy;
+ const Uint32 opCode = req->opInfo & 0xFF;
+ const Uint32 opFlag = req->opInfo >> 8;
+ // get the index
+ IndexPtr indexPtr;
+ c_indexPool.getPtr(indexPtr, req->indexId);
+ ndbrequire(indexPtr.p->m_tableId == req->tableId);
+ // get base fragment id and extra bits
+ const Uint32 fragId = req->fragId & ~1;
+ const Uint32 fragBit = req->fragId & 1;
+ // get the fragment
+ FragPtr fragPtr;
+ fragPtr.i = RNIL;
+ for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
+ jam();
+ if (indexPtr.p->m_fragId[i] == fragId) {
+ jam();
+ c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
+ break;
+ }
+ }
+ ndbrequire(fragPtr.i != RNIL);
+ Frag& frag = *fragPtr.p;
+ // set up index keys for this operation
+ setKeyAttrs(frag);
+ // set up search entry
+ TreeEnt ent;
+ ent.m_tupLoc = TupLoc(req->pageId, req->pageOffset);
+ ent.m_tupVersion = req->tupVersion;
+ ent.m_fragBit = fragBit;
+ // read search key
+ readKeyAttrs(frag, ent, 0, c_searchKey);
+ if (! frag.m_storeNullKey) {
+ // check if all keys are null
+ const unsigned numAttrs = frag.m_numAttrs;
+ bool allNull = true;
+ for (unsigned i = 0; i < numAttrs; i++) {
+ if (c_searchKey[i] != 0) {
+ jam();
+ allNull = false;
+ break;
+ }
+ }
+ if (allNull) {
+ jam();
+ req->errorCode = 0;
+ *sig = *req;
+ return;
+ }
+ }
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ debugOut << "opCode=" << dec << opCode;
+ debugOut << " opFlag=" << dec << opFlag;
+ debugOut << " tableId=" << dec << req->tableId;
+ debugOut << " indexId=" << dec << req->indexId;
+ debugOut << " fragId=" << dec << req->fragId;
+ debugOut << " entry=" << ent;
+ debugOut << endl;
+ }
+#endif
+ // do the operation
+ req->errorCode = 0;
+ TreePos treePos;
+ switch (opCode) {
+ case TuxMaintReq::OpAdd:
+ jam();
+ searchToAdd(frag, c_searchKey, ent, treePos);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ debugOut << treePos << (treePos.m_match ? " - error" : "") << endl;
+ }
+#endif
+ if (treePos.m_match) {
+ jam();
+ // there is no "Building" state so this will have to do
+ if (indexPtr.p->m_state == Index::Online) {
+ jam();
+ req->errorCode = TuxMaintReq::SearchError;
+ }
+ break;
+ }
+ /*
+ * At most one new node is inserted in the operation. Pre-allocate
+ * it so that the operation cannot fail.
+ */
+ if (frag.m_freeLoc == NullTupLoc) {
+ jam();
+ NodeHandle node(frag);
+ req->errorCode = allocNode(signal, node);
+ if (req->errorCode != 0) {
+ jam();
+ break;
+ }
+ // link to freelist
+ node.setLink(0, frag.m_freeLoc);
+ frag.m_freeLoc = node.m_loc;
+ ndbrequire(frag.m_freeLoc != NullTupLoc);
+ }
+ treeAdd(frag, treePos, ent);
+ break;
+ case TuxMaintReq::OpRemove:
+ jam();
+ searchToRemove(frag, c_searchKey, ent, treePos);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMaint) {
+ debugOut << treePos << (! treePos.m_match ? " - error" : "") << endl;
+ }
+#endif
+ if (! treePos.m_match) {
+ jam();
+ // there is no "Building" state so this will have to do
+ if (indexPtr.p->m_state == Index::Online) {
+ jam();
+ req->errorCode = TuxMaintReq::SearchError;
+ }
+ break;
+ }
+ treeRemove(frag, treePos);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+#ifdef VM_TRACE
+ if (debugFlags & DebugTree) {
+ printTree(signal, frag, debugOut);
+ }
+#endif
+ // copy back
+ *sig = *req;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
new file mode 100644
index 00000000000..93c4a583624
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -0,0 +1,513 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_META_CPP
+#include "Dbtux.hpp"
+#include <my_sys.h>
+
+/*
+ * Create index.
+ *
+ * For historical reasons it looks like we are adding random fragments
+ * and attributes to an existing index. In fact all fragments must be
+ * created at one time and they have identical attributes.
+ */
+
+void
+Dbtux::execTUXFRAGREQ(Signal* signal)
+{
+ jamEntry();
+ if (signal->theData[0] == (Uint32)-1) {
+ jam();
+ abortAddFragOp(signal);
+ return;
+ }
+ const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
+ const TuxFragReq* const req = &reqCopy;
+ IndexPtr indexPtr;
+ indexPtr.i = RNIL;
+ FragOpPtr fragOpPtr;
+ fragOpPtr.i = RNIL;
+ TuxFragRef::ErrorCode errorCode = TuxFragRef::NoError;
+ do {
+ // get the index record
+ if (req->tableId >= c_indexPool.getSize()) {
+ jam();
+ errorCode = TuxFragRef::InvalidRequest;
+ break;
+ }
+ c_indexPool.getPtr(indexPtr, req->tableId);
+ if (indexPtr.p->m_state != Index::NotDefined &&
+ indexPtr.p->m_state != Index::Defining) {
+ jam();
+ errorCode = TuxFragRef::InvalidRequest;
+ indexPtr.i = RNIL; // leave alone
+ break;
+ }
+ // get new operation record
+ c_fragOpPool.seize(fragOpPtr);
+ ndbrequire(fragOpPtr.i != RNIL);
+ new (fragOpPtr.p) FragOp();
+ fragOpPtr.p->m_userPtr = req->userPtr;
+ fragOpPtr.p->m_userRef = req->userRef;
+ fragOpPtr.p->m_indexId = req->tableId;
+ fragOpPtr.p->m_fragId = req->fragId;
+ fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
+ fragOpPtr.p->m_numAttrsRecvd = 0;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ // check if index has place for more fragments
+ ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
+ // seize new fragment record
+ FragPtr fragPtr;
+ c_fragPool.seize(fragPtr);
+ if (fragPtr.i == RNIL) {
+ jam();
+ errorCode = TuxFragRef::NoFreeFragment;
+ break;
+ }
+ new (fragPtr.p) Frag(c_scanOpPool);
+ fragPtr.p->m_tableId = req->primaryTableId;
+ fragPtr.p->m_indexId = req->tableId;
+ fragPtr.p->m_fragId = req->fragId;
+ fragPtr.p->m_numAttrs = req->noOfAttr;
+ fragPtr.p->m_storeNullKey = true; // not yet configurable
+ fragPtr.p->m_tupIndexFragPtrI = req->tupIndexFragPtrI;
+ fragPtr.p->m_tupTableFragPtrI[0] = req->tupTableFragPtrI[0];
+ fragPtr.p->m_tupTableFragPtrI[1] = req->tupTableFragPtrI[1];
+ fragPtr.p->m_accTableFragPtrI[0] = req->accTableFragPtrI[0];
+ fragPtr.p->m_accTableFragPtrI[1] = req->accTableFragPtrI[1];
+ // add the fragment to the index
+ indexPtr.p->m_fragId[indexPtr.p->m_numFrags] = req->fragId;
+ indexPtr.p->m_fragPtrI[indexPtr.p->m_numFrags] = fragPtr.i;
+ indexPtr.p->m_numFrags++;
+ // save under operation
+ fragOpPtr.p->m_fragPtrI = fragPtr.i;
+ // prepare to receive attributes
+ if (fragOpPtr.p->m_fragNo == 0) {
+ jam();
+ // receiving first fragment
+ ndbrequire(
+ indexPtr.p->m_state == Index::NotDefined &&
+ DictTabInfo::isOrderedIndex(req->tableType) &&
+ req->noOfAttr > 0 &&
+ req->noOfAttr <= MaxIndexAttributes &&
+ indexPtr.p->m_descPage == RNIL);
+ indexPtr.p->m_state = Index::Defining;
+ indexPtr.p->m_tableType = (DictTabInfo::TableType)req->tableType;
+ indexPtr.p->m_tableId = req->primaryTableId;
+ indexPtr.p->m_numAttrs = req->noOfAttr;
+ indexPtr.p->m_storeNullKey = true; // not yet configurable
+ // allocate attribute descriptors
+ if (! allocDescEnt(indexPtr)) {
+ jam();
+ errorCode = TuxFragRef::NoFreeAttributes;
+ break;
+ }
+ } else {
+ // receiving subsequent fragment
+ jam();
+ ndbrequire(
+ indexPtr.p->m_state == Index::Defining &&
+ indexPtr.p->m_tableType == (DictTabInfo::TableType)req->tableType &&
+ indexPtr.p->m_tableId == req->primaryTableId &&
+ indexPtr.p->m_numAttrs == req->noOfAttr);
+ }
+ // copy metadata address to each fragment
+ fragPtr.p->m_descPage = indexPtr.p->m_descPage;
+ fragPtr.p->m_descOff = indexPtr.p->m_descOff;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
+ }
+#endif
+ // error inserts
+ if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 ||
+ ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) {
+ jam();
+ errorCode = (TuxFragRef::ErrorCode)1;
+ CLEAR_ERROR_INSERT_VALUE;
+ break;
+ }
+ // success
+ TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
+ conf->userPtr = req->userPtr;
+ conf->tuxConnectPtr = fragOpPtr.i;
+ conf->fragPtr = fragPtr.i;
+ conf->fragId = fragPtr.p->m_fragId;
+ sendSignal(req->userRef, GSN_TUXFRAGCONF,
+ signal, TuxFragConf::SignalLength, JBB);
+ return;
+ } while (0);
+ // error
+ TuxFragRef* const ref = (TuxFragRef*)signal->getDataPtrSend();
+ ref->userPtr = req->userPtr;
+ ref->errorCode = errorCode;
+ sendSignal(req->userRef, GSN_TUXFRAGREF,
+ signal, TuxFragRef::SignalLength, JBB);
+ if (fragOpPtr.i != RNIL) {
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ c_fragOpPool.release(fragOpPtr);
+ }
+ if (indexPtr.i != RNIL) {
+ jam();
+ // let DICT drop the unfinished index
+ }
+}
+
+void
+Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
+{
+ jamEntry();
+ const TuxAddAttrReq reqCopy = *(const TuxAddAttrReq*)signal->getDataPtr();
+ const TuxAddAttrReq* const req = &reqCopy;
+ // get the records
+ FragOpPtr fragOpPtr;
+ IndexPtr indexPtr;
+ FragPtr fragPtr;
+ c_fragOpPool.getPtr(fragOpPtr, req->tuxConnectPtr);
+ c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
+ c_fragPool.getPtr(fragPtr, fragOpPtr.p->m_fragPtrI);
+ TuxAddAttrRef::ErrorCode errorCode = TuxAddAttrRef::NoError;
+ do {
+ // expected attribute id
+ const unsigned attrId = fragOpPtr.p->m_numAttrsRecvd++;
+ ndbrequire(
+ indexPtr.p->m_state == Index::Defining &&
+ attrId < indexPtr.p->m_numAttrs &&
+ attrId == req->attrId);
+ // define the attribute
+ DescEnt& descEnt = getDescEnt(indexPtr.p->m_descPage, indexPtr.p->m_descOff);
+ DescAttr& descAttr = descEnt.m_descAttr[attrId];
+ descAttr.m_attrDesc = req->attrDescriptor;
+ descAttr.m_primaryAttrId = req->primaryAttrId;
+ descAttr.m_typeId = AttributeDescriptor::getType(req->attrDescriptor);
+ descAttr.m_charset = (req->extTypeInfo >> 16);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl;
+ }
+#endif
+ // check that type is valid and has a binary comparison method
+ const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
+ if (type.m_typeId == NdbSqlUtil::Type::Undefined ||
+ type.m_cmp == 0) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidAttributeType;
+ break;
+ }
+ if (descAttr.m_charset != 0) {
+ CHARSET_INFO *cs = all_charsets[descAttr.m_charset];
+ ndbrequire(cs != 0);
+ if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidCharset;
+ break;
+ }
+ }
+ const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
+ if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
+ ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
+ ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 ||
+ ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) {
+ errorCode = (TuxAddAttrRef::ErrorCode)1;
+ CLEAR_ERROR_INSERT_VALUE;
+ break;
+ }
+ if (lastAttr) {
+ jam();
+ // initialize tree header
+ TreeHead& tree = fragPtr.p->m_tree;
+ new (&tree) TreeHead();
+ // make these configurable later
+ tree.m_nodeSize = MAX_TTREE_NODE_SIZE;
+ tree.m_prefSize = MAX_TTREE_PREF_SIZE;
+ const unsigned maxSlack = MAX_TTREE_NODE_SLACK;
+ // size up to and including first 2 entries
+ const unsigned pref = tree.getSize(AccPref);
+ if (! (pref <= tree.m_nodeSize)) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidNodeSize;
+ break;
+ }
+ const unsigned slots = (tree.m_nodeSize - pref) / TreeEntSize;
+ // leave out work space entry
+ tree.m_maxOccup = 2 + slots - 1;
+ // min occupancy of interior node must be at least 2
+ if (! (2 + maxSlack <= tree.m_maxOccup)) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidNodeSize;
+ break;
+ }
+ tree.m_minOccup = tree.m_maxOccup - maxSlack;
+ // root node does not exist (also set by ctor)
+ tree.m_root = NullTupLoc;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ if (fragOpPtr.p->m_fragNo == 0) {
+ debugOut << "Index id=" << indexPtr.i;
+ debugOut << " nodeSize=" << tree.m_nodeSize;
+ debugOut << " headSize=" << NodeHeadSize;
+ debugOut << " prefSize=" << tree.m_prefSize;
+ debugOut << " entrySize=" << TreeEntSize;
+ debugOut << " minOccup=" << tree.m_minOccup;
+ debugOut << " maxOccup=" << tree.m_maxOccup;
+ debugOut << endl;
+ }
+ }
+#endif
+ // fragment is defined
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ c_fragOpPool.release(fragOpPtr);
+ }
+ // success
+ TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend();
+ conf->userPtr = fragOpPtr.p->m_userPtr;
+ conf->lastAttr = lastAttr;
+ sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF,
+ signal, TuxAddAttrConf::SignalLength, JBB);
+ return;
+ } while (0);
+ // error
+ TuxAddAttrRef* ref = (TuxAddAttrRef*)signal->getDataPtrSend();
+ ref->userPtr = fragOpPtr.p->m_userPtr;
+ ref->errorCode = errorCode;
+ sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF,
+ signal, TuxAddAttrRef::SignalLength, JBB);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ c_fragOpPool.release(fragOpPtr);
+ // let DICT drop the unfinished index
+}
+
+/*
+ * LQH aborts on-going create index operation.
+ */
+void
+Dbtux::abortAddFragOp(Signal* signal)
+{
+ FragOpPtr fragOpPtr;
+ IndexPtr indexPtr;
+ c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]);
+ c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ c_fragOpPool.release(fragOpPtr);
+ // let DICT drop the unfinished index
+}
+
+/*
+ * Set index online. Currently at system restart this arrives before
+ * build and is therefore not correct.
+ */
+void
+Dbtux::execALTER_INDX_REQ(Signal* signal)
+{
+ jamEntry();
+ const AlterIndxReq reqCopy = *(const AlterIndxReq*)signal->getDataPtr();
+ const AlterIndxReq* const req = &reqCopy;
+ // set index online after build
+ IndexPtr indexPtr;
+ c_indexPool.getPtr(indexPtr, req->getIndexId());
+ indexPtr.p->m_state = Index::Online;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Online index " << indexPtr.i << " " << *indexPtr.p << endl;
+ }
+#endif
+ // success
+ AlterIndxConf* const conf = (AlterIndxConf*)signal->getDataPtrSend();
+ conf->setUserRef(reference());
+ conf->setConnectionPtr(req->getConnectionPtr());
+ conf->setRequestType(req->getRequestType());
+ conf->setTableId(req->getTableId());
+ conf->setIndexId(req->getIndexId());
+ conf->setIndexVersion(req->getIndexVersion());
+ sendSignal(req->getUserRef(), GSN_ALTER_INDX_CONF,
+ signal, AlterIndxConf::SignalLength, JBB);
+}
+
+/*
+ * Drop index.
+ *
+ * Uses same DROP_TAB_REQ signal as normal tables.
+ */
+
+void
+Dbtux::execDROP_TAB_REQ(Signal* signal)
+{
+ jamEntry();
+ const DropTabReq reqCopy = *(const DropTabReq*)signal->getDataPtr();
+ const DropTabReq* const req = &reqCopy;
+ IndexPtr indexPtr;
+
+ Uint32 tableId = req->tableId;
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ if (tableId >= c_indexPool.getSize()) {
+ jam();
+ // reply to sender
+ DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->tableId = tableId;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+ return;
+ }
+
+ c_indexPool.getPtr(indexPtr, req->tableId);
+ // drop works regardless of index state
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Drop index " << indexPtr.i << " " << *indexPtr.p << endl;
+ }
+#endif
+ ndbrequire(req->senderRef != 0);
+ dropIndex(signal, indexPtr, req->senderRef, req->senderData);
+}
+
+void
+Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData)
+{
+ jam();
+ indexPtr.p->m_state = Index::Dropping;
+ // drop fragments
+ while (indexPtr.p->m_numFrags > 0) {
+ jam();
+ Uint32 i = --indexPtr.p->m_numFrags;
+ FragPtr fragPtr;
+ c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
+ c_fragPool.release(fragPtr);
+ }
+ // drop attributes
+ if (indexPtr.p->m_descPage != RNIL) {
+ jam();
+ freeDescEnt(indexPtr);
+ indexPtr.p->m_descPage = RNIL;
+ }
+ if (senderRef != 0) {
+ jam();
+ // reply to sender
+ DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->tableId = indexPtr.i;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+ }
+ new (indexPtr.p) Index();
+}
+
+/*
+ * Subroutines.
+ */
+
+bool
+Dbtux::allocDescEnt(IndexPtr indexPtr)
+{
+ jam();
+ const unsigned size = DescHeadSize + indexPtr.p->m_numAttrs * DescAttrSize;
+ DescPagePtr pagePtr;
+ pagePtr.i = c_descPageList;
+ while (pagePtr.i != RNIL) {
+ jam();
+ c_descPagePool.getPtr(pagePtr);
+ if (pagePtr.p->m_numFree >= size) {
+ jam();
+ break;
+ }
+ pagePtr.i = pagePtr.p->m_nextPage;
+ }
+ if (pagePtr.i == RNIL) {
+ jam();
+ if (! c_descPagePool.seize(pagePtr)) {
+ jam();
+ return false;
+ }
+ new (pagePtr.p) DescPage();
+ // add in front of list
+ pagePtr.p->m_nextPage = c_descPageList;
+ c_descPageList = pagePtr.i;
+ pagePtr.p->m_numFree = DescPageSize;
+ }
+ ndbrequire(pagePtr.p->m_numFree >= size);
+ indexPtr.p->m_descPage = pagePtr.i;
+ indexPtr.p->m_descOff = DescPageSize - pagePtr.p->m_numFree;
+ pagePtr.p->m_numFree -= size;
+ DescEnt& descEnt = getDescEnt(indexPtr.p->m_descPage, indexPtr.p->m_descOff);
+ descEnt.m_descHead.m_indexId = indexPtr.i;
+ descEnt.m_descHead.pad1 = 0;
+ return true;
+}
+
+void
+Dbtux::freeDescEnt(IndexPtr indexPtr)
+{
+ DescPagePtr pagePtr;
+ c_descPagePool.getPtr(pagePtr, indexPtr.p->m_descPage);
+ Uint32* const data = pagePtr.p->m_data;
+ const unsigned size = DescHeadSize + indexPtr.p->m_numAttrs * DescAttrSize;
+ unsigned off = indexPtr.p->m_descOff;
+ // move the gap to the free area at the top
+ while (off + size < DescPageSize - pagePtr.p->m_numFree) {
+ jam();
+ // next entry to move over the gap
+ DescEnt& descEnt2 = *(DescEnt*)&data[off + size];
+ Uint32 indexId2 = descEnt2.m_descHead.m_indexId;
+ Index& index2 = *c_indexPool.getPtr(indexId2);
+ unsigned size2 = DescHeadSize + index2.m_numAttrs * DescAttrSize;
+ ndbrequire(
+ index2.m_descPage == pagePtr.i &&
+ index2.m_descOff == off + size);
+ // move the entry (overlapping copy if size < size2)
+ unsigned i;
+ for (i = 0; i < size2; i++) {
+ jam();
+ data[off + i] = data[off + size + i];
+ }
+ off += size2;
+ // adjust page offset in index and all fragments
+ index2.m_descOff -= size;
+ for (i = 0; i < index2.m_numFrags; i++) {
+ jam();
+ Frag& frag2 = *c_fragPool.getPtr(index2.m_fragPtrI[i]);
+ frag2.m_descOff -= size;
+ ndbrequire(
+ frag2.m_descPage == index2.m_descPage &&
+ frag2.m_descOff == index2.m_descOff);
+ }
+ }
+ ndbrequire(off + size == DescPageSize - pagePtr.p->m_numFree);
+ pagePtr.p->m_numFree += size;
+}
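freeDescEnt above closes the hole left by the freed descriptor by sliding every later entry down over the gap and then growing the page's free count; the routine also patches the stored page offsets of each affected index and fragment. A minimal standalone sketch of just the data move, using hypothetical names and a plain word array:

#include <cstring>

// Close a gap of `size` words at offset `off` in a page of `pageSize`
// words, of which `numFree` are unused at the top.
void compactPage(unsigned* data, unsigned pageSize, unsigned& numFree,
                 unsigned off, unsigned size)
{
  const unsigned used = pageSize - numFree;          // words in use
  // slide the tail of the page down over the freed entry
  std::memmove(&data[off], &data[off + size],
               (used - (off + size)) * sizeof(unsigned));
  numFree += size;                                   // gap joins the free area
}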
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
new file mode 100644
index 00000000000..855a8ed1c29
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -0,0 +1,581 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_NODE_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Allocate index node in TUP.
+ */
+int
+Dbtux::allocNode(Signal* signal, NodeHandle& node)
+{
+ Frag& frag = node.m_frag;
+ Uint32 pageId = NullTupLoc.getPageId();
+ Uint32 pageOffset = NullTupLoc.getPageOffset();
+ Uint32* node32 = 0;
+ int errorCode = c_tup->tuxAllocNode(signal, frag.m_tupIndexFragPtrI, pageId, pageOffset, node32);
+ jamEntry();
+ if (errorCode == 0) {
+ jam();
+ node.m_loc = TupLoc(pageId, pageOffset);
+ node.m_node = reinterpret_cast<TreeNode*>(node32);
+ ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
+ }
+ return errorCode;
+}
+
+/*
+ * Set handle to point to existing node.
+ */
+void
+Dbtux::selectNode(NodeHandle& node, TupLoc loc)
+{
+ Frag& frag = node.m_frag;
+ ndbrequire(loc != NullTupLoc);
+ Uint32 pageId = loc.getPageId();
+ Uint32 pageOffset = loc.getPageOffset();
+ Uint32* node32 = 0;
+ c_tup->tuxGetNode(frag.m_tupIndexFragPtrI, pageId, pageOffset, node32);
+ jamEntry();
+ node.m_loc = loc;
+ node.m_node = reinterpret_cast<TreeNode*>(node32);
+ ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
+}
+
+/*
+ * Set handle to point to new node. Uses a pre-allocated node.
+ */
+void
+Dbtux::insertNode(NodeHandle& node)
+{
+ Frag& frag = node.m_frag;
+ // unlink from freelist
+ selectNode(node, frag.m_freeLoc);
+ frag.m_freeLoc = node.getLink(0);
+ new (node.m_node) TreeNode();
+#ifdef VM_TRACE
+ TreeHead& tree = frag.m_tree;
+ memset(node.getPref(), DataFillByte, tree.m_prefSize << 2);
+ TreeEnt* entList = tree.getEntList(node.m_node);
+ memset(entList, NodeFillByte, (tree.m_maxOccup + 1) * (TreeEntSize << 2));
+#endif
+}
+
+/*
+ * Delete existing node. Simply put it on the freelist.
+ */
+void
+Dbtux::deleteNode(NodeHandle& node)
+{
+ Frag& frag = node.m_frag;
+ ndbrequire(node.getOccup() == 0);
+ // link to freelist
+ node.setLink(0, frag.m_freeLoc);
+ frag.m_freeLoc = node.m_loc;
+ // invalidate the handle
+ node.m_loc = NullTupLoc;
+ node.m_node = 0;
+}
+
+/*
+ * Set prefix. Copies the number of words that fits. Includes
+ * attribute headers for now. XXX use null mask instead
+ */
+void
+Dbtux::setNodePref(NodeHandle& node)
+{
+ const Frag& frag = node.m_frag;
+ const TreeHead& tree = frag.m_tree;
+ readKeyAttrs(frag, node.getMinMax(0), 0, c_entryKey);
+ copyAttrs(frag, c_entryKey, node.getPref(), tree.m_prefSize);
+}
+
+// node operations
+
+/*
+ * Add entry at position. Move entries greater than or equal to the old
+ * one (if any) to the right.
+ *
+ * X
+ * v
+ * A B C D E _ _ => A B C X D E _
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Add list of scans at the new entry.
+ */
+void
+Dbtux::nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList)
+{
+ Frag& frag = node.m_frag;
+ TreeHead& tree = frag.m_tree;
+ const unsigned occup = node.getOccup();
+ ndbrequire(occup < tree.m_maxOccup && pos <= occup);
+ // fix old scans
+ if (node.getNodeScan() != RNIL)
+ nodePushUpScans(node, pos);
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ for (unsigned i = occup; i > pos; i--) {
+ jam();
+ tmpList[i] = tmpList[i - 1];
+ }
+ tmpList[pos] = ent;
+ entList[0] = entList[occup + 1];
+ node.setOccup(occup + 1);
+ // add new scans
+ if (scanList != RNIL)
+ addScanList(node, pos, scanList);
+ // fix prefix
+ if (occup == 0 || pos == 0)
+ setNodePref(node);
+}
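The shift that nodePushUp performs on the logical entry list is an ordinary insert-with-right-shift; the entList[0]/entList[occup] assignments before and after deal with the node's internal storage layout. A minimal sketch of the core move, with a hypothetical Ent type in place of the real TreeEnt:

#include <cassert>

struct Ent { unsigned key; };

// Insert `ent` at `pos`, shifting entries at pos..occup-1 one step right.
void pushUp(Ent* list, unsigned& occup, unsigned maxOccup,
            unsigned pos, Ent ent)
{
  assert(occup < maxOccup && pos <= occup);
  for (unsigned i = occup; i > pos; i--)
    list[i] = list[i - 1];
  list[pos] = ent;
  occup++;
}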
+
+void
+Dbtux::nodePushUpScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ if (scanPos.m_pos >= pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At pushUp pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanPos.m_pos++;
+ }
+ scanPtr.i = scanPtr.p->m_nodeScan;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Remove and return entry at position. Move entries greater than the
+ * removed one to the left. This is the opposite of nodePushUp.
+ *
+ * D
+ * ^ ^
+ * A B C D E F _ => A B C E F _ _
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Scans at the removed entry are returned via the scan list if a
+ * non-null location is passed, otherwise they are moved forward.
+ */
+void
+Dbtux::nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList)
+{
+ Frag& frag = node.m_frag;
+ TreeHead& tree = frag.m_tree;
+ const unsigned occup = node.getOccup();
+ ndbrequire(occup <= tree.m_maxOccup && pos < occup);
+ if (node.getNodeScan() != RNIL) {
+ // remove or move scans at this position
+ if (scanList == 0)
+ moveScanList(node, pos);
+ else
+ removeScanList(node, pos, *scanList);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePopDownScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ ent = tmpList[pos];
+ for (unsigned i = pos; i < occup - 1; i++) {
+ jam();
+ tmpList[i] = tmpList[i + 1];
+ }
+ entList[0] = entList[occup - 1];
+ node.setOccup(occup - 1);
+ // fix prefix
+ if (occup != 1 && pos == 0)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePopDownScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ // handled before
+ ndbrequire(scanPos.m_pos != pos);
+ if (scanPos.m_pos > pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At popDown pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanPos.m_pos--;
+ }
+ scanPtr.i = scanPtr.p->m_nodeScan;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Add entry at existing position. Move entries less than or equal to
+ * the old one to the left. Remove and return old min entry.
+ *
+ * X A
+ * ^ v ^
+ * A B C D E _ _ => B C D X E _ _
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Return list of scans at the removed position 0.
+ */
+void
+Dbtux::nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList)
+{
+ Frag& frag = node.m_frag;
+ TreeHead& tree = frag.m_tree;
+ const unsigned occup = node.getOccup();
+ ndbrequire(occup <= tree.m_maxOccup && pos < occup);
+ if (node.getNodeScan() != RNIL) {
+ // remove scans at 0
+ removeScanList(node, 0, scanList);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePushDownScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ TreeEnt oldMin = tmpList[0];
+ for (unsigned i = 0; i < pos; i++) {
+ jam();
+ tmpList[i] = tmpList[i + 1];
+ }
+ tmpList[pos] = ent;
+ ent = oldMin;
+ entList[0] = entList[occup];
+ // fix prefix
+ if (true)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePushDownScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ // handled before
+ ndbrequire(scanPos.m_pos != 0);
+ if (scanPos.m_pos <= pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At pushDown pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanPos.m_pos--;
+ }
+ scanPtr.i = scanPtr.p->m_nodeScan;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Remove and return entry at position. Move entries less than the
+ * removed one to the right. Replace min entry by the input entry.
+ * This is the opposite of nodePushDown.
+ *
+ * X D
+ * v ^ ^
+ * A B C D E _ _ => X A B C E _ _
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Move scans at removed entry and add scans at the new entry.
+ */
+void
+Dbtux::nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList)
+{
+ Frag& frag = node.m_frag;
+ TreeHead& tree = frag.m_tree;
+ const unsigned occup = node.getOccup();
+ ndbrequire(occup <= tree.m_maxOccup && pos < occup);
+ if (node.getNodeScan() != RNIL) {
+ // move scans whose entry disappears
+ moveScanList(node, pos);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePopUpScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ TreeEnt newMin = ent;
+ ent = tmpList[pos];
+ for (unsigned i = pos; i > 0; i--) {
+ jam();
+ tmpList[i] = tmpList[i - 1];
+ }
+ tmpList[0] = newMin;
+ entList[0] = entList[occup];
+ // add scans
+ if (scanList != RNIL)
+ addScanList(node, 0, scanList);
+ // fix prefix
+ if (true)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ ndbrequire(scanPos.m_pos != pos);
+ if (scanPos.m_pos < pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At popUp pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanPos.m_pos++;
+ }
+ scanPtr.i = scanPtr.p->m_nodeScan;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Move number of entries from another node to this node before the min
+ * (i=0) or after the max (i=1). Expensive but not often used.
+ */
+void
+Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i)
+{
+ Frag& frag = dstNode.m_frag;
+ TreeHead& tree = frag.m_tree;
+ ndbrequire(i <= 1);
+ while (cnt != 0) {
+ TreeEnt ent;
+ Uint32 scanList = RNIL;
+ nodePopDown(srcNode, i == 0 ? srcNode.getOccup() - 1 : 0, ent, &scanList);
+ nodePushUp(dstNode, i == 0 ? 0 : dstNode.getOccup(), ent, scanList);
+ cnt--;
+ }
+}
+
+// scans linked to node
+
+/*
+ * Add list of scans to node at given position.
+ */
+void
+Dbtux::addScanList(NodeHandle& node, unsigned pos, Uint32 scanList)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = scanList;
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Add scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "To pos=" << pos << " " << node << endl;
+ }
+#endif
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ scanPtr.p->m_nodeScan = RNIL;
+ linkScan(node, scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ // set position but leave direction alone
+ scanPos.m_loc = node.m_loc;
+ scanPos.m_pos = pos;
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Remove the list of scans from the node at the given position. The
+ * return location must already hold a valid list head (in fact always RNIL).
+ */
+void
+Dbtux::removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc);
+ if (scanPos.m_pos == pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Remove scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "From pos=" << pos << " " << node << endl;
+ }
+#endif
+ unlinkScan(node, scanPtr);
+ scanPtr.p->m_nodeScan = scanList;
+ scanList = scanPtr.i;
+ // unset position but leave direction alone
+ scanPos.m_loc = NullTupLoc;
+ scanPos.m_pos = ZNIL;
+ }
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Move list of scans away from entry about to be removed. Uses scan
+ * method scanNext().
+ */
+void
+Dbtux::moveScanList(NodeHandle& node, unsigned pos)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ ndbrequire(scanPos.m_loc == node.m_loc);
+ if (scanPos.m_pos == pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanNext(scanPtr, true);
+ ndbrequire(! (scanPos.m_loc == node.m_loc && scanPos.m_pos == pos));
+ }
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Link scan to the list under the node. The list is single-linked and
+ * ordering does not matter.
+ */
+void
+Dbtux::linkScan(NodeHandle& node, ScanOpPtr scanPtr)
+{
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Link scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "To node " << node << endl;
+ }
+#endif
+ ndbrequire(! islinkScan(node, scanPtr) && scanPtr.p->m_nodeScan == RNIL);
+ scanPtr.p->m_nodeScan = node.getNodeScan();
+ node.setNodeScan(scanPtr.i);
+}
+
+/*
+ * Unlink a scan from the list under the node.
+ */
+void
+Dbtux::unlinkScan(NodeHandle& node, ScanOpPtr scanPtr)
+{
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Unlink scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "From node " << node << endl;
+ }
+#endif
+ ScanOpPtr currPtr;
+ currPtr.i = node.getNodeScan();
+ ScanOpPtr prevPtr;
+ prevPtr.i = RNIL;
+ while (true) {
+ jam();
+ c_scanOpPool.getPtr(currPtr);
+ Uint32 nextPtrI = currPtr.p->m_nodeScan;
+ if (currPtr.i == scanPtr.i) {
+ jam();
+ if (prevPtr.i == RNIL) {
+ node.setNodeScan(nextPtrI);
+ } else {
+ jam();
+ prevPtr.p->m_nodeScan = nextPtrI;
+ }
+ scanPtr.p->m_nodeScan = RNIL;
+ // check for duplicates
+ ndbrequire(! islinkScan(node, scanPtr));
+ return;
+ }
+ prevPtr = currPtr;
+ currPtr.i = nextPtrI;
+ }
+}
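unlinkScan is the usual removal from a singly-linked list: walk the list with a trailing pointer and splice the target out, updating either the list head or the previous element. A minimal sketch with a hypothetical node type:

struct ScanNode { unsigned id; ScanNode* next; };

void unlink(ScanNode*& head, ScanNode* target)
{
  ScanNode* prev = nullptr;
  for (ScanNode* curr = head; curr != nullptr; curr = curr->next) {
    if (curr == target) {
      if (prev == nullptr)
        head = curr->next;        // target was the list head
      else
        prev->next = curr->next;  // bypass the target
      target->next = nullptr;
      return;
    }
    prev = curr;
  }
}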
+
+/*
+ * Check if a scan is linked to this node. Only for ndbrequire.
+ */
+bool
+Dbtux::islinkScan(NodeHandle& node, ScanOpPtr scanPtr)
+{
+ ScanOpPtr currPtr;
+ currPtr.i = node.getNodeScan();
+ while (currPtr.i != RNIL) {
+ jam();
+ c_scanOpPool.getPtr(currPtr);
+ if (currPtr.i == scanPtr.i) {
+ jam();
+ return true;
+ }
+ currPtr.i = currPtr.p->m_nodeScan;
+ }
+ return false;
+}
+
+void
+Dbtux::NodeHandle::progError(int line, int cause, const char* file)
+{
+ ErrorReporter::handleAssert("Dbtux::NodeHandle: assert failed", file, line);
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
new file mode 100644
index 00000000000..a61b7c1f5ca
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -0,0 +1,1041 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_SCAN_CPP
+#include "Dbtux.hpp"
+#include <my_sys.h>
+
+void
+Dbtux::execACC_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ const AccScanReq reqCopy = *(const AccScanReq*)signal->getDataPtr();
+ const AccScanReq* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ scanPtr.i = RNIL;
+ do {
+ // get the index
+ IndexPtr indexPtr;
+ c_indexPool.getPtr(indexPtr, req->tableId);
+ // get the fragment
+ FragPtr fragPtr;
+ fragPtr.i = RNIL;
+ for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
+ jam();
+ if (indexPtr.p->m_fragId[i] == req->fragmentNo << 1) {
+ jam();
+ c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
+ break;
+ }
+ }
+ ndbrequire(fragPtr.i != RNIL);
+ Frag& frag = *fragPtr.p;
+ // must be normal DIH/TC fragment
+ TreeHead& tree = frag.m_tree;
+ // check for empty fragment
+ if (tree.m_root == NullTupLoc) {
+ jam();
+ AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = req->senderData;
+ conf->accPtr = RNIL;
+ conf->flag = AccScanConf::ZEMPTY_FRAGMENT;
+ sendSignal(req->senderRef, GSN_ACC_SCANCONF,
+ signal, AccScanConf::SignalLength, JBB);
+ return;
+ }
+ // seize from pool and link to per-fragment list
+ if (! frag.m_scanList.seize(scanPtr)) {
+ jam();
+ break;
+ }
+ new (scanPtr.p) ScanOp(c_scanBoundPool);
+ scanPtr.p->m_state = ScanOp::First;
+ scanPtr.p->m_userPtr = req->senderData;
+ scanPtr.p->m_userRef = req->senderRef;
+ scanPtr.p->m_tableId = indexPtr.p->m_tableId;
+ scanPtr.p->m_indexId = indexPtr.i;
+ scanPtr.p->m_fragId = fragPtr.p->m_fragId;
+ scanPtr.p->m_fragPtrI = fragPtr.i;
+ scanPtr.p->m_transId1 = req->transId1;
+ scanPtr.p->m_transId2 = req->transId2;
+ scanPtr.p->m_savePointId = req->savePointId;
+ scanPtr.p->m_readCommitted = AccScanReq::getReadCommittedFlag(req->requestInfo);
+ scanPtr.p->m_lockMode = AccScanReq::getLockMode(req->requestInfo);
+ scanPtr.p->m_descending = AccScanReq::getDescendingFlag(req->requestInfo);
+ /*
+ * readCommitted lockMode keyInfo
+ * 1 0 0 - read committed (no lock)
+ * 0 0 0 - read latest (read lock)
+ * 0 1 1 - read exclusive (write lock)
+ */
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Seize scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ }
+#endif
+ // conf
+ AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = req->senderData;
+ conf->accPtr = scanPtr.i;
+ conf->flag = AccScanConf::ZNOT_EMPTY_FRAGMENT;
+ sendSignal(req->senderRef, GSN_ACC_SCANCONF,
+ signal, AccScanConf::SignalLength, JBB);
+ return;
+ } while (0);
+ if (scanPtr.i != RNIL) {
+ jam();
+ releaseScanOp(scanPtr);
+ }
+ // LQH does not handle REF
+ signal->theData[0] = 0x313;
+ sendSignal(req->senderRef, GSN_ACC_SCANREF,
+ signal, 1, JBB);
+}
+
+/*
+ * Receive bounds for scan in a single direct call. The bounds can
+ * arrive in any order. Attribute ids are those of the index table.
+ *
+ * Replace EQ by equivalent LE + GE. Check for conflicting bounds.
+ * Check that sets of lower and upper bounds are on initial sequences of
+ * keys and that all but possibly last bound is non-strict.
+ *
+ * Finally save the sets of lower and upper bounds (i.e. start key and
+ * end key). Full bound type is included but only the strict bit is
+ * used since lower and upper have now been separated.
+ */
+void
+Dbtux::execTUX_BOUND_INFO(Signal* signal)
+{
+ jamEntry();
+ // get records
+ TuxBoundInfo* const sig = (TuxBoundInfo*)signal->getDataPtrSend();
+ const TuxBoundInfo* const req = (const TuxBoundInfo*)sig;
+ ScanOp& scan = *c_scanOpPool.getPtr(req->tuxScanPtrI);
+ const Index& index = *c_indexPool.getPtr(scan.m_indexId);
+ const DescEnt& descEnt = getDescEnt(index.m_descPage, index.m_descOff);
+ // collect normalized lower and upper bounds
+ struct BoundInfo {
+ int type2; // with EQ -> LE/GE
+ Uint32 offset; // offset in xfrmData
+ Uint32 size;
+ };
+ BoundInfo boundInfo[2][MaxIndexAttributes];
+ const unsigned dstSize = 1024 * MAX_XFRM_MULTIPLY;
+ Uint32 xfrmData[dstSize];
+ Uint32 dstPos = 0;
+ // largest attrId seen plus one
+ Uint32 maxAttrId[2] = { 0, 0 };
+ // walk through entries
+ const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength;
+ Uint32 offset = 0;
+ while (offset + 2 <= req->boundAiLength) {
+ jam();
+ const unsigned type = data[offset];
+ const AttributeHeader* ah = (const AttributeHeader*)&data[offset + 1];
+ const Uint32 attrId = ah->getAttributeId();
+ const Uint32 dataSize = ah->getDataSize();
+ if (type > 4 || attrId >= index.m_numAttrs || dstPos + 2 + dataSize > dstSize) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
+ return;
+ }
+ // copy header
+ xfrmData[dstPos + 0] = data[offset + 0];
+ xfrmData[dstPos + 1] = data[offset + 1];
+ // copy bound value
+ Uint32 dstWords = 0;
+ if (! ah->isNULL()) {
+ jam();
+ const DescAttr& descAttr = descEnt.m_descAttr[attrId];
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(descAttr.m_attrDesc);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ if (srcWords != dataSize) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
+ return;
+ }
+ uchar* dstPtr = (uchar*)&xfrmData[dstPos + 2];
+ const uchar* srcPtr = (const uchar*)&data[offset + 2];
+ if (descAttr.m_charset == 0) {
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId = descAttr.m_typeId;
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ if (! ok) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidCharFormat;
+ return;
+ }
+ CHARSET_INFO* cs = all_charsets[descAttr.m_charset];
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ if (dstLen > ((dstSize - dstPos) << 2)) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::TooMuchAttrInfo;
+ return;
+ }
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ dstWords = n / 4;
+ }
+ }
+ for (unsigned j = 0; j <= 1; j++) {
+ jam();
+ // check if lower/upper bit matches
+ const unsigned luBit = (j << 1);
+ if ((type & 0x2) != luBit && type != 4)
+ continue;
+ // EQ -> LE, GE
+ const unsigned type2 = (type & 0x1) | luBit;
+ // fill in any gap
+ while (maxAttrId[j] <= attrId) {
+ jam();
+ BoundInfo& b = boundInfo[j][maxAttrId[j]++];
+ b.type2 = -1;
+ }
+ BoundInfo& b = boundInfo[j][attrId];
+ if (b.type2 != -1) {
+ // compare with previously defined bound
+ if (b.type2 != (int)type2 ||
+ b.size != 2 + dstWords ||
+ memcmp(&xfrmData[b.offset + 2], &xfrmData[dstPos + 2], dstWords << 2) != 0) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidBounds;
+ return;
+ }
+ } else {
+ // fix length
+ AttributeHeader* ah = (AttributeHeader*)&xfrmData[dstPos + 1];
+ ah->setDataSize(dstWords);
+ // enter new bound
+ jam();
+ b.type2 = type2;
+ b.offset = dstPos;
+ b.size = 2 + dstWords;
+ }
+ }
+ // jump to next
+ offset += 2 + dataSize;
+ dstPos += 2 + dstWords;
+ }
+ if (offset != req->boundAiLength) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
+ return;
+ }
+ for (unsigned j = 0; j <= 1; j++) {
+ // save lower/upper bound in index attribute id order
+ for (unsigned i = 0; i < maxAttrId[j]; i++) {
+ jam();
+ const BoundInfo& b = boundInfo[j][i];
+ // check for gap or strict bound before last
+ if (b.type2 == -1 || (i + 1 < maxAttrId[j] && (b.type2 & 0x1))) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidBounds;
+ return;
+ }
+ bool ok = scan.m_bound[j]->append(&xfrmData[b.offset], b.size);
+ if (! ok) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::OutOfBuffers;
+ return;
+ }
+ }
+ scan.m_boundCnt[j] = maxAttrId[j];
+ }
+ // no error
+ sig->errorCode = 0;
+}
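The EQ-to-LE/GE normalization described above boils down to the bit layout used in the loop: bit 0 of a bound type is the strictness bit, bit 1 selects the lower versus the upper side, and type 4 (equality) is entered on both sides as a non-strict bound. A small standalone sketch of just that mapping, assuming the same encoding:

#include <cstdio>

int main()
{
  for (unsigned type = 0; type <= 4; type++) {
    for (unsigned j = 0; j <= 1; j++) {            // j = 0 lower, j = 1 upper
      const unsigned luBit = (j << 1);
      if ((type & 0x2) != luBit && type != 4)
        continue;                                  // bound is for the other side
      const unsigned type2 = (type & 0x1) | luBit; // EQ -> LE/GE
      std::printf("type %u on %s side -> type2 %u (strict bit %u)\n",
                  type, j == 0 ? "lower" : "upper", type2, type2 & 0x1);
    }
  }
  return 0;
}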
+
+void
+Dbtux::execNEXT_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ const NextScanReq reqCopy = *(const NextScanReq*)signal->getDataPtr();
+ const NextScanReq* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ scanPtr.i = req->accPtr;
+ c_scanOpPool.getPtr(scanPtr);
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "NEXT_SCANREQ scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ // handle unlock previous and close scan
+ switch (req->scanFlag) {
+ case NextScanReq::ZSCAN_NEXT:
+ jam();
+ break;
+ case NextScanReq::ZSCAN_NEXT_COMMIT:
+ jam();
+ case NextScanReq::ZSCAN_COMMIT:
+ jam();
+ if (! scan.m_readCommitted) {
+ jam();
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::Unlock;
+ lockReq->accOpPtr = req->accOperationPtr;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ removeAccLockOp(scan, req->accOperationPtr);
+ }
+ if (req->scanFlag == NextScanReq::ZSCAN_COMMIT) {
+ jam();
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ unsigned signalLength = 1;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ return;
+ }
+ break;
+ case NextScanReq::ZSCAN_CLOSE:
+ jam();
+ // unlink from tree node first to avoid state changes
+ if (scan.m_scanPos.m_loc != NullTupLoc) {
+ jam();
+ const TupLoc loc = scan.m_scanPos.m_loc;
+ NodeHandle node(frag);
+ selectNode(node, loc);
+ unlinkScan(node, scanPtr);
+ scan.m_scanPos.m_loc = NullTupLoc;
+ }
+ if (scan.m_lockwait) {
+ jam();
+ ndbrequire(scan.m_accLockOp != RNIL);
+ // use ACC_ABORTCONF to flush out any reply in job buffer
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::AbortWithConf;
+ lockReq->accOpPtr = scan.m_accLockOp;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ scan.m_state = ScanOp::Aborting;
+ return;
+ }
+ if (scan.m_state == ScanOp::Locked) {
+ jam();
+ ndbrequire(scan.m_accLockOp != RNIL);
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::Unlock;
+ lockReq->accOpPtr = scan.m_accLockOp;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ scan.m_accLockOp = RNIL;
+ }
+ scan.m_state = ScanOp::Aborting;
+ scanClose(signal, scanPtr);
+ return;
+ case NextScanReq::ZSCAN_NEXT_ABORT:
+ jam();
+ default:
+ jam();
+ ndbrequire(false);
+ break;
+ }
+ // start looking for next scan result
+ AccCheckScan* checkReq = (AccCheckScan*)signal->getDataPtrSend();
+ checkReq->accPtr = scanPtr.i;
+ checkReq->checkLcpStop = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ EXECUTE_DIRECT(DBTUX, GSN_ACC_CHECK_SCAN, signal, AccCheckScan::SignalLength);
+ jamEntry();
+}
+
+void
+Dbtux::execACC_CHECK_SCAN(Signal* signal)
+{
+ jamEntry();
+ const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr();
+ const AccCheckScan* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ scanPtr.i = req->accPtr;
+ c_scanOpPool.getPtr(scanPtr);
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "ACC_CHECK_SCAN scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
+ jam();
+ signal->theData[0] = scan.m_userPtr;
+ signal->theData[1] = true;
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ return; // stop
+ }
+ if (scan.m_lockwait) {
+ jam();
+ // LQH asks if we are waiting for lock and we tell it to ask again
+ const TreeEnt ent = scan.m_scanEnt;
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ conf->accOperationPtr = RNIL; // no tuple returned
+ conf->fragId = frag.m_fragId | ent.m_fragBit;
+ unsigned signalLength = 3;
+ // if TC has ordered scan close, it will be detected here
+ sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ return; // stop
+ }
+ if (scan.m_state == ScanOp::First) {
+ jam();
+ // search is done only once in single range scan
+ scanFirst(scanPtr);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "First scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ }
+ if (scan.m_state == ScanOp::Next) {
+ jam();
+ // look for next
+ scanNext(scanPtr, false);
+ }
+ // for reading tuple key in Current or Locked state
+ Data pkData = c_dataBuffer;
+ unsigned pkSize = 0; // indicates not yet done
+ if (scan.m_state == ScanOp::Current) {
+ // found an entry to return
+ jam();
+ ndbrequire(scan.m_accLockOp == RNIL);
+ if (! scan.m_readCommitted) {
+ jam();
+ const TreeEnt ent = scan.m_scanEnt;
+ // read tuple key
+ readTablePk(frag, ent, pkData, pkSize);
+ // get read lock or exclusive lock
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo =
+ scan.m_lockMode == 0 ? AccLockReq::LockShared : AccLockReq::LockExclusive;
+ lockReq->accOpPtr = RNIL;
+ lockReq->userPtr = scanPtr.i;
+ lockReq->userRef = reference();
+ lockReq->tableId = scan.m_tableId;
+ lockReq->fragId = frag.m_fragId | ent.m_fragBit;
+ lockReq->fragPtrI = frag.m_accTableFragPtrI[ent.m_fragBit];
+ const Uint32* const buf32 = static_cast<Uint32*>(pkData);
+ const Uint64* const buf64 = reinterpret_cast<const Uint64*>(buf32);
+ lockReq->hashValue = md5_hash(buf64, pkSize);
+ lockReq->tupAddr = getTupAddr(frag, ent);
+ lockReq->transId1 = scan.m_transId1;
+ lockReq->transId2 = scan.m_transId2;
+ // execute
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::LockSignalLength);
+ jamEntry();
+ switch (lockReq->returnCode) {
+ case AccLockReq::Success:
+ jam();
+ scan.m_state = ScanOp::Locked;
+ scan.m_accLockOp = lockReq->accOpPtr;
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Lock immediate scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ break;
+ case AccLockReq::IsBlocked:
+ jam();
+ // normal lock wait
+ scan.m_state = ScanOp::Blocked;
+ scan.m_lockwait = true;
+ scan.m_accLockOp = lockReq->accOpPtr;
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Lock wait scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ // LQH will wake us up
+ signal->theData[0] = scan.m_userPtr;
+ signal->theData[1] = true;
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ return; // stop
+ break;
+ case AccLockReq::Refused:
+ jam();
+ // we cannot see deleted tuple (assert only)
+ ndbassert(false);
+ // skip it
+ scan.m_state = ScanOp::Next;
+ signal->theData[0] = scan.m_userPtr;
+ signal->theData[1] = true;
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ return; // stop
+ break;
+ case AccLockReq::NoFreeOp:
+ jam();
+ // max ops should depend on max scans (assert only)
+ ndbassert(false);
+ // stay in Current state
+ scan.m_state = ScanOp::Current;
+ signal->theData[0] = scan.m_userPtr;
+ signal->theData[1] = true;
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ return; // stop
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ } else {
+ scan.m_state = ScanOp::Locked;
+ }
+ }
+ if (scan.m_state == ScanOp::Locked) {
+ // we have lock or do not need one
+ jam();
+ // read keys if not already done (uses signal)
+ const TreeEnt ent = scan.m_scanEnt;
+ // conf signal
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ // the lock is passed to LQH
+ Uint32 accLockOp = scan.m_accLockOp;
+ if (accLockOp != RNIL) {
+ scan.m_accLockOp = RNIL;
+ // remember it until LQH unlocks it
+ addAccLockOp(scan, accLockOp);
+ } else {
+ ndbrequire(scan.m_readCommitted);
+ // operation RNIL in LQH would signal no tuple returned
+ accLockOp = (Uint32)-1;
+ }
+ conf->accOperationPtr = accLockOp;
+ conf->fragId = frag.m_fragId | ent.m_fragBit;
+ conf->localKey[0] = getTupAddr(frag, ent);
+ conf->localKey[1] = 0;
+ conf->localKeyLength = 1;
+ unsigned signalLength = 6;
+ // add key info
+ if (! scan.m_readCommitted) {
+ sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ } else {
+ Uint32 blockNo = refToBlock(scan.m_userRef);
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
+ }
+ // next time look for next entry
+ scan.m_state = ScanOp::Next;
+ return;
+ }
+ // XXX in ACC this is checked before req->checkLcpStop
+ if (scan.m_state == ScanOp::Last ||
+ scan.m_state == ScanOp::Invalid) {
+ jam();
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ conf->accOperationPtr = RNIL;
+ conf->fragId = RNIL;
+ unsigned signalLength = 3;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+}
+
+/*
+ * Lock succeeded (after delay) in ACC. If the lock is for current
+ * entry, set state to Locked. If the lock is for an entry we were
+ * moved away from, simply unlock it. Finally, if we are closing the
+ * scan, do nothing since we have already sent an abort request.
+ */
+void
+Dbtux::execACCKEYCONF(Signal* signal)
+{
+ jamEntry();
+ ScanOpPtr scanPtr;
+ scanPtr.i = signal->theData[0];
+ c_scanOpPool.getPtr(scanPtr);
+ ScanOp& scan = *scanPtr.p;
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Lock obtained scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ ndbrequire(scan.m_lockwait && scan.m_accLockOp != RNIL);
+ scan.m_lockwait = false;
+ if (scan.m_state == ScanOp::Blocked) {
+ // the lock wait was for current entry
+ jam();
+ scan.m_state = ScanOp::Locked;
+ // LQH has the ball
+ return;
+ }
+ if (scan.m_state != ScanOp::Aborting) {
+ // we were moved, release lock
+ jam();
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::Unlock;
+ lockReq->accOpPtr = scan.m_accLockOp;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ scan.m_accLockOp = RNIL;
+ // LQH has the ball
+ return;
+ }
+ // lose the lock
+ scan.m_accLockOp = RNIL;
+ // continue at ACC_ABORTCONF
+}
+
+/*
+ * Lock failed (after delay) in ACC. Probably means somebody ahead of
+ * us in lock queue deleted the tuple.
+ */
+void
+Dbtux::execACCKEYREF(Signal* signal)
+{
+ jamEntry();
+ ScanOpPtr scanPtr;
+ scanPtr.i = signal->theData[0];
+ c_scanOpPool.getPtr(scanPtr);
+ ScanOp& scan = *scanPtr.p;
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Lock refused scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ ndbrequire(scan.m_lockwait && scan.m_accLockOp != RNIL);
+ scan.m_lockwait = false;
+ if (scan.m_state != ScanOp::Aborting) {
+ jam();
+ // release the operation
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::Abort;
+ lockReq->accOpPtr = scan.m_accLockOp;
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ scan.m_accLockOp = RNIL;
+ // scan position should already have been moved (assert only)
+ if (scan.m_state == ScanOp::Blocked) {
+ jam();
+ ndbassert(false);
+ scan.m_state = ScanOp::Next;
+ }
+ // LQH has the ball
+ return;
+ }
+ // lose the lock
+ scan.m_accLockOp = RNIL;
+ // continue at ACC_ABORTCONF
+}
+
+/*
+ * Received when scan is closing. This signal arrives after any
+ * ACCKEYCONF or ACCKEYREF which may have been in the job buffer.
+ */
+void
+Dbtux::execACC_ABORTCONF(Signal* signal)
+{
+ jamEntry();
+ ScanOpPtr scanPtr;
+ scanPtr.i = signal->theData[0];
+ c_scanOpPool.getPtr(scanPtr);
+ ScanOp& scan = *scanPtr.p;
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "ACC_ABORTCONF scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ ndbrequire(scan.m_state == ScanOp::Aborting);
+ // most likely we are still in lock wait
+ if (scan.m_lockwait) {
+ jam();
+ scan.m_lockwait = false;
+ scan.m_accLockOp = RNIL;
+ }
+ scanClose(signal, scanPtr);
+}
+
+/*
+ * Find start position for single range scan. If it exists, sets state
+ * to Next and links the scan to the node. The first entry is returned
+ * by scanNext.
+ */
+void
+Dbtux::scanFirst(ScanOpPtr scanPtr)
+{
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+ TreeHead& tree = frag.m_tree;
+ // set up index keys for this operation
+ setKeyAttrs(frag);
+ // scan direction 0, 1
+ const unsigned idir = scan.m_descending;
+ // unpack start key into c_dataBuffer
+ const ScanBound& bound = *scan.m_bound[idir];
+ ScanBoundIterator iter;
+ bound.first(iter);
+ for (unsigned j = 0; j < bound.getSize(); j++) {
+ jam();
+ c_dataBuffer[j] = *iter.data;
+ bound.next(iter);
+ }
+ TreePos treePos;
+ searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], scan.m_descending, treePos);
+ if (treePos.m_loc == NullTupLoc) {
+ // empty result set
+ jam();
+ scan.m_state = ScanOp::Last;
+ return;
+ }
+ // set position and state
+ scan.m_scanPos = treePos;
+ scan.m_state = ScanOp::Next;
+ // link the scan to node found
+ NodeHandle node(frag);
+ selectNode(node, treePos.m_loc);
+ linkScan(node, scanPtr);
+}
+
+/*
+ * Move to next entry. The scan is already linked to some node. When
+ * we leave, if an entry was found, it will be linked to a possibly
+ * different node. The scan has a position, and a direction which tells
+ * from where we came to this position. This is one of (all comments
+ * are in terms of ascending scan):
+ *
+ * 0 - up from left child (scan this node next)
+ * 1 - up from right child (proceed to parent)
+ * 2 - up from root (the scan ends)
+ * 3 - left to right within node (at end proceed to right child)
+ * 4 - down from parent (proceed to left child)
+ *
+ * If an entry was found, scan direction is 3. Therefore tree
+ * re-organizations need not worry about scan direction.
+ */
+void
+Dbtux::scanNext(ScanOpPtr scanPtr, bool fromMaintReq)
+{
+ ScanOp& scan = *scanPtr.p;
+ Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Next in scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+ // cannot be moved away from tuple we have locked
+ ndbrequire(scan.m_state != ScanOp::Locked);
+ // set up index keys for this operation
+ setKeyAttrs(frag);
+ // scan direction
+ const unsigned idir = scan.m_descending; // 0, 1
+ const int jdir = 1 - 2 * (int)idir; // 1, -1
+ // unpack end key into c_dataBuffer
+ const ScanBound& bound = *scan.m_bound[1 - idir];
+ ScanBoundIterator iter;
+ bound.first(iter);
+ for (unsigned j = 0; j < bound.getSize(); j++) {
+ jam();
+ c_dataBuffer[j] = *iter.data;
+ bound.next(iter);
+ }
+ // use copy of position
+ TreePos pos = scan.m_scanPos;
+ // get and remember original node
+ NodeHandle origNode(frag);
+ selectNode(origNode, pos.m_loc);
+ ndbrequire(islinkScan(origNode, scanPtr));
+ // current node in loop
+ NodeHandle node = origNode;
+ // copy of entry found
+ TreeEnt ent;
+ while (true) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Scan next pos " << pos << " " << node << endl;
+ }
+#endif
+ if (pos.m_dir == 2) {
+ // coming up from root ends the scan
+ jam();
+ pos.m_loc = NullTupLoc;
+ scan.m_state = ScanOp::Last;
+ break;
+ }
+ if (node.m_loc != pos.m_loc) {
+ jam();
+ selectNode(node, pos.m_loc);
+ }
+ if (pos.m_dir == 4) {
+ // coming down from parent proceed to left child
+ jam();
+ TupLoc loc = node.getLink(idir);
+ if (loc != NullTupLoc) {
+ jam();
+ pos.m_loc = loc;
+ pos.m_dir = 4; // unchanged
+ continue;
+ }
+ // pretend we came from left child
+ pos.m_dir = idir;
+ }
+ const unsigned occup = node.getOccup();
+ if (occup == 0) {
+ jam();
+ ndbrequire(fromMaintReq);
+ // move back to parent - see comment in treeRemoveInner
+ pos.m_loc = node.getLink(2);
+ pos.m_dir = node.getSide();
+ continue;
+ }
+ if (pos.m_dir == idir) {
+ // coming up from left child scan current node
+ jam();
+ pos.m_pos = idir == 0 ? 0 : occup - 1;
+ pos.m_match = false;
+ pos.m_dir = 3;
+ }
+ if (pos.m_dir == 3) {
+ // within node
+ jam();
+ // advance position
+ if (! pos.m_match)
+ pos.m_match = true;
+ else
+ // becomes ZNIL (which is > occup) if 0 and scan descending
+ pos.m_pos += jdir;
+ if (pos.m_pos < occup) {
+ jam();
+ ent = node.getEnt(pos.m_pos);
+ pos.m_dir = 3; // unchanged
+ // read and compare all attributes
+ readKeyAttrs(frag, ent, 0, c_entryKey);
+ int ret = cmpScanBound(frag, 1 - idir, c_dataBuffer, scan.m_boundCnt[1 - idir], c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (jdir * ret < 0) {
+ jam();
+ // hit upper bound of single range scan
+ pos.m_loc = NullTupLoc;
+ scan.m_state = ScanOp::Last;
+ break;
+ }
+ // can we see it
+ if (! scanVisible(scanPtr, ent)) {
+ jam();
+ continue;
+ }
+ // found entry
+ scan.m_state = ScanOp::Current;
+ break;
+ }
+ // after node proceed to right child
+ TupLoc loc = node.getLink(1 - idir);
+ if (loc != NullTupLoc) {
+ jam();
+ pos.m_loc = loc;
+ pos.m_dir = 4;
+ continue;
+ }
+ // pretend we came from right child
+ pos.m_dir = 1 - idir;
+ }
+ if (pos.m_dir == 1 - idir) {
+ // coming up from right child proceed to parent
+ jam();
+ pos.m_loc = node.getLink(2);
+ pos.m_dir = node.getSide();
+ continue;
+ }
+ ndbrequire(false);
+ }
+ // copy back position
+ scan.m_scanPos = pos;
+ // relink
+ if (scan.m_state == ScanOp::Current) {
+ ndbrequire(pos.m_match == true && pos.m_dir == 3);
+ ndbrequire(pos.m_loc == node.m_loc);
+ if (origNode.m_loc != node.m_loc) {
+ jam();
+ unlinkScan(origNode, scanPtr);
+ linkScan(node, scanPtr);
+ }
+ // copy found entry
+ scan.m_scanEnt = ent;
+ } else if (scan.m_state == ScanOp::Last) {
+ jam();
+ ndbrequire(pos.m_loc == NullTupLoc);
+ unlinkScan(origNode, scanPtr);
+ } else {
+ ndbrequire(false);
+ }
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Next out scan " << scanPtr.i << " " << scan << endl;
+ }
+#endif
+}
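The direction codes above drive an iterative in-order traversal that uses parent links instead of a stack. A minimal standalone sketch of the same walk on a simplified tree with one key per node (the real code additionally scans within multi-entry nodes, direction 3, and can also run descending); the Node type and field names here are hypothetical:

struct Node {
  int key;
  Node* link[3];     // 0 = left child, 1 = right child, 2 = parent
  unsigned side;     // which child of its parent this node is (0 or 1)
};

// Visit all keys in ascending order, starting from the root.
template <class Visit>
void inorderWalk(Node* root, Visit visit)
{
  Node* node = root;
  unsigned dir = 4;                    // 4 = coming down from parent
  while (node != nullptr) {
    if (dir == 4) {
      if (node->link[0] != nullptr) {  // keep descending to the left
        node = node->link[0];
        continue;
      }
      dir = 0;                         // pretend we came up from the left
    }
    if (dir == 0) {                    // left subtree done: this node is next
      visit(node->key);
      if (node->link[1] != nullptr) {
        node = node->link[1];          // then descend into the right subtree
        dir = 4;
        continue;
      }
      dir = 1;                         // pretend we came up from the right
    }
    // dir == 1: right subtree done, move up; reaching the root's parent
    // (nullptr) ends the walk, matching direction 2 in the comment above
    dir = node->side;
    node = node->link[2];
  }
}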
+
+/*
+ * Check if an entry is visible to the scan.
+ *
+ * There is a special check to never accept same tuple twice in a row.
+ * This is faster than asking TUP. It also fixes some special cases
+ * which are not analyzed or handled yet.
+ */
+bool
+Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent)
+{
+ const ScanOp& scan = *scanPtr.p;
+ const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
+ Uint32 fragBit = ent.m_fragBit;
+ Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit];
+ Uint32 fragId = frag.m_fragId | fragBit;
+ Uint32 tupAddr = getTupAddr(frag, ent);
+ Uint32 tupVersion = ent.m_tupVersion;
+ // check for same tuple twice in row
+ if (scan.m_scanEnt.m_tupLoc == ent.m_tupLoc &&
+ scan.m_scanEnt.m_fragBit == fragBit) {
+ jam();
+ return false;
+ }
+ Uint32 transId1 = scan.m_transId1;
+ Uint32 transId2 = scan.m_transId2;
+ Uint32 savePointId = scan.m_savePointId;
+ bool ret = c_tup->tuxQueryTh(tableFragPtrI, tupAddr, tupVersion, transId1, transId2, savePointId);
+ jamEntry();
+ return ret;
+}
+
+/*
+ * Finish closing of scan and send conf. Any lock wait has been done
+ * already.
+ */
+void
+Dbtux::scanClose(Signal* signal, ScanOpPtr scanPtr)
+{
+ ScanOp& scan = *scanPtr.p;
+ ndbrequire(! scan.m_lockwait && scan.m_accLockOp == RNIL);
+ // unlock all not unlocked by LQH
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
+ if (scan.m_accLockOps[i] != RNIL) {
+ jam();
+ AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+ lockReq->returnCode = RNIL;
+ lockReq->requestInfo = AccLockReq::Abort;
+ lockReq->accOpPtr = scan.m_accLockOps[i];
+ EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+ jamEntry();
+ ndbrequire(lockReq->returnCode == AccLockReq::Success);
+ scan.m_accLockOps[i] = RNIL;
+ }
+ }
+ // send conf
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scanPtr.p->m_userPtr;
+ conf->accOperationPtr = RNIL;
+ conf->fragId = RNIL;
+ unsigned signalLength = 3;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ releaseScanOp(scanPtr);
+}
+
+void
+Dbtux::addAccLockOp(ScanOp& scan, Uint32 accLockOp)
+{
+ ndbrequire(accLockOp != RNIL);
+ Uint32* list = scan.m_accLockOps;
+ bool ok = false;
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
+ ndbrequire(list[i] != accLockOp);
+ if (! ok && list[i] == RNIL) {
+ list[i] = accLockOp;
+ ok = true;
+ // continue check for duplicates
+ }
+ }
+ if (! ok) {
+ unsigned i = scan.m_maxAccLockOps;
+ if (i < MaxAccLockOps) {
+ list[i] = accLockOp;
+ ok = true;
+ scan.m_maxAccLockOps = i + 1;
+ }
+ }
+ ndbrequire(ok);
+}
+
+void
+Dbtux::removeAccLockOp(ScanOp& scan, Uint32 accLockOp)
+{
+ ndbrequire(accLockOp != RNIL);
+ Uint32* list = scan.m_accLockOps;
+ bool ok = false;
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
+ if (list[i] == accLockOp) {
+ list[i] = RNIL;
+ ok = true;
+ break;
+ }
+ }
+ ndbrequire(ok);
+}
+
+/*
+ * Release allocated records.
+ */
+void
+Dbtux::releaseScanOp(ScanOpPtr& scanPtr)
+{
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Release scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ }
+#endif
+ Frag& frag = *c_fragPool.getPtr(scanPtr.p->m_fragPtrI);
+ scanPtr.p->m_boundMin.release();
+ scanPtr.p->m_boundMax.release();
+ // unlink from per-fragment list and release from pool
+ frag.m_scanList.release(scanPtr);
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
new file mode 100644
index 00000000000..b0e2a664bfd
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
@@ -0,0 +1,449 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_SEARCH_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Search for entry to add.
+ *
+ * Similar to searchToRemove (see below).
+ *
+ * TODO optimize for initial equal attrs in node min/max
+ */
+void
+Dbtux::searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ const unsigned numAttrs = frag.m_numAttrs;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ // assume success
+ treePos.m_match = false;
+ if (currNode.m_loc == NullTupLoc) {
+ // empty tree
+ jam();
+ return;
+ }
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ /*
+ * In order to not (yet) change old behaviour, a position between
+ * 2 nodes returns the one at the bottom of the tree.
+ */
+ NodeHandle bottomNode(frag);
+ while (true) {
+ jam();
+ selectNode(currNode, currNode.m_loc);
+ int ret;
+ // compare prefix
+ unsigned start = 0;
+ ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare remaining attributes
+ ndbrequire(start < numAttrs);
+ readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getMinMax(0));
+ }
+ if (ret < 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ }
+ } else if (ret > 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ // failed
+ treePos.m_match = true;
+ return;
+ }
+ break;
+ }
+ // anticipate
+ treePos.m_loc = currNode.m_loc;
+ // binary search
+ int lo = -1;
+ unsigned hi = currNode.getOccup();
+ int ret;
+ while (1) {
+ jam();
+ // hi - lo > 1 implies lo < j < hi
+ int j = (hi + lo) / 2;
+ // read and compare attributes
+ unsigned start = 0;
+ readKeyAttrs(frag, currNode.getEnt(j), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getEnt(j));
+ }
+ if (ret < 0)
+ hi = j;
+ else if (ret > 0)
+ lo = j;
+ else {
+ treePos.m_pos = j;
+ // failed
+ treePos.m_match = true;
+ return;
+ }
+ if (hi - lo == 1)
+ break;
+ }
+ if (ret < 0) {
+ jam();
+ treePos.m_pos = hi;
+ return;
+ }
+ if (hi < currNode.getOccup()) {
+ jam();
+ treePos.m_pos = hi;
+ return;
+ }
+ if (bottomNode.isNull()) {
+ jam();
+ treePos.m_pos = hi;
+ return;
+ }
+ jam();
+ // backwards compatible for now
+ treePos.m_loc = bottomNode.m_loc;
+ treePos.m_pos = 0;
+}
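The in-node binary search above keeps the invariant lo < position <= hi, with lo starting at -1 and hi at the node occupancy, and lands on the slot where the new entry belongs. A minimal sketch of the same search over a plain sorted vector, ignoring the prefix/attribute comparison and the entry tie-break:

#include <vector>

// Return the position at which `key` should be inserted to keep
// `list` sorted; equal keys insert after the existing ones.
int findInsertPos(const std::vector<int>& list, int key)
{
  int lo = -1;                       // list[lo] <= key (or lo == -1)
  int hi = (int)list.size();         // key < list[hi] (or hi == size)
  while (hi - lo > 1) {
    const int j = (hi + lo) / 2;     // lo < j < hi
    if (key < list[j])
      hi = j;
    else
      lo = j;
  }
  return hi;
}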
+
+/*
+ * Search for entry to remove.
+ *
+ * Compares search key to each node min. A move to right subtree can
+ * overshoot target node. The last such node is saved. The final node
+ * is a semi-leaf or leaf. If search key is less than final node min
+ * then the saved node is the g.l.b of the final node and we move back
+ * to it.
+ */
+void
+Dbtux::searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ const unsigned numAttrs = frag.m_numAttrs;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ // assume success
+ treePos.m_match = true;
+ if (currNode.m_loc == NullTupLoc) {
+ // empty tree
+ jam();
+ // failed
+ treePos.m_match = false;
+ return;
+ }
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ while (true) {
+ jam();
+ selectNode(currNode, currNode.m_loc);
+ int ret;
+ // compare prefix
+ unsigned start = 0;
+ ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare remaining attributes
+ ndbrequire(start < numAttrs);
+ readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey);
+ ret = cmpSearchKey(frag, start, searchKey, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret == 0) {
+ jam();
+ // keys are equal, compare entry values
+ ret = searchEnt.cmp(currNode.getMinMax(0));
+ }
+ if (ret < 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b
+ currNode = glbNode;
+ }
+ } else if (ret > 0) {
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ jam();
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ return;
+ }
+ break;
+ }
+ // anticipate
+ treePos.m_loc = currNode.m_loc;
+ // pos 0 was handled above
+ for (unsigned j = 1, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ // compare only the entry
+ if (searchEnt.eq(currNode.getEnt(j))) {
+ jam();
+ treePos.m_pos = j;
+ return;
+ }
+ }
+ treePos.m_pos = currNode.getOccup();
+ // failed
+ treePos.m_match = false;
+}
+
+/*
+ * Search for scan start position.
+ *
+ * Similar to searchToAdd. The routines differ somewhat depending on
+ * scan direction and are done by separate methods.
+ */
+void
+Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ if (tree.m_root != NullTupLoc) {
+ if (! descending)
+ searchToScanAscending(frag, boundInfo, boundCount, treePos);
+ else
+ searchToScanDescending(frag, boundInfo, boundCount, treePos);
+ return;
+ }
+ // empty tree
+}
+
+void
+Dbtux::searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ NodeHandle bottomNode(frag);
+ // always before entry
+ treePos.m_match = false;
+ while (true) {
+ jam();
+ selectNode(currNode, currNode.m_loc);
+ int ret;
+ // compare prefix
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare all attributes
+ readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey);
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret < 0) {
+ // bound is left of this node
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ } else {
+ // start scanning this node
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_dir = 3;
+ return;
+ }
+ } else if (ret > 0) {
+ // bound is at or right of this node
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ ndbrequire(false);
+ }
+ break;
+ }
+ for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ int ret;
+ // read and compare attributes
+ readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey);
+ ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret < 0) {
+ // found first entry satisfying the bound
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j;
+ treePos.m_dir = 3;
+ return;
+ }
+ }
+ // bound is to right of this node
+ if (! bottomNode.isNull()) {
+ jam();
+ // start scanning the l.u.b
+ treePos.m_loc = bottomNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_dir = 3;
+ return;
+ }
+ // start scanning upwards (pretend we came from right child)
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_dir = 1;
+}
+
+void
+Dbtux::searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ NodeHandle bottomNode(frag);
+ // always before entry
+ treePos.m_match = false;
+ while (true) {
+ jam();
+ selectNode(currNode, currNode.m_loc);
+ int ret;
+ // compare prefix
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare all attributes
+ readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey);
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret < 0) {
+ // bound is left of this node
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ } else {
+ // empty result set
+ return;
+ }
+ } else if (ret > 0) {
+ // bound is at or right of this node
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ ndbrequire(false);
+ }
+ break;
+ }
+ for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ int ret;
+ // read and compare attributes
+ readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey);
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret < 0) {
+ if (j > 0) {
+ // start scanning from previous entry
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j - 1;
+ treePos.m_dir = 3;
+ return;
+ }
+ // start scanning upwards (pretend we came from left child)
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_dir = 0;
+ return;
+ }
+ }
+ // start scanning this node
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = currNode.getOccup() - 1;
+ treePos.m_dir = 3;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
new file mode 100644
index 00000000000..5107a8d8e31
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -0,0 +1,709 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUX_TREE_CPP
+#include "Dbtux.hpp"
+
+/*
+ * Add entry. Handle the case when there is room for one more. This
+ * is the common case given slack in nodes.
+ */
+void
+Dbtux::treeAdd(Frag& frag, TreePos treePos, TreeEnt ent)
+{
+ TreeHead& tree = frag.m_tree;
+ NodeHandle node(frag);
+ if (treePos.m_loc != NullTupLoc) {
+ // non-empty tree
+ jam();
+ selectNode(node, treePos.m_loc);
+ unsigned pos = treePos.m_pos;
+ if (node.getOccup() < tree.m_maxOccup) {
+ // node has room
+ jam();
+ nodePushUp(node, pos, ent, RNIL);
+ return;
+ }
+ treeAddFull(frag, node, pos, ent);
+ return;
+ }
+ jam();
+ insertNode(node);
+ nodePushUp(node, 0, ent, RNIL);
+ node.setSide(2);
+ tree.m_root = node.m_loc;
+}
+
+/*
+ * Add entry when the node is full. Handle the case when there is a g.l.b
+ * node in the left subtree with room for one more. It will receive the min
+ * entry of this node. The min entry could be the entry to add.
+ */
+void
+Dbtux::treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent)
+{
+ TreeHead& tree = frag.m_tree;
+ TupLoc loc = lubNode.getLink(0);
+ if (loc != NullTupLoc) {
+ // find g.l.b node
+ NodeHandle glbNode(frag);
+ do {
+ jam();
+ selectNode(glbNode, loc);
+ loc = glbNode.getLink(1);
+ } while (loc != NullTupLoc);
+ if (glbNode.getOccup() < tree.m_maxOccup) {
+ // g.l.b node has room
+ jam();
+ Uint32 scanList = RNIL;
+ if (pos != 0) {
+ jam();
+ // add the new entry and return min entry
+ nodePushDown(lubNode, pos - 1, ent, scanList);
+ }
+ // g.l.b node receives min entry from l.u.b node
+ nodePushUp(glbNode, glbNode.getOccup(), ent, scanList);
+ return;
+ }
+ treeAddNode(frag, lubNode, pos, ent, glbNode, 1);
+ return;
+ }
+ treeAddNode(frag, lubNode, pos, ent, lubNode, 0);
+}
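
To illustrate the spill described above with something self-contained: the entry that ends up in the g.l.b node is either the old minimum of the full bounding node or, when pos == 0, the new entry itself. The sketch below uses plain std::vector and hypothetical names; it is not the nodePushUp/nodePushDown API, only the key movement.

#include <vector>

// 'lub' is the full, sorted bounding node; 'glb' is its predecessor node with room.
// Precondition: 'ent' is >= every key already in 'glb' (the search guarantees this).
void addToFullNode(std::vector<int>& lub, std::vector<int>& glb,
                   unsigned pos, int ent)
{
  int spill = ent;                          // pos == 0: the new entry is the minimum
  if (pos != 0) {
    spill = lub.front();                    // old minimum moves out
    lub.erase(lub.begin());
    lub.insert(lub.begin() + (pos - 1), ent);
  }
  glb.push_back(spill);                     // g.l.b node stays sorted, keys <= lub's keys
}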
+
+/*
+ * Add entry when there is no g.l.b node in the left subtree or the g.l.b
+ * node is full. We must add a new left or right child node which
+ * becomes the new g.l.b node.
+ */
+void
+Dbtux::treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i)
+{
+ NodeHandle glbNode(frag);
+ insertNode(glbNode);
+ // connect parent and child
+ parentNode.setLink(i, glbNode.m_loc);
+ glbNode.setLink(2, parentNode.m_loc);
+ glbNode.setSide(i);
+ Uint32 scanList = RNIL;
+ if (pos != 0) {
+ jam();
+ // add the new entry and return min entry
+ nodePushDown(lubNode, pos - 1, ent, scanList);
+ }
+ // g.l.b node receives min entry from l.u.b node
+ nodePushUp(glbNode, 0, ent, scanList);
+ // re-balance the tree
+ treeAddRebalance(frag, parentNode, i);
+}
+
+/*
+ * Re-balance tree after adding a node. The process starts with the
+ * parent of the added node.
+ */
+void
+Dbtux::treeAddRebalance(Frag& frag, NodeHandle node, unsigned i)
+{
+ while (true) {
+ // height of subtree i has increased by 1
+ int j = (i == 0 ? -1 : +1);
+ int b = node.getBalance();
+ if (b == 0) {
+ // perfectly balanced
+ jam();
+ node.setBalance(j);
+ // height change propagates up
+ } else if (b == -j) {
+ // height of shorter subtree increased
+ jam();
+ node.setBalance(0);
+ // height of tree did not change - done
+ break;
+ } else if (b == j) {
+ // height of longer subtree increased
+ jam();
+ NodeHandle childNode(frag);
+ selectNode(childNode, node.getLink(i));
+ int b2 = childNode.getBalance();
+ if (b2 == b) {
+ jam();
+ treeRotateSingle(frag, node, i);
+ } else if (b2 == -b) {
+ jam();
+ treeRotateDouble(frag, node, i);
+ } else {
+ // height of subtree increased so it cannot be perfectly balanced
+ ndbrequire(false);
+ }
+ // height of tree did not increase - done
+ break;
+ } else {
+ ndbrequire(false);
+ }
+ TupLoc parentLoc = node.getLink(2);
+ if (parentLoc == NullTupLoc) {
+ jam();
+ // root node - done
+ break;
+ }
+ i = node.getSide();
+ selectNode(node, parentLoc);
+ }
+}
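
The loop above is the standard AVL insert-side propagation. A stripped-down, self-contained version (hypothetical AvlNode type, rotations elided behind a placeholder) may make the three balance cases easier to follow:

// Balance convention: height(right subtree) - height(left subtree), in {-1, 0, +1}.
struct AvlNode {
  AvlNode* parent;
  int balance;
  int side;        // which child of 'parent' this node is: 0 = left, 1 = right
};

void rotate(AvlNode* node, int i);   // single or double rotation, not shown here

// Walk up from the parent of a newly added leaf; 'i' is the side that just grew.
void addRebalance(AvlNode* node, int i)
{
  while (node != 0) {
    int j = (i == 0 ? -1 : +1);
    if (node->balance == 0) {
      node->balance = j;             // this subtree grew, keep propagating up
    } else if (node->balance == -j) {
      node->balance = 0;             // shorter side caught up, total height unchanged
      return;
    } else {
      rotate(node, i);               // longer side grew: rotation restores old height
      return;
    }
    i = node->side;
    node = node->parent;
  }
}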
+
+/*
+ * Remove entry. Optimize for nodes with slack. Handle the case when
+ * there is no underflow i.e. occupancy remains at least minOccup. For
+ * interior nodes this is a requirement. For others it means that we do
+ * not need to consider merge of semi-leaf and leaf.
+ */
+void
+Dbtux::treeRemove(Frag& frag, TreePos treePos)
+{
+ TreeHead& tree = frag.m_tree;
+ unsigned pos = treePos.m_pos;
+ NodeHandle node(frag);
+ selectNode(node, treePos.m_loc);
+ TreeEnt ent;
+ if (node.getOccup() > tree.m_minOccup) {
+ // no underflow in any node type
+ jam();
+ nodePopDown(node, pos, ent, 0);
+ return;
+ }
+ if (node.getChilds() == 2) {
+ // underflow in interior node
+ jam();
+ treeRemoveInner(frag, node, pos);
+ return;
+ }
+ // remove entry in semi/leaf
+ nodePopDown(node, pos, ent, 0);
+ if (node.getLink(0) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 0);
+ return;
+ }
+ if (node.getLink(1) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, node, 1);
+ return;
+ }
+ treeRemoveLeaf(frag, node);
+}
+
+/*
+ * Remove entry when interior node underflows. There is g.l.b node in
+ * left subtree to borrow an entry from. The max entry of the g.l.b
+ * node becomes the min entry of this node.
+ */
+void
+Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos)
+{
+ TreeHead& tree = frag.m_tree;
+ TreeEnt ent;
+ // find g.l.b node
+ NodeHandle glbNode(frag);
+ TupLoc loc = lubNode.getLink(0);
+ do {
+ jam();
+ selectNode(glbNode, loc);
+ loc = glbNode.getLink(1);
+ } while (loc != NullTupLoc);
+ // borrow max entry from semi/leaf
+ Uint32 scanList = RNIL;
+ nodePopDown(glbNode, glbNode.getOccup() - 1, ent, &scanList);
+ // g.l.b may be empty now
+ // a descending scan may try to enter the empty g.l.b
+ // we prevent this in scanNext
+ nodePopUp(lubNode, pos, ent, scanList);
+ if (glbNode.getLink(0) != NullTupLoc) {
+ jam();
+ treeRemoveSemi(frag, glbNode, 0);
+ return;
+ }
+ treeRemoveLeaf(frag, glbNode);
+}
+
+/*
+ * Handle semi-leaf after removing an entry. Move entries from leaf to
+ * semi-leaf to bring semi-leaf occupancy above minOccup, if possible.
+ * The leaf may become empty.
+ */
+void
+Dbtux::treeRemoveSemi(Frag& frag, NodeHandle semiNode, unsigned i)
+{
+ TreeHead& tree = frag.m_tree;
+ ndbrequire(semiNode.getChilds() < 2);
+ TupLoc leafLoc = semiNode.getLink(i);
+ NodeHandle leafNode(frag);
+ selectNode(leafNode, leafLoc);
+ if (semiNode.getOccup() < tree.m_minOccup) {
+ jam();
+ unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - semiNode.getOccup());
+ nodeSlide(semiNode, leafNode, cnt, i);
+ if (leafNode.getOccup() == 0) {
+ // remove empty leaf
+ jam();
+ treeRemoveNode(frag, leafNode);
+ }
+ }
+}
+
+/*
+ * Handle leaf after removing an entry. If parent is semi-leaf, move
+ * entries to it as in the semi-leaf case. If parent is interior node,
+ * do nothing.
+ */
+void
+Dbtux::treeRemoveLeaf(Frag& frag, NodeHandle leafNode)
+{
+ TreeHead& tree = frag.m_tree;
+ TupLoc parentLoc = leafNode.getLink(2);
+ if (parentLoc != NullTupLoc) {
+ jam();
+ NodeHandle parentNode(frag);
+ selectNode(parentNode, parentLoc);
+ unsigned i = leafNode.getSide();
+ if (parentNode.getLink(1 - i) == NullTupLoc) {
+ // parent is semi-leaf
+ jam();
+ if (parentNode.getOccup() < tree.m_minOccup) {
+ jam();
+ unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - parentNode.getOccup());
+ nodeSlide(parentNode, leafNode, cnt, i);
+ }
+ }
+ }
+ if (leafNode.getOccup() == 0) {
+ jam();
+ // remove empty leaf
+ treeRemoveNode(frag, leafNode);
+ }
+}
+
+/*
+ * Remove empty leaf.
+ */
+void
+Dbtux::treeRemoveNode(Frag& frag, NodeHandle leafNode)
+{
+ TreeHead& tree = frag.m_tree;
+ ndbrequire(leafNode.getChilds() == 0);
+ TupLoc parentLoc = leafNode.getLink(2);
+ unsigned i = leafNode.getSide();
+ deleteNode(leafNode);
+ if (parentLoc != NullTupLoc) {
+ jam();
+ NodeHandle parentNode(frag);
+ selectNode(parentNode, parentLoc);
+ parentNode.setLink(i, NullTupLoc);
+ // re-balance the tree
+ treeRemoveRebalance(frag, parentNode, i);
+ return;
+ }
+ // tree is now empty
+ tree.m_root = NullTupLoc;
+}
+
+/*
+ * Re-balance tree after removing a node. The process starts with the
+ * parent of the removed node.
+ */
+void
+Dbtux::treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i)
+{
+ while (true) {
+ // height of subtree i has decreased by 1
+ int j = (i == 0 ? -1 : +1);
+ int b = node.getBalance();
+ if (b == 0) {
+ // perfectly balanced
+ jam();
+ node.setBalance(-j);
+ // height of tree did not change - done
+ return;
+ } else if (b == j) {
+ // height of longer subtree has decreased
+ jam();
+ node.setBalance(0);
+ // height change propagates up
+ } else if (b == -j) {
+ // height of shorter subtree has decreased
+ jam();
+ // child on the other side
+ NodeHandle childNode(frag);
+ selectNode(childNode, node.getLink(1 - i));
+ int b2 = childNode.getBalance();
+ if (b2 == b) {
+ jam();
+ treeRotateSingle(frag, node, 1 - i);
+ // height of tree decreased and propagates up
+ } else if (b2 == -b) {
+ jam();
+ treeRotateDouble(frag, node, 1 - i);
+ // height of tree decreased and propagates up
+ } else {
+ jam();
+ treeRotateSingle(frag, node, 1 - i);
+ // height of tree did not change - done
+ return;
+ }
+ } else {
+ ndbrequire(false);
+ }
+ TupLoc parentLoc = node.getLink(2);
+ if (parentLoc == NullTupLoc) {
+ jam();
+ // root node - done
+ return;
+ }
+ i = node.getSide();
+ selectNode(node, parentLoc);
+ }
+}
+
+/*
+ * Single rotation about node 5. One of LL (i=0) or RR (i=1).
+ *
+ * 0 0
+ * | |
+ * 5 ==> 3
+ * / \ / \
+ * 3 6 2 5
+ * / \ / / \
+ * 2 4 1 4 6
+ * /
+ * 1
+ *
+ * In this change 5, 3 and 2 must always be there. 0, 1, 4 and 6 are all
+ * optional. If 4 is there it changes side.
+ */
+void
+Dbtux::treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i)
+{
+ ndbrequire(i <= 1);
+ /*
+ 5 is the old top node that has become unbalanced due to an insert or
+ delete. The balance is still the old balance before the update.
+ Verify that bal5 is +1 for an RR rotate and -1 for an LL rotate.
+ */
+ NodeHandle node5 = node;
+ const TupLoc loc5 = node5.m_loc;
+ const int bal5 = node5.getBalance();
+ const int side5 = node5.getSide();
+ ndbrequire(bal5 + (1 - i) == i);
+ /*
+ 3 is the new root of this part of the tree, which is to swap places with
+ node 5. For an insert to cause this it must have the same balance as 5.
+ For deletes it can also have balance 0.
+ */
+ TupLoc loc3 = node5.getLink(i);
+ NodeHandle node3(frag);
+ selectNode(node3, loc3);
+ const int bal3 = node3.getBalance();
+ /*
+ 2 must always be there but is not changed. Thus we merely check that it
+ exists.
+ */
+ ndbrequire(node3.getLink(i) != NullTupLoc);
+ /*
+ 4 is not necessarily there but if it is there it will move from one
+ side of 3 to the other side of 5. For LL it moves from the right side
+ to the left side and for RR it moves from the left side to the right
+ side. This means that it also changes parent from 3 to 5.
+ */
+ TupLoc loc4 = node3.getLink(1 - i);
+ NodeHandle node4(frag);
+ if (loc4 != NullTupLoc) {
+ jam();
+ selectNode(node4, loc4);
+ ndbrequire(node4.getSide() == (1 - i) &&
+ node4.getLink(2) == loc3);
+ node4.setSide(i);
+ node4.setLink(2, loc5);
+ }//if
+
+ /*
+ Retrieve the address of 5's parent before it is destroyed
+ */
+ TupLoc loc0 = node5.getLink(2);
+
+ /*
+ The next step is to perform the rotation. 3 will inherit 5's parent
+ and side. 5 will become a child of 3 on the right side for LL and on
+ the left side for RR.
+ 5 will get 3 as the parent. It will get 4 as a child and it will be
+ on the right side of 3 for LL and left side of 3 for RR.
+ The final step of the rotate is to check whether 5 originally had any
+ parent. If it had not then 3 is the new root node.
+ We will also verify some preconditions for the change to occur.
+ 1. 3 must have had 5 as parent before the change.
+ 2. 3's side is left for LL and right for RR before change.
+ */
+ ndbrequire(node3.getLink(2) == loc5);
+ ndbrequire(node3.getSide() == i);
+ node3.setLink(1 - i, loc5);
+ node3.setLink(2, loc0);
+ node3.setSide(side5);
+ node5.setLink(i, loc4);
+ node5.setLink(2, loc3);
+ node5.setSide(1 - i);
+ if (loc0 != NullTupLoc) {
+ jam();
+ NodeHandle node0(frag);
+ selectNode(node0, loc0);
+ node0.setLink(side5, loc3);
+ } else {
+ jam();
+ frag.m_tree.m_root = loc3;
+ }//if
+ /* The final step of the change is to update the balance of 3 and
+ 5 that changed places. There are two cases here. The first case is
+ when 3 was unbalanced in the same direction by an insert or a delete.
+ In this case the changes will make the tree balanced again for both
+ 3 and 5.
+ The second case only occurs at deletes. In this case 3 starts out
+ balanced. In the figure above this could occur if 4 starts out with
+ a right node and the rotate is triggered by a delete of 6's only child.
+ In this case 5 will change balance but still be unbalanced and 3 will
+ be unbalanced in the opposite direction of 5.
+ */
+ if (bal3 == bal5) {
+ jam();
+ node3.setBalance(0);
+ node5.setBalance(0);
+ } else if (bal3 == 0) {
+ jam();
+ node3.setBalance(-bal5);
+ node5.setBalance(bal5);
+ } else {
+ ndbrequire(false);
+ }//if
+ /*
+ Set node to 3 as return parameter for enabling caller to continue
+ traversing the tree.
+ */
+ node = node3;
+}
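
Stripped of the occupancy, balance and side bookkeeping, the pointer surgery of the single rotation looks as follows. This is a hedged, self-contained sketch over a hypothetical RNode type, not the NodeHandle code above; the caller must still repoint node 0's child link to the returned node.

struct RNode { RNode* link[3]; };   // link[0] = left, link[1] = right, link[2] = parent

// Rotate about node 5 (LL when i == 0, RR when i == 1); returns node 3, the new root.
RNode* rotateSingle(RNode* node5, unsigned i)
{
  RNode* node3 = node5->link[i];       // new local root
  RNode* node4 = node3->link[1 - i];   // moves from under 3 to under 5 (may be null)
  RNode* node0 = node5->link[2];       // old parent of 5

  node5->link[i] = node4;
  if (node4 != 0) node4->link[2] = node5;
  node3->link[1 - i] = node5;
  node5->link[2] = node3;
  node3->link[2] = node0;              // caller fixes node0->link[side] = node3
  return node3;
}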
+
+/*
+ * Double rotation about node 6. One of LR (i=0) or RL (i=1).
+ *
+ * 0 0
+ * | |
+ * 6 ==> 4
+ * / \ / \
+ * 2 7 2 6
+ * / \ / \ / \
+ * 1 4 1 3 5 7
+ * / \
+ * 3 5
+ *
+ * In this change 6, 2 and 4 must be there, all others are optional.
+ * We will start by proving a Lemma.
+ * Lemma:
+ * The height of the sub-trees 1 and 7 and the maximum height of the
+ * trees rooted at 3 and 5 are all the same.
+ * Proof:
+ * maxheight(3,5) is defined as the maximum height of 3 and 5.
+ * If height(7) > maxheight(3,5) then the AVL condition is ok and we
+ * don't need to perform a rotation.
+ * If height(7) < maxheight(3,5) then the balance of 6 would be at least
+ * -3 which cannot happen in an AVL tree even before a rotation.
+ * Thus we conclude that height(7) == maxheight(3,5)
+ *
+ * The next step is to prove that the height of 1 is equal to maxheight(3,5).
+ * If height(1) - 1 > maxheight(3,5) then we would have
+ * balance in 6 equal to -3 at least which cannot happen in an AVL-tree.
+ * If height(1) - 1 = maxheight(3,5) then we should have solved the
+ * unbalance with a single rotate and not with a double rotate.
+ * If height(1) + 1 = maxheight(3,5) then we would be doing a rotate
+ * with node 2 as the root of the rotation.
+ * If height(1) + k = maxheight(3,5) where k >= 2 then the tree could not have
+ * been an AVL-tree before the insert or delete.
+ * Thus we conclude that height(1) = maxheight(3,5)
+ *
+ * Thus we conclude that height(1) = maxheight(3,5) = height(7).
+ *
+ * Observation:
+ * The balance of node 4 before the rotation can be any (-1, 0, +1).
+ *
+ * The following changes are needed:
+ * Node 6:
+ * 1) Changes parent from 0 -> 4
+ * 2) 1 - i link stays the same
+ * 3) i side link is derived from 1 - i side link from 4
+ * 4) Side is set to 1 - i
+ * 5) Balance change:
+ * If balance(4) == 0 then balance(6) = 0
+ * since height(3) = height(5) = maxheight(3,5) = height(7)
+ * If balance(4) == +1 then balance(6) = 0
+ * since height(5) = maxheight(3,5) = height(7)
+ * If balance(4) == -1 then balance(6) = 1
+ * since height(5) + 1 = maxheight(3,5) = height(7)
+ *
+ * Node 2:
+ * 1) Changes parent from 6 -> 4
+ * 2) i side link stays the same
+ * 3) 1 - i side link is derived from i side link of 4
+ * 4) Side is set to i (thus not changed)
+ * 5) Balance change:
+ * If balance(4) == 0 then balance(2) = 0
+ * since height(3) = height(5) = maxheight(3,5) = height(1)
+ * If balance(4) == -1 then balance(2) = 0
+ * since height(3) = maxheight(3,5) = height(1)
+ * If balance(4) == +1 then balance(2) = -1
+ * since height(3) + 1 = maxheight(3,5) = height(1)
+ *
+ * Node 4:
+ * 1) Inherits parent from 6
+ * 2) i side link is 2
+ * 3) 1 - i side link is 6
+ * 4) Side is inherited from 6
+ * 5) Balance(4) = 0 independent of previous balance
+ * Proof:
+ * If height(1) = 0 then only 2, 4 and 6 are involved and then it is
+ * trivially true.
+ * If height(1) >= 1 then we are sure that 1 and 7 exist with the same
+ * height and that if 3 and 5 exist they are of the same height as 1 and
+ * 7 and thus we know that 4 is balanced since newheight(2) = newheight(6).
+ *
+ * If Node 3 exists:
+ * 1) Change parent from 4 to 2
+ * 2) Change side from i to 1 - i
+ *
+ * If Node 5 exists:
+ * 1) Change parent from 4 to 6
+ * 2) Change side from 1 - i to i
+ *
+ * If Node 0 exists:
+ * 1) previous link to 6 is replaced by link to 4 on proper side
+ *
+ * Nodes 1 and 7 need no changes at all.
+ *
+ * Some additional requirements are that balance(2) = - balance(6) = -1/+1 since
+ * otherwise we would do a single rotate.
+ *
+ * balance(6) is -1 if i == 0 and +1 if i == 1.
+ *
+ */
+void
+Dbtux::treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i)
+{
+ TreeHead& tree = frag.m_tree;
+
+ // old top node
+ NodeHandle node6 = node;
+ const TupLoc loc6 = node6.m_loc;
+ // the un-updated balance
+ const int bal6 = node6.getBalance();
+ const unsigned side6 = node6.getSide();
+
+ // level 1
+ TupLoc loc2 = node6.getLink(i);
+ NodeHandle node2(frag);
+ selectNode(node2, loc2);
+ const int bal2 = node2.getBalance();
+
+ // level 2
+ TupLoc loc4 = node2.getLink(1 - i);
+ NodeHandle node4(frag);
+ selectNode(node4, loc4);
+ const int bal4 = node4.getBalance();
+
+ ndbrequire(i <= 1);
+ ndbrequire(bal6 + (1 - i) == i);
+ ndbrequire(bal2 == -bal6);
+ ndbrequire(node2.getLink(2) == loc6);
+ ndbrequire(node2.getSide() == i);
+ ndbrequire(node4.getLink(2) == loc2);
+
+ // level 3
+ TupLoc loc3 = node4.getLink(i);
+ TupLoc loc5 = node4.getLink(1 - i);
+
+ // fill up leaf before it becomes internal
+ if (loc3 == NullTupLoc && loc5 == NullTupLoc) {
+ jam();
+ if (node4.getOccup() < tree.m_minOccup) {
+ jam();
+ unsigned cnt = tree.m_minOccup - node4.getOccup();
+ ndbrequire(cnt < node2.getOccup());
+ nodeSlide(node4, node2, cnt, i);
+ ndbrequire(node4.getOccup() >= tree.m_minOccup);
+ ndbrequire(node2.getOccup() != 0);
+ }
+ } else {
+ if (loc3 != NullTupLoc) {
+ jam();
+ NodeHandle node3(frag);
+ selectNode(node3, loc3);
+ node3.setLink(2, loc2);
+ node3.setSide(1 - i);
+ }
+ if (loc5 != NullTupLoc) {
+ jam();
+ NodeHandle node5(frag);
+ selectNode(node5, loc5);
+ node5.setLink(2, node6.m_loc);
+ node5.setSide(i);
+ }
+ }
+ // parent
+ TupLoc loc0 = node6.getLink(2);
+ NodeHandle node0(frag);
+ // perform the rotation
+ node6.setLink(i, loc5);
+ node6.setLink(2, loc4);
+ node6.setSide(1 - i);
+
+ node2.setLink(1 - i, loc3);
+ node2.setLink(2, loc4);
+
+ node4.setLink(i, loc2);
+ node4.setLink(1 - i, loc6);
+ node4.setLink(2, loc0);
+ node4.setSide(side6);
+
+ if (loc0 != NullTupLoc) {
+ jam();
+ selectNode(node0, loc0);
+ node0.setLink(side6, loc4);
+ } else {
+ jam();
+ frag.m_tree.m_root = loc4;
+ }
+ // set balance of changed nodes
+ node4.setBalance(0);
+ if (bal4 == 0) {
+ jam();
+ node2.setBalance(0);
+ node6.setBalance(0);
+ } else if (bal4 == -bal2) {
+ jam();
+ node2.setBalance(0);
+ node6.setBalance(bal2);
+ } else if (bal4 == bal2) {
+ jam();
+ node2.setBalance(-bal2);
+ node6.setBalance(0);
+ } else {
+ ndbrequire(false);
+ }
+ // new top node
+ node = node4;
+}
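
The balance bookkeeping at the end of treeRotateDouble follows directly from the rules derived in the long comment above (with bal2 == -bal6 on entry). A small, self-contained restatement using hypothetical names:

// New balances of nodes 2 and 6 after the double rotation; node 4 always ends up 0.
void doubleRotateBalances(int bal2, int bal4, int& newBal2, int& newBal6)
{
  if (bal4 == 0) {                 // 3 and 5 equally tall
    newBal2 = 0;  newBal6 = 0;
  } else if (bal4 == -bal2) {      // the subtree that goes under 6 is the short one
    newBal2 = 0;  newBal6 = bal2;
  } else {                         // bal4 == bal2: the subtree under 2 is the short one
    newBal2 = -bal2;  newBal6 = 0;
  }
}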
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Makefile.am b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
new file mode 100644
index 00000000000..b5951e8ed37
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -0,0 +1,34 @@
+noinst_LIBRARIES = libdbtux.a
+
+libdbtux_a_SOURCES = \
+ DbtuxGen.cpp \
+ DbtuxMeta.cpp \
+ DbtuxMaint.cpp \
+ DbtuxNode.cpp \
+ DbtuxTree.cpp \
+ DbtuxScan.cpp \
+ DbtuxSearch.cpp \
+ DbtuxCmp.cpp \
+ DbtuxDebug.cpp
+
+INCLUDES_LOC = -I$(top_srcdir)/ndb/src/kernel/blocks/dbtup
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbtux.dsp
+
+libdbtux.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtux_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/dbtux/Times.txt b/storage/ndb/src/kernel/blocks/dbtux/Times.txt
new file mode 100644
index 00000000000..68120084846
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/Times.txt
@@ -0,0 +1,151 @@
+ordered index performance
+=========================
+
+"mc02" 2x1700 MHz linux-2.4.9 gcc-2.96 -O3 one db-node
+
+case a: maintenance: index on Unsigned
+testOIBasic -case u -table 1 -index 2 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+
+case b: maintenance: index on Varchar(5) + Varchar(5) + Varchar(20) + Unsigned
+testOIBasic -case u -table 2 -index 5 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+
+case c: full scan: index on PK Unsigned
+testOIBasic -case v -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
+
+case d: scan 1 tuple via EQ: index on PK Unsigned
+testOIBasic -case w -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -samples 50000 -subloop 1 -nologging -v2
+
+a, b
+1 million rows, pk update without index, pk update with index
+shows ms / 1000 rows for each and pct overhead
+
+c
+1 million rows, index on PK, full table scan, full index scan
+shows ms / 1000 rows for each and index time overhead
+
+d
+1 million rows, index on PK, read table via each pk, scan index for each pk
+shows ms / 1000 rows for each and index time overhead
+samples 10% of all PKs (100,000 pk reads, 100,000 scans)
+
+the "pct" values are from more accurate total times (not shown)
+comments [ ... ] are after the case
+
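(Worked example of the overhead figure: for the first 040616 measurement of case a
below, the per-1000-row numbers give (87 - 40) / 40 ~ 118 pct, while the listed
114 pct is computed from the more accurate totals mentioned above.)
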
+040616 mc02/a 40 ms 87 ms 114 pct
+ mc02/b 51 ms 128 ms 148 pct
+
+optim 1 mc02/a 38 ms 85 ms 124 pct
+ mc02/b 51 ms 123 ms 140 pct
+
+optim 2 mc02/a 41 ms 80 ms 96 pct
+ mc02/b 51 ms 117 ms 128 pct
+
+optim 3 mc02/a 43 ms 80 ms 85 pct
+ mc02/b 54 ms 118 ms 117 pct
+
+optim 4 mc02/a 42 ms 80 ms 87 pct
+ mc02/b 51 ms 119 ms 129 pct
+
+optim 5 mc02/a 43 ms 77 ms 77 pct
+ mc02/b 54 ms 118 ms 117 pct
+
+optim 6 mc02/a 42 ms 70 ms 66 pct
+ mc02/b 53 ms 109 ms 105 pct
+
+optim 7 mc02/a 42 ms 69 ms 61 pct
+ mc02/b 52 ms 106 ms 101 pct
+
+optim 8 mc02/a 42 ms 69 ms 62 pct
+ mc02/b 54 ms 104 ms 92 pct
+
+optim 9 mc02/a 43 ms 67 ms 54 pct
+ mc02/b 53 ms 102 ms 91 pct
+
+optim 10 mc02/a 44 ms 65 ms 46 pct
+ mc02/b 53 ms 88 ms 66 pct
+
+optim 11 mc02/a 43 ms 63 ms 46 pct
+ mc02/b 52 ms 86 ms 63 pct
+
+optim 12 mc02/a 38 ms 55 ms 43 pct
+ mc02/b 47 ms 77 ms 63 pct
+ mc02/c 10 ms 14 ms 47 pct
+ mc02/d 176 ms 281 ms 59 pct
+
+optim 13 mc02/a 40 ms 57 ms 42 pct
+ mc02/b 47 ms 77 ms 61 pct
+ mc02/c 9 ms 13 ms 50 pct
+ mc02/d 170 ms 256 ms 50 pct
+
+optim 13 mc02/a 39 ms 59 ms 50 pct
+ mc02/b 47 ms 77 ms 61 pct
+ mc02/c 9 ms 12 ms 44 pct
+ mc02/d 246 ms 289 ms 17 pct
+
+[ after wl-1884 store all-NULL keys (the tests have pctnull=10 per column) ]
+[ case d: bug in testOIBasic killed PK read performance ]
+
+optim 14 mc02/a 41 ms 60 ms 44 pct
+ mc02/b 46 ms 81 ms 73 pct
+ mc02/c 9 ms 13 ms 37 pct
+ mc02/d 242 ms 285 ms 17 pct
+
+[ case b: do long keys suffer from many subroutine calls? ]
+[ case d: bug in testOIBasic killed PK read performance ]
+
+none mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 75 ms 76 pct
+ mc02/c 5 ms 12 ms 106 pct
+ mc02/d 165 ms 238 ms 44 pct
+
+[ johan re-installed mc02 as fedora gcc-3.3.2, tux uses more C++ stuff than tup]
+
+charsets mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 84 ms 97 pct
+ mc02/c 5 ms 12 ms 109 pct
+ mc02/d 190 ms 236 ms 23 pct
+
+[ case b: TUX can no longer use pointers to TUP data ]
+
+optim 15 mc02/a 34 ms 60 ms 72 pct
+ mc02/b 42 ms 85 ms 100 pct
+ mc02/c 5 ms 12 ms 110 pct
+ mc02/d 178 ms 242 ms 35 pct
+
+[ corrected wasted space in index node ]
+
+optim 16 mc02/a 34 ms 53 ms 53 pct
+ mc02/b 42 ms 75 ms 75 pct
+
+[ binary search of bounding node when adding entry ]
+
+none mc02/a 35 ms 53 ms 51 pct
+ mc02/b 42 ms 75 ms 76 pct
+
+[ rewrote treeAdd / treeRemove ]
+
+optim 17 mc02/a 35 ms 52 ms 49 pct
+ mc02/b 43 ms 75 ms 75 pct
+
+[ allow slack (2) in interior nodes - almost no effect?? ]
+
+wl-1942 mc02/a 35 ms 52 ms 49 pct
+ mc02/b 42 ms 75 ms 76 pct
+
+before mc02/c 5 ms 13 ms 126 pct
+ mc02/d 134 ms 238 ms 78 pct
+
+after mc02/c 5 ms 10 ms 70 pct
+ mc02/d 178 ms 242 ms 69 pct
+
+[ prelim performance fix for max batch size 16 -> 992 ]
+
+wl-2066 mc02/c 5 ms 10 ms 87 pct
+before mc02/d 140 ms 237 ms 69 pct
+
+wl-2066 mc02/c 5 ms 10 ms 69 pct
+after mc02/d 150 ms 229 ms 52 pct
+
+[ wl-2066 = remove ACC storage, use TUX test to see effect ]
+
+vim: set et:
diff --git a/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html b/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html
new file mode 100644
index 00000000000..264809cefd3
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html
@@ -0,0 +1,120 @@
+<HTML>
+<HEAD>
+<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+<TITLE>NDB Ordered Index Status</TITLE>
+</HEAD>
+<BODY LINK="#0000ff" VLINK="#800080" BGCOLOR="#ffffff">
+<p>
+<h2>NDB Ordered Index Status</h2>
+<p>
+<h3>Alpha release Jan 30, 2004</h3>
+<p>
+<ul>
+ <li>
+ Up to 32 index attributes of any type, possibly nullable.
+ <li>
+ Index build, i.e. the table need not be empty.
+ <li>
+ Logging NOT done, index rebuilt at system restart.
+ <li>
+ Single range scan with lower and upper bounds.
+ <li>
+ Scan with locking: read latest, read for update.
+ <li>
+ LIMITED number of parallel scans.
+ <li>
+ Total result set NOT in index key order.
+ <li>
+ NDB ODBC optimizer to use ordered index for equality but NOT for ranges.
+ <li>
+ MySQL optimizer to use ordered index for equality and ranges.
+</ul>
+<p>
+As an example, consider the following index on integer attributes.
+<p>
+<tt>SQL&gt;create index X on T (A, B, C) nologging;</tt>
+<p>
+Single range scan means that bounds are set on
+an initial sequence of index keys, and all but the last is an equality.
+<br>
+For example, the following scans are supported (the last 2 not via NDB ODBC).
+<p>
+<tt>SQL&gt;select * from T where A = 1;</tt>
+<br>
+<tt>SQL&gt;select * from T where A = 1 and B = 10 and C = 20;</tt>
+<br>
+<tt>SQL&gt;select * from T where A &lt; 10;</tt>
+<br>
+<tt>SQL&gt;select * from T where A = 1 and 10 &lt; B and B &lt; 20;</tt>
+<p>
+The following scans are NOT supported:
+<p>
+<tt>SQL&gt;select * from T where B = 1;</tt>
+<br>
+<tt>SQL&gt;select * from T where A &lt; 10 and B &lt; 20;</tt>
+<br>
+<h3>Features and dates</h3>
+[ Now = Jan 19 ]
+<p>
+<table border=1 cellpadding=1>
+<tr align="left">
+ <th width="40%">Feature</th>
+ <th width="15%">Now</th> <th width="15%">Jan 30</th> <th width="15%">Mar 01</th> <th width="15%">Never</th>
+</tr>
+<tr align=left>
+ <td>Index maintenance</td>
+ <td>X</td> <td>-</td> <td>-</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Basic scan</td>
+ <td>X 1)</td> <td>-</td> <td>-</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Scan bounds on nullable attributes</td>
+ <td>-</td> <td>X</td> <td>-</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Scan with locking</td>
+ <td>-</td> <td>X</td> <td>-</td> <td>-</td>
+</tr>
+<tr align="left">
+ <td>NDB ODBC equality bounds</td>
+ <td>-</td> <td>X</td> <td>-</td> <td>-</td>
+</tr>
+<tr align="left">
+ <td>MySQL integration</td>
+ <td>-</td> <td>X</td> <td>-</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Index build</td>
+ <td>2)</td> <td>X</td> <td>-</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Unlimited number of scans</td>
+ <td>3)</td> <td>-</td> <td>X</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Total ordering</td>
+ <td>-</td> <td>-</td> <td>X</td> <td>-</td>
+</tr>
+<tr align=left>
+ <td>Multiple range scan</td>
+ <td>-</td> <td>-</td> <td>X</td> <td>-</td>
+</tr>
+<tr align="left">
+ <td>NDB ODBC range bounds</td>
+ <td>-</td> <td>-</td> <td>-</td> <td>X</td>
+</tr>
+<tr align=left>
+ <td>Logging</td>
+ <td>-</td> <td>-</td> <td>-</td> <td>X</td>
+</tr>
+</table>
+<p>
+1) No locking and bounds must be on non-nullable key attributes.
+<br>
+2) Currently table must be empty when index is created.
+<br>
+3) Currently limited to 11 simultaneous per fragment.
+</BODY>
+</HTML>
diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
new file mode 100644
index 00000000000..b94bb8e6d7e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -0,0 +1,2589 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+
+#include "DbUtil.hpp"
+
+#include <ndb_version.h>
+
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/KeyInfo.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/TcKeyConf.hpp>
+#include <signaldata/TcKeyFailConf.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+
+#include <signaldata/UtilSequence.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/UtilRelease.hpp>
+#include <signaldata/UtilExecute.hpp>
+#include <signaldata/UtilLock.hpp>
+
+#include <SectionReader.hpp>
+#include <Interpreter.hpp>
+#include <AttributeHeader.hpp>
+
+#include <NdbTick.h>
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Startup
+ * ------------------------------------------------------------------------
+ *
+ * Constructors, startup, initializations
+ **************************************************************************/
+
+DbUtil::DbUtil(const Configuration & conf) :
+ SimulatedBlock(DBUTIL, conf),
+ c_runningPrepares(c_preparePool),
+ c_runningPreparedOperations(c_preparedOperationPool),
+ c_seizingTransactions(c_transactionPool),
+ c_runningTransactions(c_transactionPool),
+ c_lockQueues(c_lockQueuePool)
+{
+ BLOCK_CONSTRUCTOR(DbUtil);
+
+ // Add received signals
+ addRecSignal(GSN_STTOR, &DbUtil::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &DbUtil::execNDB_STTOR);
+ addRecSignal(GSN_DUMP_STATE_ORD, &DbUtil::execDUMP_STATE_ORD);
+ addRecSignal(GSN_CONTINUEB, &DbUtil::execCONTINUEB);
+
+ //addRecSignal(GSN_TCSEIZEREF, &DbUtil::execTCSEIZEREF);
+ addRecSignal(GSN_TCSEIZECONF, &DbUtil::execTCSEIZECONF);
+ addRecSignal(GSN_TCKEYCONF, &DbUtil::execTCKEYCONF);
+ addRecSignal(GSN_TCKEYREF, &DbUtil::execTCKEYREF);
+ addRecSignal(GSN_TCROLLBACKREP, &DbUtil::execTCROLLBACKREP);
+
+ //addRecSignal(GSN_TCKEY_FAILCONF, &DbUtil::execTCKEY_FAILCONF);
+ //addRecSignal(GSN_TCKEY_FAILREF, &DbUtil::execTCKEY_FAILREF);
+ addRecSignal(GSN_TRANSID_AI, &DbUtil::execTRANSID_AI);
+
+ /**
+ * Sequence Service
+ */
+ addRecSignal(GSN_UTIL_SEQUENCE_REQ, &DbUtil::execUTIL_SEQUENCE_REQ);
+ // Debug
+ addRecSignal(GSN_UTIL_SEQUENCE_REF, &DbUtil::execUTIL_SEQUENCE_REF);
+ addRecSignal(GSN_UTIL_SEQUENCE_CONF, &DbUtil::execUTIL_SEQUENCE_CONF);
+
+ /**
+ * Locking
+ */
+ addRecSignal(GSN_UTIL_CREATE_LOCK_REQ, &DbUtil::execUTIL_CREATE_LOCK_REQ);
+ addRecSignal(GSN_UTIL_DESTROY_LOCK_REQ, &DbUtil::execUTIL_DESTORY_LOCK_REQ);
+ addRecSignal(GSN_UTIL_LOCK_REQ, &DbUtil::execUTIL_LOCK_REQ);
+ addRecSignal(GSN_UTIL_UNLOCK_REQ, &DbUtil::execUTIL_UNLOCK_REQ);
+
+ /**
+ * Backend towards Dict
+ */
+ addRecSignal(GSN_GET_TABINFOREF, &DbUtil::execGET_TABINFOREF);
+ addRecSignal(GSN_GET_TABINFO_CONF, &DbUtil::execGET_TABINFO_CONF);
+
+ /**
+ * Prepare / Execute / Release Services
+ */
+ addRecSignal(GSN_UTIL_PREPARE_REQ, &DbUtil::execUTIL_PREPARE_REQ);
+ addRecSignal(GSN_UTIL_PREPARE_CONF, &DbUtil::execUTIL_PREPARE_CONF);
+ addRecSignal(GSN_UTIL_PREPARE_REF, &DbUtil::execUTIL_PREPARE_REF);
+
+ addRecSignal(GSN_UTIL_EXECUTE_REQ, &DbUtil::execUTIL_EXECUTE_REQ);
+ addRecSignal(GSN_UTIL_EXECUTE_CONF, &DbUtil::execUTIL_EXECUTE_CONF);
+ addRecSignal(GSN_UTIL_EXECUTE_REF, &DbUtil::execUTIL_EXECUTE_REF);
+
+ addRecSignal(GSN_UTIL_RELEASE_REQ, &DbUtil::execUTIL_RELEASE_REQ);
+ addRecSignal(GSN_UTIL_RELEASE_CONF, &DbUtil::execUTIL_RELEASE_CONF);
+ addRecSignal(GSN_UTIL_RELEASE_REF, &DbUtil::execUTIL_RELEASE_REF);
+
+ c_pagePool.setSize(10);
+ c_preparePool.setSize(1); // one parallel prepare at a time
+ c_preparedOperationPool.setSize(5); // three hardcoded, two for test
+ c_operationPool.setSize(64); // 64 parallel operations
+ c_transactionPool.setSize(32); // 32 parallel transactions
+ c_attrMappingPool.setSize(100);
+ c_dataBufPool.setSize(6000); // 6000*11*4 = 264K > 8k+8k*16 = 256k
+ {
+ SLList<Prepare> tmp(c_preparePool);
+ PreparePtr ptr;
+ while(tmp.seize(ptr))
+ new (ptr.p) Prepare(c_pagePool);
+ tmp.release();
+ }
+ {
+ SLList<Operation> tmp(c_operationPool);
+ OperationPtr ptr;
+ while(tmp.seize(ptr))
+ new (ptr.p) Operation(c_dataBufPool, c_dataBufPool, c_dataBufPool);
+ tmp.release();
+ }
+ {
+ SLList<PreparedOperation> tmp(c_preparedOperationPool);
+ PreparedOperationPtr ptr;
+ while(tmp.seize(ptr))
+ new (ptr.p) PreparedOperation(c_attrMappingPool,
+ c_dataBufPool, c_dataBufPool);
+ tmp.release();
+ }
+ {
+ SLList<Transaction> tmp(c_transactionPool);
+ TransactionPtr ptr;
+ while(tmp.seize(ptr))
+ new (ptr.p) Transaction(c_pagePool, c_operationPool);
+ tmp.release();
+ }
+
+ c_lockQueuePool.setSize(5);
+ c_lockElementPool.setSize(5);
+ c_lockQueues.setSize(8);
+}
+
+DbUtil::~DbUtil()
+{
+}
+
+BLOCK_FUNCTIONS(DbUtil)
+
+void
+DbUtil::releasePrepare(PreparePtr prepPtr) {
+ prepPtr.p->preparePages.release();
+ c_runningPrepares.release(prepPtr); // Automatic release in pool
+}
+
+void
+DbUtil::releasePreparedOperation(PreparedOperationPtr prepOpPtr) {
+ prepOpPtr.p->attrMapping.release();
+ prepOpPtr.p->attrInfo.release();
+ prepOpPtr.p->rsInfo.release();
+ prepOpPtr.p->pkBitmask.clear();
+ c_preparedOperationPool.release(prepOpPtr); // No list holding these structs
+}
+
+void
+DbUtil::releaseTransaction(TransactionPtr transPtr){
+ transPtr.p->executePages.release();
+ OperationPtr opPtr;
+ for(transPtr.p->operations.first(opPtr); opPtr.i != RNIL;
+ transPtr.p->operations.next(opPtr)){
+ opPtr.p->attrInfo.release();
+ opPtr.p->keyInfo.release();
+ opPtr.p->rs.release();
+ if (opPtr.p->prepOp != 0 && opPtr.p->prepOp_i != RNIL) {
+ if (opPtr.p->prepOp->releaseFlag) {
+ PreparedOperationPtr prepOpPtr;
+ prepOpPtr.i = opPtr.p->prepOp_i;
+ prepOpPtr.p = opPtr.p->prepOp;
+ releasePreparedOperation(prepOpPtr);
+ }
+ }
+ }
+ transPtr.p->operations.release();
+ c_runningTransactions.release(transPtr);
+}
+
+void
+DbUtil::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 startphase = signal->theData[1];
+
+ if(startphase == 1){
+ c_transId[0] = (number() << 20) + (getOwnNodeId() << 8);
+ c_transId[1] = 0;
+ }
+
+ if(startphase == 6){
+ hardcodedPrepare();
+ connectTc(signal);
+ }
+
+ signal->theData[0] = 0;
+ signal->theData[3] = 1;
+ signal->theData[4] = 6;
+ signal->theData[5] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB);
+
+ return;
+}
+
+void
+DbUtil::execNDB_STTOR(Signal* signal)
+{
+ (void)signal; // Don't want compiler warning
+
+ jamEntry();
+}
+
+
+/***************************
+ * Seize a number of TC records
+ * to use for Util transactions
+ */
+
+void
+DbUtil::connectTc(Signal* signal){
+
+ TransactionPtr ptr;
+ while(c_seizingTransactions.seize(ptr)){
+ signal->theData[0] = ptr.i << 1; // See TcCommitConf
+ signal->theData[1] = reference();
+ sendSignal(DBTC_REF, GSN_TCSEIZEREQ, signal, 2, JBB);
+ }
+}
+
+void
+DbUtil::execTCSEIZECONF(Signal* signal){
+ jamEntry();
+
+ TransactionPtr ptr;
+ ptr.i = signal->theData[0] >> 1;
+ c_seizingTransactions.getPtr(ptr, signal->theData[0] >> 1);
+ ptr.p->connectPtr = signal->theData[1];
+
+ c_seizingTransactions.release(ptr);
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Misc
+ * ------------------------------------------------------------------------
+ *
+ * ContinueB, Dump
+ **************************************************************************/
+
+void
+DbUtil::execCONTINUEB(Signal* signal){
+ jamEntry();
+ const Uint32 Tdata0 = signal->theData[0];
+
+ switch(Tdata0){
+ default:
+ ndbrequire(0);
+ }
+}
+
+void
+DbUtil::execDUMP_STATE_ORD(Signal* signal){
+ jamEntry();
+
+ /****************************************************************************
+ * SEQUENCE SERVICE
+ *
+ * 200 : Simple test of Public Sequence Interface
+ * ----------------------------------------------
+ * - Sends a SEQUENCE_REQ signal to Util (itself)
+ */
+ const Uint32 tCase = signal->theData[0];
+ if(tCase == 200){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend();
+ Uint32 seqId = 1;
+ Uint32 reqTy = UtilSequenceReq::CurrVal;
+
+ if(signal->length() > 1) seqId = signal->theData[1];
+ if(signal->length() > 2) reqTy = signal->theData[2];
+
+ req->senderData = 12;
+ req->sequenceId = seqId;
+ req->requestType = reqTy;
+
+ sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+ signal, UtilSequenceReq::SignalLength, JBB);
+ }
+
+ /****************************************************************************/
+ /* // Obsolete tests, should be rewritten for long signals!!
+ if(tCase == 210){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0], 128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Delete);
+ w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0");
+ w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 211){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Insert);
+ w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0");
+ w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0
+ w.add(UtilPrepareReq::AttributeName, "NEXTID"); // AttrNo = 1
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 212){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Update);
+ w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0");
+ w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0
+ w.add(UtilPrepareReq::AttributeName, "NEXTID"); // AttrNo = 1
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 213){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Read);
+ w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0");
+ w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 214){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0], 128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Delete);
+ w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0
+ w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 215){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Insert);
+ w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0
+ w.add(UtilPrepareReq::AttributeId, (unsigned int)0); // SYSKEY_0
+ w.add(UtilPrepareReq::AttributeId, 1); // NEXTID
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 216){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Update);
+ w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0
+ w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0
+ w.add(UtilPrepareReq::AttributeId, 1); // NEXTID
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ if(tCase == 217){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Read);
+ w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0
+ w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0
+ Uint32 length = w.getWordsUsed();
+ ndbassert(length <= pageSizeInWords);
+
+ sendUtilPrepareReqSignals(signal, propPage, length);
+ }
+ */
+ /****************************************************************************/
+ /* // Obsolete tests, should be rewritten for long signals!!
+ if(tCase == 220){
+ jam();
+ ndbout << "--------------------------------------------------" << endl;
+ Uint32 prepI = signal->theData[1];
+ Uint32 length = signal->theData[2];
+ Uint32 attributeValue0 = signal->theData[3];
+ Uint32 attributeValue1a = signal->theData[4];
+ Uint32 attributeValue1b = signal->theData[5];
+ ndbrequire(prepI != 0);
+
+ UtilExecuteReq * req = (UtilExecuteReq *)signal->getDataPtrSend();
+
+ req->senderData = 221;
+ req->prepareId = prepI;
+ req->totalDataLen = length; // Including headers
+ req->offset = 0;
+
+ AttributeHeader::init(&req->attrData[0], 0, 1); // AttrNo 0, DataSize
+ req->attrData[1] = attributeValue0; // AttrValue
+ AttributeHeader::init(&req->attrData[2], 1, 2); // AttrNo 1, DataSize
+ req->attrData[3] = attributeValue1a; // AttrValue
+ req->attrData[4] = attributeValue1b; // AttrValue
+
+ printUTIL_EXECUTE_REQ(stdout, signal->getDataPtrSend(), 3 + 5,0);
+ sendSignal(DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal, 3 + 5, JBB);
+ }
+*/
+ /****************************************************************************
+ * 230 : PRINT STATE
+ */
+#ifdef ARRAY_GUARD
+ if(tCase == 230){
+ jam();
+
+ ndbout << "--------------------------------------------------" << endl;
+ if (signal->length() <= 1) {
+ ndbout << "Usage: DUMP 230 <recordType> <recordNo>" << endl
+ << "[1] Print Prepare (running) records" << endl
+ << "[2] Print PreparedOperation records" << endl
+ << "[3] Print Transaction records" << endl
+ << "[4] Print Operation records" << endl
+ << "Ex. \"dump 230 1 2\" prints Prepare record no 2." << endl
+ << endl
+ << "210 : PREPARE_REQ DELETE SYSTAB_0 SYSKEY_0" << endl
+ << "211 : PREPARE_REQ INSERT SYSTAB_0 SYSKEY_0 NEXTID" << endl
+ << "212 : PREPARE_REQ UPDATE SYSTAB_0 SYSKEY_0 NEXTID" << endl
+ << "213 : PREPARE_REQ READ SYSTAB_0 SYSKEY_0" << endl
+ << "214 : PREPARE_REQ DELETE SYSTAB_0 SYSKEY_0 using id" << endl
+ << "215 : PREPARE_REQ INSERT SYSTAB_0 SYSKEY_0 NEXTID using id" << endl
+ << "216 : PREPARE_REQ UPDATE SYSTAB_0 SYSKEY_0 NEXTID using id" << endl
+ << "217 : PREPARE_REQ READ SYSTAB_0 SYSKEY_0 using id" << endl
+ << "220 : EXECUTE_REQ <PrepId> <Len> <Val1> <Val2a> <Val2b>" <<endl
+ << "299 : Crash system (using ndbrequire(0))"
+ << endl
+ << "Ex. \"dump 220 3 5 1 0 17\" runs EXECUTE_REQ with PrepId 3."
+ << endl;
+ return;
+ }
+
+ switch (signal->theData[1]) {
+ case 1:
+ // ** Print a specific record **
+ if (signal->length() >= 3) {
+ PreparePtr prepPtr;
+ if (!c_preparePool.isSeized(signal->theData[2])) {
+ ndbout << "Prepare Id: " << signal->theData[2]
+ << " (Not seized!)" << endl;
+ } else {
+ c_preparePool.getPtr(prepPtr, signal->theData[2]);
+ prepPtr.p->print();
+ }
+ return;
+ }
+
+ // ** Print all records **
+ PreparePtr prepPtr;
+ if (!c_runningPrepares.first(prepPtr)) {
+ ndbout << "No Prepare records exist" << endl;
+ return;
+ }
+
+ while (!prepPtr.isNull()) {
+ prepPtr.p->print();
+ c_runningPrepares.next(prepPtr);
+ }
+ return;
+
+ case 2:
+ // ** Print a specific record **
+ if (signal->length() >= 3) {
+ if (!c_preparedOperationPool.isSeized(signal->theData[2])) {
+ ndbout << "PreparedOperation Id: " << signal->theData[2]
+ << " (Not seized!)" << endl;
+ return;
+ }
+ ndbout << "PreparedOperation Id: " << signal->theData[2] << endl;
+ PreparedOperationPtr prepOpPtr;
+ c_runningPreparedOperations.getPtr(prepOpPtr, signal->theData[2]);
+ prepOpPtr.p->print();
+ return;
+ }
+
+ // ** Print all records **
+ PreparedOperationPtr prepOpPtr;
+ if (!c_runningPreparedOperations.first(prepOpPtr)) {
+ ndbout << "No PreparedOperations exist" << endl;
+ return;
+ }
+ while (!prepOpPtr.isNull()) {
+ ndbout << "[-PreparedOperation no " << prepOpPtr.i << ":";
+ prepOpPtr.p->print();
+ ndbout << "]";
+ c_runningPreparedOperations.next(prepOpPtr);
+ }
+ return;
+
+ case 3:
+ // ** Print a specific record **
+ if (signal->length() >= 3) {
+ ndbout << "Print specific record not implemented." << endl;
+ return;
+ }
+
+ // ** Print all records **
+ ndbout << "Print all records not implemented, specify an Id." << endl;
+ return;
+
+ case 4:
+ ndbout << "Not implemented" << endl;
+ return;
+
+ default:
+ ndbout << "Unknown input (try without any data)" << endl;
+ return;
+ }
+ }
+#endif
+ if(tCase == 240 && signal->getLength() == 2){
+ MutexManager::ActiveMutexPtr ptr;
+ ndbrequire(c_mutexMgr.seize(ptr));
+ ptr.p->m_mutexId = signal->theData[1];
+ Callback c = { safe_cast(&DbUtil::mutex_created), ptr.i };
+ ptr.p->m_callback = c;
+ c_mutexMgr.create(signal, ptr);
+ ndbout_c("c_mutexMgr.create ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId);
+ }
+
+ if(tCase == 241 && signal->getLength() == 2){
+ MutexManager::ActiveMutexPtr ptr;
+ ndbrequire(c_mutexMgr.seize(ptr));
+ ptr.p->m_mutexId = signal->theData[1];
+ Callback c = { safe_cast(&DbUtil::mutex_locked), ptr.i };
+ ptr.p->m_callback = c;
+ c_mutexMgr.lock(signal, ptr);
+ ndbout_c("c_mutexMgr.lock ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId);
+ }
+
+ if(tCase == 242 && signal->getLength() == 2){
+ MutexManager::ActiveMutexPtr ptr;
+ ptr.i = signal->theData[1];
+ c_mutexMgr.getPtr(ptr);
+ Callback c = { safe_cast(&DbUtil::mutex_unlocked), ptr.i };
+ ptr.p->m_callback = c;
+ c_mutexMgr.unlock(signal, ptr);
+ ndbout_c("c_mutexMgr.unlock ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId);
+ }
+
+ if(tCase == 243 && signal->getLength() == 3){
+ MutexManager::ActiveMutexPtr ptr;
+ ndbrequire(c_mutexMgr.seize(ptr));
+ ptr.p->m_mutexId = signal->theData[1];
+ ptr.p->m_mutexKey = signal->theData[2];
+ Callback c = { safe_cast(&DbUtil::mutex_destroyed), ptr.i };
+ ptr.p->m_callback = c;
+ c_mutexMgr.destroy(signal, ptr);
+ ndbout_c("c_mutexMgr.destroy ptrI=%d mutexId=%d key=%d",
+ ptr.i, ptr.p->m_mutexId, ptr.p->m_mutexKey);
+ }
+}
+
+void
+DbUtil::mutex_created(Signal* signal, Uint32 ptrI, Uint32 retVal){
+ MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI;
+ c_mutexMgr.getPtr(ptr);
+ ndbout_c("mutex_created - mutexId=%d, retVal=%d",
+ ptr.p->m_mutexId, retVal);
+ c_mutexMgr.release(ptrI);
+}
+
+void
+DbUtil::mutex_destroyed(Signal* signal, Uint32 ptrI, Uint32 retVal){
+ MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI;
+ c_mutexMgr.getPtr(ptr);
+ ndbout_c("mutex_destroyed - mutexId=%d, retVal=%d",
+ ptr.p->m_mutexId, retVal);
+ c_mutexMgr.release(ptrI);
+}
+
+void
+DbUtil::mutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){
+ MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI;
+ c_mutexMgr.getPtr(ptr);
+ ndbout_c("mutex_locked - mutexId=%d, retVal=%d key=%d ptrI=%d",
+ ptr.p->m_mutexId, retVal, ptr.p->m_mutexKey, ptrI);
+ if(retVal)
+ c_mutexMgr.release(ptrI);
+}
+
+void
+DbUtil::mutex_unlocked(Signal* signal, Uint32 ptrI, Uint32 retVal){
+ MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI;
+ c_mutexMgr.getPtr(ptr);
+ ndbout_c("mutex_unlocked - mutexId=%d, retVal=%d",
+ ptr.p->m_mutexId, retVal);
+ if(!retVal)
+ c_mutexMgr.release(ptrI);
+}
+
+void
+DbUtil::execUTIL_SEQUENCE_REF(Signal* signal){
+ jamEntry();
+ ndbout << "UTIL_SEQUENCE_REF" << endl;
+ printUTIL_SEQUENCE_REF(stdout, signal->getDataPtrSend(), signal->length(), 0);
+}
+
+void
+DbUtil::execUTIL_SEQUENCE_CONF(Signal* signal){
+ jamEntry();
+ ndbout << "UTIL_SEQUENCE_CONF" << endl;
+ printUTIL_SEQUENCE_CONF(stdout, signal->getDataPtrSend(), signal->length(),0);
+}
+
+void
+DbUtil::execUTIL_PREPARE_CONF(Signal* signal){
+ jamEntry();
+ ndbout << "UTIL_PREPARE_CONF" << endl;
+ printUTIL_PREPARE_CONF(stdout, signal->getDataPtrSend(), signal->length(), 0);
+}
+
+void
+DbUtil::execUTIL_PREPARE_REF(Signal* signal){
+ jamEntry();
+ ndbout << "UTIL_PREPARE_REF" << endl;
+ printUTIL_PREPARE_REF(stdout, signal->getDataPtrSend(), signal->length(), 0);
+}
+
+void
+DbUtil::execUTIL_EXECUTE_CONF(Signal* signal) {
+ jamEntry();
+ ndbout << "UTIL_EXECUTE_CONF" << endl;
+ printUTIL_EXECUTE_CONF(stdout, signal->getDataPtrSend(), signal->length(), 0);
+}
+
+void
+DbUtil::execUTIL_EXECUTE_REF(Signal* signal) {
+ jamEntry();
+
+ ndbout << "UTIL_EXECUTE_REF" << endl;
+ printUTIL_EXECUTE_REF(stdout, signal->getDataPtrSend(), signal->length(), 0);
+}
+
+void
+DbUtil::execUTIL_RELEASE_CONF(Signal* signal) {
+ jamEntry();
+ ndbout << "UTIL_RELEASE_CONF" << endl;
+}
+
+void
+DbUtil::execUTIL_RELEASE_REF(Signal* signal) {
+ jamEntry();
+
+ ndbout << "UTIL_RELEASE_REF" << endl;
+}
+
+void
+DbUtil::sendUtilPrepareRef(Signal* signal, UtilPrepareRef::ErrorCode error,
+ Uint32 recipient, Uint32 senderData){
+ UtilPrepareRef * ref = (UtilPrepareRef *)signal->getDataPtrSend();
+ ref->errorCode = error;
+ ref->senderData = senderData;
+
+ sendSignal(recipient, GSN_UTIL_PREPARE_REF, signal,
+ UtilPrepareRef::SignalLength, JBB);
+}
+
+void
+DbUtil::sendUtilExecuteRef(Signal* signal, UtilExecuteRef::ErrorCode error,
+ Uint32 TCerror, Uint32 recipient, Uint32 senderData){
+
+ UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->errorCode = error;
+ ref->TCErrorCode = TCerror;
+
+ sendSignal(recipient, GSN_UTIL_EXECUTE_REF, signal,
+ UtilExecuteRef::SignalLength, JBB);
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Prepare service
+ * ------------------------------------------------------------------------
+ *
+ * Prepares a transaction by storing info in some structs
+ **************************************************************************/
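+
+/*
+ * Signal flow for the prepare service (as implemented below):
+ *
+ *   UTIL_PREPARE_REQ (PROPERTIES_SECTION)
+ *     -> properties copied into Prepare::preparePages
+ *     -> GET_TABINFOREQ sent to DBDICT (by table name or table id)
+ *   GET_TABINFO_CONF
+ *     -> prepareOperation() builds a PreparedOperation from the properties
+ *        and the DictTabInfo, then answers with UTIL_PREPARE_CONF
+ *   Any failure answers with UTIL_PREPARE_REF and releases the records.
+ */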
+
+void
+DbUtil::execUTIL_PREPARE_REQ(Signal* signal)
+{
+ jamEntry();
+
+ /****************
+ * Decode Signal
+ ****************/
+ UtilPrepareReq * req = (UtilPrepareReq *)signal->getDataPtr();
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 senderData = req->senderData;
+
+ if(signal->getNoOfSections() == 0) {
+ // Missing prepare data
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal, UtilPrepareRef::MISSING_PROPERTIES_SECTION,
+ senderRef, senderData);
+ return;
+ }
+
+ PreparePtr prepPtr;
+ SegmentedSectionPtr ptr;
+
+ jam();
+ if(!c_runningPrepares.seize(prepPtr)) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal, UtilPrepareRef::PREPARE_SEIZE_ERROR,
+ senderRef, senderData);
+ return;
+ };
+ signal->getSection(ptr, UtilPrepareReq::PROPERTIES_SECTION);
+ const Uint32 noPages = (ptr.sz + sizeof(Page32)) / sizeof(Page32);
+ ndbassert(noPages > 0);
+ if (!prepPtr.p->preparePages.seize(noPages)) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal, UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR,
+ senderRef, senderData);
+ c_preparePool.release(prepPtr);
+ return;
+ }
+ // Save SimpleProperties
+ Uint32* target = &prepPtr.p->preparePages.getPtr(0)->data[0];
+ copy(target, ptr);
+ prepPtr.p->prepDataLen = ptr.sz;
+ // Release long signal sections
+ releaseSections(signal);
+ // Check table properties with DICT
+ SimplePropertiesSectionReader reader(ptr, getSectionSegmentPool());
+ prepPtr.p->clientRef = senderRef;
+ prepPtr.p->clientData = senderData;
+ // Release long signal sections
+ releaseSections(signal);
+ readPrepareProps(signal, &reader, prepPtr.i);
+}
+
+void DbUtil::readPrepareProps(Signal* signal,
+ SimpleProperties::Reader* reader,
+ Uint32 senderData)
+{
+ jam();
+#if 0
+ printf("DbUtil::readPrepareProps: Received SimpleProperties:\n");
+ reader->printAll(ndbout);
+#endif
+ ndbrequire(reader->first());
+ ndbrequire(reader->getKey() == UtilPrepareReq::NoOfOperations);
+ ndbrequire(reader->getUint32() == 1); // Only one op/trans implemented
+
+ ndbrequire(reader->next());
+ ndbrequire(reader->getKey() == UtilPrepareReq::OperationType);
+
+ ndbrequire(reader->next());
+ UtilPrepareReq::KeyValue tableKey =
+ (UtilPrepareReq::KeyValue) reader->getKey();
+ ndbrequire((tableKey == UtilPrepareReq::TableName) ||
+ (tableKey == UtilPrepareReq::TableId));
+
+ /************************
+ * Ask Dict for metadata
+ ************************/
+ {
+ GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = senderData;
+ if (tableKey == UtilPrepareReq::TableName) {
+ jam();
+ char tableName[MAX_TAB_NAME_SIZE];
+ req->requestType = GetTabInfoReq::RequestByName |
+ GetTabInfoReq::LongSignalConf;
+
+ req->tableNameLen = reader->getValueLen(); // Including trailing \0
+
+ /********************************************
+ * Code signal data and send signals to DICT
+ ********************************************/
+
+ ndbrequire(req->tableNameLen < MAX_TAB_NAME_SIZE);
+ reader->getString((char*)tableName);
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32*)tableName;
+ ptr[0].sz = req->tableNameLen;
+ sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB, ptr,1);
+
+ }
+ else { // (tableKey == UtilPrepareReq::TableId)
+ jam();
+ req->requestType = GetTabInfoReq::RequestById |
+ GetTabInfoReq::LongSignalConf;
+ req->tableId = reader->getUint32();
+ sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+ }
+
+ }
+}
+
+/**
+ * @note We assume that this signal arrives due to a request related
+ * to a Prepare struct. The DictTabInfo's 'senderData' denotes
+ * the Prepare struct related to the request.
+ */
+void
+DbUtil::execGET_TABINFO_CONF(Signal* signal){
+ jamEntry();
+
+ if(!assembleFragments(signal)){
+ jam();
+ return;
+ }
+
+ /****************
+ * Decode signal
+ ****************/
+ GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
+ const Uint32 prepI = conf->senderData;
+ const Uint32 totalLen = conf->totalLen;
+
+ SegmentedSectionPtr dictTabInfoPtr;
+ signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
+ ndbrequire(dictTabInfoPtr.sz == totalLen);
+
+ PreparePtr prepPtr;
+ c_runningPrepares.getPtr(prepPtr, prepI);
+ prepareOperation(signal, prepPtr);
+}
+
+void
+DbUtil::execGET_TABINFOREF(Signal* signal){
+ jamEntry();
+
+ GetTabInfoRef * ref = (GetTabInfoRef *)signal->getDataPtr();
+ Uint32 prepI = ref->senderData;
+#define EVENT_DEBUG
+#if 0 //def EVENT_DEBUG
+ ndbout << "Signal GET_TABINFOREF received." << endl;
+ ndbout << "Error Code: " << ref->errorCode << endl;
+
+ switch (ref->errorCode) {
+ case GetTabInfoRef::InvalidTableId:
+ ndbout << " Msg: Invalid table id" << endl;
+ break;
+ case GetTabInfoRef::TableNotDefined:
+ ndbout << " Msg: Table not defined" << endl;
+ break;
+ case GetTabInfoRef::TableNameToLong:
+ ndbout << " Msg: Table name too long" << endl;
+ break;
+ default:
+ ndbout << " Msg: Unknown error returned from Dict" << endl;
+ break;
+ }
+#endif
+
+ PreparePtr prepPtr;
+ c_runningPrepares.getPtr(prepPtr, prepI);
+
+ sendUtilPrepareRef(signal, UtilPrepareRef::DICT_TAB_INFO_ERROR,
+ prepPtr.p->clientRef, prepPtr.p->clientData);
+
+ releasePrepare(prepPtr);
+}
+
+
+/******************************************************************************
+ * Prepare Operation
+ *
+ * Using a prepare record, prepare an operation (i.e. create PreparedOperation).
+ * Info from both the Prepare request (PreparePages) and DictTabInfo is used.
+ *
+ * Algorithm:
+ * -# Seize AttributeMapping
+ * - Lookup in preparePages how many attributes should be prepared
+ * - Seize AttributeMapping
+ * -# For each attribute in preparePages
+ * - Lookup id and isPK in dictInfoPages
+ * - Store "no -> (AttributeId, Position)" in AttributeMapping
+ * -# For each map in AttributeMapping
+ * - if (isPK) then assign offset
+ ******************************************************************************/
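+/*
+ * Each AttributeMapping entry is an AttributeHeader word: the attribute id
+ * comes from DictTabInfo and the "size" field is reused as the position
+ * (the key position for primary key attributes, the placeholder 0x3fff for
+ * non-key attributes).
+ */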
+void
+DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
+{
+ jam();
+
+ /*******************************************
+ * Seize and store PreparedOperation struct
+ *******************************************/
+ PreparedOperationPtr prepOpPtr;
+ if(!c_runningPreparedOperations.seize(prepOpPtr)) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal, UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR,
+ prepPtr.p->clientRef, prepPtr.p->clientData);
+ releasePrepare(prepPtr);
+ return;
+ }
+ prepPtr.p->prepOpPtr = prepOpPtr;
+
+ /********************
+ * Read request info
+ ********************/
+ SimplePropertiesLinearReader prepPagesReader(&prepPtr.p->preparePages.getPtr(0)->data[0],
+ prepPtr.p->prepDataLen);
+
+ ndbrequire(prepPagesReader.first());
+ ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::NoOfOperations);
+ const Uint32 noOfOperations = prepPagesReader.getUint32();
+ ndbrequire(noOfOperations == 1);
+
+ ndbrequire(prepPagesReader.next());
+ ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::OperationType);
+ const Uint32 operationType = prepPagesReader.getUint32();
+
+ ndbrequire(prepPagesReader.next());
+
+ char tableName[MAX_TAB_NAME_SIZE];
+ Uint32 tableId;
+ UtilPrepareReq::KeyValue tableKey =
+ (UtilPrepareReq::KeyValue) prepPagesReader.getKey();
+ if (tableKey == UtilPrepareReq::TableId) {
+ jam();
+ tableId = prepPagesReader.getUint32();
+ }
+ else {
+ jam();
+ ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::TableName);
+ ndbrequire(prepPagesReader.getValueLen() <= MAX_TAB_NAME_SIZE);
+ prepPagesReader.getString(tableName);
+ }
+ /******************************************************************
+ * Seize AttributeMapping (by counting no of attribs in prepPages)
+ ******************************************************************/
+ Uint32 noOfAttributes = 0; // No of attributes in PreparePages (used later)
+ while(prepPagesReader.next()) {
+ if (tableKey == UtilPrepareReq::TableName) {
+ jam();
+ ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::AttributeName);
+ } else {
+ jam();
+ ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::AttributeId);
+ }
+ noOfAttributes++;
+ }
+ ndbrequire(prepPtr.p->prepOpPtr.p->attrMapping.seize(noOfAttributes));
+ if (operationType == UtilPrepareReq::Read) {
+ ndbrequire(prepPtr.p->prepOpPtr.p->rsInfo.seize(noOfAttributes));
+ }
+ /***************************************
+ * For each attribute name, lookup info
+ ***************************************/
+ // Goto start of attribute names
+ ndbrequire(prepPagesReader.first() && prepPagesReader.next() &&
+ prepPagesReader.next());
+
+ DictTabInfo::Table tableDesc; tableDesc.init();
+ AttrMappingBuffer::DataBufferIterator attrMappingIt;
+ ndbrequire(prepPtr.p->prepOpPtr.p->attrMapping.first(attrMappingIt));
+
+ ResultSetBuffer::DataBufferIterator rsInfoIt;
+ if (operationType == UtilPrepareReq::Read) {
+ ndbrequire(prepPtr.p->prepOpPtr.p->rsInfo.first(rsInfoIt));
+ }
+
+ Uint32 noOfPKAttribsStored = 0;
+ Uint32 noOfNonPKAttribsStored = 0;
+ Uint32 attrLength = 0;
+ Uint32 pkAttrLength = 0;
+ char attrNameRequested[MAX_ATTR_NAME_SIZE];
+ Uint32 attrIdRequested;
+
+ while(prepPagesReader.next()) {
+ UtilPrepareReq::KeyValue attributeKey =
+ (UtilPrepareReq::KeyValue) prepPagesReader.getKey();
+
+ ndbrequire((attributeKey == UtilPrepareReq::AttributeName) ||
+ (attributeKey == UtilPrepareReq::AttributeId));
+ if (attributeKey == UtilPrepareReq::AttributeName) {
+ jam();
+ ndbrequire(prepPagesReader.getValueLen() <= MAX_ATTR_NAME_SIZE);
+
+ prepPagesReader.getString(attrNameRequested);
+ attrIdRequested= ~0u;
+ } else {
+ jam();
+ attrIdRequested = prepPagesReader.getUint32();
+ }
+ /*****************************************
+ * Copy DictTabInfo into tableDesc struct
+ *****************************************/
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
+ SimplePropertiesSectionReader dictInfoReader(ptr, getSectionSegmentPool());
+
+ SimpleProperties::UnpackStatus unpackStatus;
+ unpackStatus = SimpleProperties::unpack(dictInfoReader, &tableDesc,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+ ndbrequire(unpackStatus == SimpleProperties::Break);
+
+ /************************
+ * Lookup in DictTabInfo
+ ************************/
+ DictTabInfo::Attribute attrDesc; attrDesc.init();
+ char attrName[MAX_ATTR_NAME_SIZE];
+ Uint32 attrId= ~(Uint32)0;
+ bool attributeFound = false;
+ Uint32 noOfKeysFound = 0; // # PK attrs found before attr in DICTdata
+ Uint32 noOfNonKeysFound = 0; // # nonPK attrs found before attr in DICTdata
+ for (Uint32 i=0; i<tableDesc.NoOfAttributes; i++) {
+ if (tableKey == UtilPrepareReq::TableName) {
+ jam();
+ ndbrequire(dictInfoReader.getKey() == DictTabInfo::AttributeName);
+ ndbrequire(dictInfoReader.getValueLen() <= MAX_ATTR_NAME_SIZE);
+ dictInfoReader.getString(attrName);
+ attrId= ~(Uint32)0; // attrId not used
+ } else { // (tableKey == UtilPrepareReq::TableId)
+ jam();
+ dictInfoReader.next(); // Skip name
+ ndbrequire(dictInfoReader.getKey() == DictTabInfo::AttributeId);
+ attrId = dictInfoReader.getUint32();
+ attrName[0]= '\0'; // attrName not used
+ }
+ unpackStatus = SimpleProperties::unpack(dictInfoReader, &attrDesc,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+ ndbrequire(unpackStatus == SimpleProperties::Break);
+ //attrDesc.print(stdout);
+
+ if (attrDesc.AttributeKeyFlag) { jam(); noOfKeysFound++; }
+ else { jam(); noOfNonKeysFound++; }
+ if (attributeKey == UtilPrepareReq::AttributeName) {
+ if (strcmp(attrName, attrNameRequested) == 0) {
+ attributeFound = true;
+ break;
+ }
+ }
+ else // (attributeKey == UtilPrepareReq::AttributeId)
+ if (attrId == attrIdRequested) {
+ attributeFound = true;
+ break;
+ }
+
+ // Move to next attribute
+ ndbassert(dictInfoReader.getKey() == DictTabInfo::AttributeEnd);
+ dictInfoReader.next();
+ }
+
+ /**********************
+ * Attribute not found
+ **********************/
+ if (!attributeFound) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal,
+ UtilPrepareRef::DICT_TAB_INFO_ERROR,
+ prepPtr.p->clientRef, prepPtr.p->clientData);
+ infoEvent("UTIL: Unknown attribute requested: %s in table: %s",
+ attrNameRequested, tableName);
+ releasePreparedOperation(prepOpPtr);
+ releasePrepare(prepPtr);
+ return;
+ }
+
+ /**************************************************************
+ * Attribute found - store in mapping (AttributeId, Position)
+ **************************************************************/
+ AttributeHeader & attrMap =
+ AttributeHeader::init(attrMappingIt.data,
+ attrDesc.AttributeId, // 1. Store AttrId
+ 0);
+
+ if (attrDesc.AttributeKeyFlag) {
+ // ** Attribute belongs to PK **
+ prepOpPtr.p->pkBitmask.set(attrDesc.AttributeId);
+ attrMap.setDataSize(noOfKeysFound - 1); // 2. Store Position
+ noOfPKAttribsStored++;
+ } else {
+ attrMap.setDataSize(0x3fff); // 2. Store Position (fake)
+ noOfNonPKAttribsStored++;
+
+ /***********************************************************
+ * Error: Read nonPK Attr before all PK attr have been read
+ ***********************************************************/
+ if (noOfPKAttribsStored != tableDesc.NoOfKeyAttr) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal,
+ UtilPrepareRef::DICT_TAB_INFO_ERROR,
+ prepPtr.p->clientRef, prepPtr.p->clientData);
+ infoEvent("UTIL: Non-PK attr not allowed before "
+ "all PK attrs have been defined, table: %s",
+ tableName);
+ releasePreparedOperation(prepOpPtr);
+ releasePrepare(prepPtr);
+ return;
+ }
+ }
+#if 0
+ ndbout << "BEFORE: attrLength: " << attrLength << endl;
+#endif
+ {
+ int len = 0;
+ switch (attrDesc.AttributeSize) {
+ case DictTabInfo::an8Bit:
+ len = (attrDesc.AttributeArraySize + 3)/ 4;
+ break;
+ case DictTabInfo::a16Bit:
+ len = (attrDesc.AttributeArraySize + 1) / 2;
+ break;
+ case DictTabInfo::a32Bit:
+ len = attrDesc.AttributeArraySize;
+ break;
+ case DictTabInfo::a64Bit:
+ len = attrDesc.AttributeArraySize * 2;
+ break;
+ case DictTabInfo::a128Bit:
+ len = attrDesc.AttributeArraySize * 4;
+ break;
+ }
+ attrLength += len;
+ if (attrDesc.AttributeKeyFlag)
+ pkAttrLength += len;
+
+ if (operationType == UtilPrepareReq::Read) {
+ AttributeHeader::init(rsInfoIt.data,
+ attrDesc.AttributeId, // 1. Store AttrId
+ len);
+ prepOpPtr.p->rsInfo.next(rsInfoIt, 1);
+ }
+ }
+#if 0
+ ndbout << ": AttributeSize: " << attrDesc.AttributeSize << endl;
+ ndbout << ": AttributeArraySize: " << attrDesc.AttributeArraySize << endl;
+ ndbout << "AFTER: attrLength: " << attrLength << endl;
+#endif
+ //attrMappingIt.print(stdout);
+ //prepPtr.p->prepOpPtr.p->attrMapping.print(stdout);
+ prepPtr.p->prepOpPtr.p->attrMapping.next(attrMappingIt, 1);
+ }
+
+ /***************************
+ * Error: Not all PKs found
+ ***************************/
+ if (noOfPKAttribsStored != tableDesc.NoOfKeyAttr) {
+ jam();
+ releaseSections(signal);
+ sendUtilPrepareRef(signal,
+ UtilPrepareRef::DICT_TAB_INFO_ERROR,
+ prepPtr.p->clientRef, prepPtr.p->clientData);
+ infoEvent("UTIL: Not all primary key attributes requested for table: %s",
+ tableName);
+ releasePreparedOperation(prepOpPtr);
+ releasePrepare(prepPtr);
+ return;
+ }
+
+#if 0
+ AttrMappingBuffer::ConstDataBufferIterator tmpIt;
+ for (prepPtr.p->prepOpPtr.p->attrMapping.first(tmpIt); tmpIt.curr.i != RNIL;
+ prepPtr.p->prepOpPtr.p->attrMapping.next(tmpIt)) {
+ AttributeHeader* ah = (AttributeHeader *) tmpIt.data;
+ ah->print(stdout);
+ }
+#endif
+
+ /**********************************************
+ * Preparing of PreparedOperation signal train
+ **********************************************/
+ Uint32 static_len = TcKeyReq::StaticLength;
+ prepOpPtr.p->tckey.tableId = tableDesc.TableId;
+ prepOpPtr.p->tckey.tableSchemaVersion = tableDesc.TableVersion;
+ prepOpPtr.p->noOfKeyAttr = tableDesc.NoOfKeyAttr;
+ prepOpPtr.p->keyLen = tableDesc.KeyLength; // Total no of words in PK
+ if (prepOpPtr.p->keyLen > TcKeyReq::MaxKeyInfo) {
+ jam();
+ prepOpPtr.p->tckeyLenInBytes = (static_len + TcKeyReq::MaxKeyInfo) * 4;
+ } else {
+ jam();
+ prepOpPtr.p->tckeyLenInBytes = (static_len + prepOpPtr.p->keyLen) * 4;
+ }
+ prepOpPtr.p->keyDataPos = static_len; // Start of keyInfo[] in tckeyreq
+
+ Uint32 requestInfo = 0;
+ TcKeyReq::setAbortOption(requestInfo, TcKeyReq::AbortOnError);
+ TcKeyReq::setKeyLength(requestInfo, tableDesc.KeyLength);
+ switch(operationType) {
+ case(UtilPrepareReq::Read):
+ prepOpPtr.p->rsLen =
+ attrLength +
+ tableDesc.NoOfKeyAttr +
+ noOfNonPKAttribsStored; // Read needs a resultset
+ prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored;
+ prepOpPtr.p->tckey.attrLen = prepOpPtr.p->noOfAttr;
+ TcKeyReq::setOperationType(requestInfo, ZREAD);
+ break;
+ case(UtilPrepareReq::Update):
+ prepOpPtr.p->rsLen = 0;
+ prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored;
+ prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr;
+ TcKeyReq::setOperationType(requestInfo, ZUPDATE);
+ break;
+ case(UtilPrepareReq::Insert):
+ prepOpPtr.p->rsLen = 0;
+ prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored;
+ prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr;
+ TcKeyReq::setOperationType(requestInfo, ZINSERT);
+ break;
+ case(UtilPrepareReq::Delete):
+ // For delete, the total requested attribute length must equal the primary key length
+ ndbrequire(tableDesc.KeyLength == attrLength);
+ prepOpPtr.p->rsLen = 0;
+ prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr;
+ prepOpPtr.p->tckey.attrLen = 0;
+ TcKeyReq::setOperationType(requestInfo, ZDELETE);
+ break;
+ case(UtilPrepareReq::Write):
+ prepOpPtr.p->rsLen = 0;
+ prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored;
+ prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr;
+ TcKeyReq::setOperationType(requestInfo, ZWRITE);
+ break;
+ }
+ TcKeyReq::setAIInTcKeyReq(requestInfo, 0); // Attrinfo sent separately
+ prepOpPtr.p->tckey.requestInfo = requestInfo;
+
+ /****************************
+ * Confirm completed prepare
+ ****************************/
+ UtilPrepareConf * conf = (UtilPrepareConf *)signal->getDataPtr();
+ conf->senderData = prepPtr.p->clientData;
+ conf->prepareId = prepPtr.p->prepOpPtr.i;
+
+ releaseSections(signal);
+ sendSignal(prepPtr.p->clientRef, GSN_UTIL_PREPARE_CONF, signal,
+ UtilPrepareConf::SignalLength, JBB);
+
+#if 0
+ prepPtr.p->prepOpPtr.p->print();
+#endif
+ releasePrepare(prepPtr);
+}
+
+
+void
+DbUtil::execUTIL_RELEASE_REQ(Signal* signal){
+ jamEntry();
+
+ UtilReleaseReq * req = (UtilReleaseReq *)signal->getDataPtr();
+ const Uint32 clientRef = signal->senderBlockRef();
+ const Uint32 prepareId = req->prepareId;
+ const Uint32 senderData = req->senderData;
+
+#if 0
+ /**
+ * This only works when ARRAY_GUARD is defined (debug mode)
+ */
+ if (!c_preparedOperationPool.isSeized(prepareId)) {
+ UtilReleaseRef * ref = (UtilReleaseRef *)signal->getDataPtr();
+ ref->prepareId = prepareId;
+ ref->errorCode = UtilReleaseRef::NO_SUCH_PREPARE_SEIZED;
+ sendSignal(clientRef, GSN_UTIL_RELEASE_REF, signal,
+ UtilReleaseRef::SignalLength, JBB);
+ }
+#endif
+ PreparedOperationPtr prepOpPtr;
+ c_preparedOperationPool.getPtr(prepOpPtr, prepareId);
+
+ releasePreparedOperation(prepOpPtr);
+
+ UtilReleaseConf * const conf = (UtilReleaseConf*)signal->getDataPtrSend();
+ conf->senderData = senderData;
+ sendSignal(clientRef, GSN_UTIL_RELEASE_CONF, signal,
+ UtilReleaseConf::SignalLength, JBB);
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Sequence Service
+ * ------------------------------------------------------------------------
+ *
+ * A service with a stored incrementable number
+ **************************************************************************/
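+
+/*
+ * As set up in hardcodedPrepare() below, sequences are stored in table 0
+ * (cf. the SYSTAB_0 sketch at the end of this module), keyed by a one-word
+ * sequenceId, with attribute 1 holding the counter as two 32-bit words.
+ */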
+
+void
+DbUtil::hardcodedPrepare() {
+ /**
+ * Prepare SequenceCurrVal (READ)
+ */
+ {
+ PreparedOperationPtr ptr;
+ ndbrequire(c_preparedOperationPool.seizeId(ptr, 0));
+ ptr.p->keyLen = 1;
+ ptr.p->tckey.attrLen = 1;
+ ptr.p->rsLen = 3;
+ ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength +
+ ptr.p->keyLen + ptr.p->tckey.attrLen) * 4;
+ ptr.p->keyDataPos = TcKeyReq::StaticLength;
+ ptr.p->tckey.tableId = 0;
+ Uint32 requestInfo = 0;
+ TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree);
+ TcKeyReq::setOperationType(requestInfo, ZREAD);
+ TcKeyReq::setKeyLength(requestInfo, 1);
+ TcKeyReq::setAIInTcKeyReq(requestInfo, 1);
+ ptr.p->tckey.requestInfo = requestInfo;
+ ptr.p->tckey.tableSchemaVersion = 1;
+
+ // This is actually attr data
+ AttributeHeader::init(&ptr.p->tckey.distrGroupHashValue, 1, 0);
+
+ ndbrequire(ptr.p->rsInfo.seize(1));
+ ResultSetInfoBuffer::DataBufferIterator it;
+ ptr.p->rsInfo.first(it);
+ AttributeHeader::init(it.data, 1, 2); // Attribute 1 - 2 data words
+ }
+
+ /**
+ * Prepare SequenceNextVal (UPDATE)
+ */
+ {
+ PreparedOperationPtr ptr;
+ ndbrequire(c_preparedOperationPool.seizeId(ptr, 1));
+ ptr.p->keyLen = 1;
+ ptr.p->rsLen = 3;
+ ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength + ptr.p->keyLen + 5) * 4;
+ ptr.p->keyDataPos = TcKeyReq::StaticLength;
+ ptr.p->tckey.attrLen = 11;
+ ptr.p->tckey.tableId = 0;
+ Uint32 requestInfo = 0;
+ TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree);
+ TcKeyReq::setOperationType(requestInfo, ZUPDATE);
+ TcKeyReq::setKeyLength(requestInfo, 1);
+ TcKeyReq::setAIInTcKeyReq(requestInfo, 5);
+ TcKeyReq::setInterpretedFlag(requestInfo, 1);
+ ptr.p->tckey.requestInfo = requestInfo;
+ ptr.p->tckey.tableSchemaVersion = 1;
+
+ // Signal is packed, which is why attrInfo is at distrGroupHashValue
+ // position
+ Uint32 * attrInfo = &ptr.p->tckey.distrGroupHashValue;
+ attrInfo[0] = 0; // InitialReadSize
+ attrInfo[1] = 5; // InterpretedSize
+ attrInfo[2] = 0; // FinalUpdateSize
+ attrInfo[3] = 1; // FinalReadSize
+ attrInfo[4] = 0; // SubroutineSize
+
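+ // The interpreted program below effectively reads attribute 1, adds the
+ // constant 1 and writes the result back; the trailing AttributeHeader
+ // requests attribute 1 as the final read.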
+ { // AttrInfo
+ ndbrequire(ptr.p->attrInfo.seize(6));
+ AttrInfoBuffer::DataBufferIterator it;
+ ptr.p->attrInfo.first(it);
+ * it.data = Interpreter::Read(1, 6);
+ ndbrequire(ptr.p->attrInfo.next(it));
+ * it.data = Interpreter::LoadConst16(7, 1);
+ ndbrequire(ptr.p->attrInfo.next(it));
+ * it.data = Interpreter::Add(7, 6, 7);
+ ndbrequire(ptr.p->attrInfo.next(it));
+ * it.data = Interpreter::Write(1, 7);
+ ndbrequire(ptr.p->attrInfo.next(it));
+ * it.data = Interpreter::ExitOK();
+
+ ndbrequire(ptr.p->attrInfo.next(it));
+ AttributeHeader::init(it.data, 1, 0);
+ }
+
+ { // ResultSet
+ ndbrequire(ptr.p->rsInfo.seize(1));
+ ResultSetInfoBuffer::DataBufferIterator it;
+ ptr.p->rsInfo.first(it);
+ AttributeHeader::init(it.data, 1, 2); // Attribute 1 - 2 data words
+ }
+ }
+
+ /**
+ * Prepare CreateSequence (INSERT)
+ */
+ {
+ PreparedOperationPtr ptr;
+ ndbrequire(c_preparedOperationPool.seizeId(ptr, 2));
+ ptr.p->keyLen = 1;
+ ptr.p->tckey.attrLen = 5;
+ ptr.p->rsLen = 0;
+ ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength +
+ ptr.p->keyLen + ptr.p->tckey.attrLen) * 4;
+ ptr.p->keyDataPos = TcKeyReq::StaticLength;
+ ptr.p->tckey.tableId = 0;
+ Uint32 requestInfo = 0;
+ TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree);
+ TcKeyReq::setOperationType(requestInfo, ZINSERT);
+ TcKeyReq::setKeyLength(requestInfo, 1);
+ TcKeyReq::setAIInTcKeyReq(requestInfo, 0);
+ ptr.p->tckey.requestInfo = requestInfo;
+ ptr.p->tckey.tableSchemaVersion = 1;
+ }
+}
+
+void
+DbUtil::execUTIL_SEQUENCE_REQ(Signal* signal){
+ jamEntry();
+
+ UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtr();
+
+ PreparedOperation * prepOp;
+
+ switch(req->requestType){
+ case UtilSequenceReq::CurrVal:
+ prepOp = c_preparedOperationPool.getPtr(0); //c_SequenceCurrVal
+ break;
+ case UtilSequenceReq::NextVal:
+ prepOp = c_preparedOperationPool.getPtr(1); //c_SequenceNextVal
+ break;
+ case UtilSequenceReq::Create:
+ prepOp = c_preparedOperationPool.getPtr(2); //c_CreateSequence
+ break;
+ default:
+ ndbrequire(false);
+ prepOp = 0; // remove warning
+ }
+
+ /**
+ * 1 Transaction with 1 operation
+ */
+ TransactionPtr transPtr;
+ ndbrequire(c_runningTransactions.seize(transPtr));
+
+ OperationPtr opPtr;
+ ndbrequire(transPtr.p->operations.seize(opPtr));
+
+ ndbrequire(opPtr.p->rs.seize(prepOp->rsLen));
+ ndbrequire(opPtr.p->keyInfo.seize(prepOp->keyLen));
+
+ transPtr.p->gsn = GSN_UTIL_SEQUENCE_REQ;
+ transPtr.p->clientRef = signal->senderBlockRef();
+ transPtr.p->clientData = req->senderData;
+ transPtr.p->sequence.sequenceId = req->sequenceId;
+ transPtr.p->sequence.requestType = req->requestType;
+
+ opPtr.p->prepOp = prepOp;
+ opPtr.p->prepOp_i = RNIL;
+
+ KeyInfoBuffer::DataBufferIterator it;
+ opPtr.p->keyInfo.first(it);
+ it.data[0] = transPtr.p->sequence.sequenceId;
+
+ if(req->requestType == UtilSequenceReq::Create){
+ ndbrequire(opPtr.p->attrInfo.seize(5));
+ AttrInfoBuffer::DataBufferIterator it;
+
+ opPtr.p->attrInfo.first(it);
+ AttributeHeader::init(it.data, 0, 1);
+
+ ndbrequire(opPtr.p->attrInfo.next(it));
+ * it.data = transPtr.p->sequence.sequenceId;
+
+ ndbrequire(opPtr.p->attrInfo.next(it));
+ AttributeHeader::init(it.data, 1, 2);
+
+ ndbrequire(opPtr.p->attrInfo.next(it));
+ * it.data = 0;
+
+ ndbrequire(opPtr.p->attrInfo.next(it));
+ * it.data = 0;
+ }
+
+ runTransaction(signal, transPtr);
+}
+
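+/**
+ * getResultSet
+ *
+ * Packs the single operation's result set into two linear sections:
+ * HEADER_SECTION gets one AttributeHeader word per attribute and
+ * DATA_SECTION gets the concatenated data words. Returns 0 if the result
+ * set is empty, otherwise 1.
+ */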
+int
+DbUtil::getResultSet(Signal* signal, const Transaction * transP,
+ struct LinearSectionPtr sectionsPtr[]) {
+ OperationPtr opPtr;
+ ndbrequire(transP->operations.first(opPtr));
+ ndbrequire(transP->operations.hasNext(opPtr) == false);
+
+ int noAttr = 0;
+ int dataSz = 0;
+ Uint32* tmpBuf = signal->theData + 25;
+ const Uint32* headerBuffer = tmpBuf;
+
+ const ResultSetBuffer & rs = opPtr.p->rs;
+ ResultSetInfoBuffer::ConstDataBufferIterator it;
+
+ // extract headers
+ for(rs.first(it); it.curr.i != RNIL; ) {
+ *tmpBuf++ = it.data[0];
+ rs.next(it, ((AttributeHeader*)&it.data[0])->getDataSize() + 1);
+ noAttr++;
+ }
+
+ if (noAttr == 0)
+ return 0;
+
+ const Uint32* dataBuffer = tmpBuf;
+
+ // extract data
+ for(rs.first(it); it.curr.i != RNIL; ) {
+ int sz = ((AttributeHeader*)&it.data[0])->getDataSize();
+ rs.next(it,1);
+ for (int i = 0; i < sz; i++) {
+ *tmpBuf++ = *it.data;
+ rs.next(it,1);
+ dataSz++;
+ }
+ }
+
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = (Uint32 *)headerBuffer;
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].p = (Uint32 *)dataBuffer;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataSz;
+
+ return 1;
+}
+
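+/**
+ * reportSequence
+ *
+ * Answers the client with UTIL_SEQUENCE_CONF carrying the two-word sequence
+ * value read from the result set, or with UTIL_SEQUENCE_REF on failure
+ * (TC error 626 is mapped to NoSuchSequence, anything else to TCError).
+ */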
+void
+DbUtil::reportSequence(Signal* signal, const Transaction * transP){
+ OperationPtr opPtr;
+ ndbrequire(transP->operations.first(opPtr));
+ ndbrequire(transP->operations.hasNext(opPtr) == false);
+
+ if(transP->errorCode == 0){
+ jam(); // OK
+
+ UtilSequenceConf * ret = (UtilSequenceConf *)signal->getDataPtrSend();
+ ret->senderData = transP->clientData;
+ ret->sequenceId = transP->sequence.sequenceId;
+ ret->requestType = transP->sequence.requestType;
+
+ bool ok = false;
+ switch(transP->sequence.requestType){
+ case UtilSequenceReq::CurrVal:
+ case UtilSequenceReq::NextVal:{
+ ok = true;
+ ndbrequire(opPtr.p->rsRecv == 3);
+
+ ResultSetBuffer::DataBufferIterator rsit;
+ ndbrequire(opPtr.p->rs.first(rsit));
+
+ ret->sequenceValue[0] = rsit.data[1];
+ ret->sequenceValue[1] = rsit.data[2];
+ break;
+ }
+ case UtilSequenceReq::Create:
+ ok = true;
+ ret->sequenceValue[0] = 0;
+ ret->sequenceValue[1] = 0;
+ break;
+ }
+ ndbrequire(ok);
+ sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_CONF, signal,
+ UtilSequenceConf::SignalLength, JBB);
+ return;
+ }
+
+ UtilSequenceRef::ErrorCode errCode = UtilSequenceRef::TCError;
+
+ switch(transP->sequence.requestType)
+ {
+ case UtilSequenceReq::CurrVal:
+ case UtilSequenceReq::NextVal:{
+ if (transP->errorCode == 626)
+ errCode = UtilSequenceRef::NoSuchSequence;
+ break;
+ }
+ case UtilSequenceReq::Create:
+ break;
+ }
+
+ UtilSequenceRef * ret = (UtilSequenceRef *)signal->getDataPtrSend();
+ ret->senderData = transP->clientData;
+ ret->sequenceId = transP->sequence.sequenceId;
+ ret->requestType = transP->sequence.requestType;
+ ret->errorCode = (Uint32)errCode;
+ sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_REF, signal,
+ UtilSequenceRef::SignalLength, JBB);
+}
+#if 0
+ Ndb ndb("ndb","def");
+ NdbConnection* tConnection = ndb.startTransaction();
+ NdbOperation* tOperation = tConnection->getNdbOperation("SYSTAB_0");
+
+ //#if 0 && API_CODE
+ if( tOperation != NULL ) {
+ tOperation->interpretedUpdateTuple();
+ tOperation->equal((U_Int32)0, keyValue );
+ tNextId_Result = tOperation->getValue((U_Int32)1);
+ tOperation->incValue((U_Int32)1, (U_Int32)8192);
+
+ if (tConnection->execute( Commit ) != -1 ) {
+ U_Int64 tValue = tNextId_Result->u_64_value(); // Read result value
+ theFirstTransId = tValue;
+ theLastTransId = tValue + 8191;
+ closeTransaction(tConnection);
+ return startTransactionLocal(aPriority, nodeId);
+ }
+ }
+ /**
+ * InitialReadSize = 0;
+ * InterpretedSize = incValue(1);
+ * FinalUpdateSize = 0;
+ * FinalReadSize = 1; // Read value
+ * SubroutineSize = 0;
+ */
+#endif
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Transaction execution request
+ * ------------------------------------------------------------------------
+ *
+ * Handle requests to execute a prepared transaction
+ **************************************************************************/
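+
+/*
+ * UTIL_EXECUTE_REQ carries a HEADER_SECTION of AttributeHeaders and a
+ * DATA_SECTION with the corresponding data words. Primary key attributes
+ * (according to the prepared pkBitmask) are copied to keyInfo; for reads
+ * only the headers are appended to attrInfo, for deletes nothing, otherwise
+ * header plus data. The transaction is then run with up to 3 retries.
+ */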
+
+void
+DbUtil::execUTIL_EXECUTE_REQ(Signal* signal)
+{
+ jamEntry();
+
+ UtilExecuteReq * req = (UtilExecuteReq *)signal->getDataPtr();
+ const Uint32 clientRef = req->senderRef;
+ const Uint32 clientData = req->senderData;
+ const Uint32 prepareId = req->getPrepareId();
+ const bool releaseFlag = req->getReleaseFlag();
+
+ if(signal->getNoOfSections() == 0) {
+ // Missing prepare data
+ jam();
+ releaseSections(signal);
+ sendUtilExecuteRef(signal, UtilExecuteRef::MissingDataSection,
+ 0, clientRef, clientData);
+ return;
+ }
+ /*******************************
+ * Get PreparedOperation struct
+ *******************************/
+ PreparedOperationPtr prepOpPtr;
+ c_runningPreparedOperations.first(prepOpPtr);
+ while (!prepOpPtr.isNull() && prepOpPtr.i != prepareId)
+ c_runningPreparedOperations.next(prepOpPtr);
+
+ if (prepOpPtr.i != prepareId) {
+ jam();
+ releaseSections(signal);
+ sendUtilExecuteRef(signal, UtilExecuteRef::IllegalPrepareId,
+ 0, clientRef, clientData);
+ return;
+ }
+
+ prepOpPtr.p->releaseFlag = releaseFlag;
+
+ TransactionPtr transPtr;
+ OperationPtr opPtr;
+ SegmentedSectionPtr headerPtr, dataPtr;
+
+ signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION);
+ SectionReader headerReader(headerPtr, getSectionSegmentPool());
+ signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION);
+ SectionReader dataReader(dataPtr, getSectionSegmentPool());
+
+#if 0 //def EVENT_DEBUG
+ // Debugging
+ printf("DbUtil::execUTIL_EXECUTE_REQ: Headers (%u): ", headerPtr.sz);
+ Uint32 word;
+ while(headerReader.getWord(&word))
+ printf("H'%.8x ", word);
+ printf("\n");
+ printf("DbUtil::execUTIL_EXECUTE_REQ: Data (%u): ", dataPtr.sz);
+ headerReader.reset();
+ while(dataReader.getWord(&word))
+ printf("H'%.8x ", word);
+ printf("\n");
+ dataReader.reset();
+#endif
+
+// Uint32 totalDataLen = headerPtr.sz + dataPtr.sz;
+
+ /************************************************************
+ * Seize Transaction record
+ ************************************************************/
+ ndbrequire(c_runningTransactions.seize(transPtr));
+ transPtr.p->gsn = GSN_UTIL_EXECUTE_REQ;
+ transPtr.p->clientRef = clientRef;
+ transPtr.p->clientData = clientData;
+ ndbrequire(transPtr.p->operations.seize(opPtr));
+ opPtr.p->prepOp = prepOpPtr.p;
+ opPtr.p->prepOp_i = prepOpPtr.i;
+
+#if 0 //def EVENT_DEBUG
+ printf("opPtr.p->rs.seize( %u )\n", prepOpPtr.p->rsLen);
+#endif
+ ndbrequire(opPtr.p->rs.seize(prepOpPtr.p->rsLen));
+
+ /***********************************************************
+ * Store signal data on linear memory in Transaction record
+ ***********************************************************/
+ KeyInfoBuffer* keyInfo = &opPtr.p->keyInfo;
+ AttrInfoBuffer* attrInfo = &opPtr.p->attrInfo;
+ AttributeHeader header;
+ Uint32* tempBuf = signal->theData + 25;
+ bool dataComplete = true;
+
+ while(headerReader.getWord((Uint32 *)&header)) {
+ Uint32* bufStart = tempBuf;
+ header.insertHeader(tempBuf++);
+ for(unsigned int i = 0; i < header.getDataSize(); i++) {
+ if (!dataReader.getWord(tempBuf++)) {
+ dataComplete = false;
+ break;
+ }
+ }
+ bool res = true;
+
+#if 0 //def EVENT_DEBUG
+ if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) ==
+ TcKeyReq::Read) {
+ if(prepOpPtr.p->pkBitmask.get(header.getAttributeId()))
+ printf("PrimaryKey\n");
+ }
+ printf("AttrId %u Hdrsz %d Datasz %u \n",
+ header.getAttributeId(),
+ header.getHeaderSize(),
+ header.getDataSize());
+#endif
+
+ if(prepOpPtr.p->pkBitmask.get(header.getAttributeId()))
+ // A primary key attribute
+ res = keyInfo->append(bufStart + header.getHeaderSize(),
+ header.getDataSize());
+
+ switch (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo)) {
+ case ZREAD:
+ res &= attrInfo->append(bufStart, header.getHeaderSize());
+ break;
+ case ZDELETE:
+ // no attrinfo for Delete
+ break;
+ default:
+ res &= attrInfo->append(bufStart,
+ header.getHeaderSize() + header.getDataSize());
+ }
+
+ if (!res) {
+ // Failed to allocate buffer data
+ jam();
+ releaseSections(signal);
+ sendUtilExecuteRef(signal, UtilExecuteRef::AllocationError,
+ 0, clientRef, clientData);
+ releaseTransaction(transPtr);
+ return;
+ }
+ }
+ if (!dataComplete) {
+ // Missing data in data section
+ jam();
+ releaseSections(signal);
+ sendUtilExecuteRef(signal, UtilExecuteRef::MissingData,
+ 0, clientRef, clientData);
+ releaseTransaction(transPtr);
+ return;
+ }
+
+ const Uint32 l1 = prepOpPtr.p->tckey.attrLen;
+ const Uint32 l2 =
+ prepOpPtr.p->attrInfo.getSize() + opPtr.p->attrInfo.getSize();
+
+ if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) != ZREAD){
+ ndbrequire(l1 == l2);
+ } else {
+#if 0
+ ndbout_c("TcKeyReq::Read");
+#endif
+ }
+
+ releaseSections(signal);
+ transPtr.p->noOfRetries = 3;
+ runTransaction(signal, transPtr);
+}
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: General transaction machinery
+ * ------------------------------------------------------------------------
+ * Executes a prepared transaction
+ **************************************************************************/
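+
+/*
+ * runTransaction()/runOperation() send one TCKEYREQ per operation to DBTC
+ * (start flag on the first, commit + execute flags on the last), with up to
+ * 8 key words inlined in the TCKEYREQ and the remainder sent as KEYINFO
+ * signals, followed by the prepared and per-operation ATTRINFO trains.
+ */
+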
+void
+DbUtil::runTransaction(Signal* signal, TransactionPtr transPtr){
+
+ /* Init transaction */
+ transPtr.p->sent = 0;
+ transPtr.p->recv = 0;
+ transPtr.p->errorCode = 0;
+ getTransId(transPtr.p);
+
+ OperationPtr opPtr;
+ ndbrequire(transPtr.p->operations.first(opPtr));
+
+ /* First operation */
+ Uint32 start = 0;
+ TcKeyReq::setStartFlag(start, 1);
+ runOperation(signal, transPtr, opPtr, start);
+ transPtr.p->sent ++;
+
+ /* Rest of operations */
+ start = 0;
+ while(opPtr.i != RNIL){
+ runOperation(signal, transPtr, opPtr, start);
+ transPtr.p->sent ++;
+ }
+ //transPtr.p->print();
+}
+
+void
+DbUtil::runOperation(Signal* signal, TransactionPtr & transPtr,
+ OperationPtr & opPtr, Uint32 start) {
+ Uint32 opI = opPtr.i;
+ Operation * op = opPtr.p;
+ const PreparedOperation * pop = op->prepOp;
+
+ if(!transPtr.p->operations.next(opPtr)){
+ TcKeyReq::setCommitFlag(start, 1); // Last operation
+ TcKeyReq::setExecuteFlag(start, 1);
+ }
+
+#if 0 //def EVENT_DEBUG
+ if (TcKeyReq::getOperationType(pop->tckey.requestInfo) ==
+ TcKeyReq::Read) {
+ printf("TcKeyReq::Read runOperation\n");
+ }
+#endif
+
+ /**
+ * Init operation w.r.t result set
+ */
+ initResultSet(op->rs, pop->rsInfo);
+ op->rs.first(op->rsIterator);
+ op->rsRecv = 0;
+#if 0 //def EVENT_DEBUG
+ printf("pop->rsLen %u\n", pop->rsLen);
+#endif
+ op->rsExpect = 0;
+ op->transPtrI = transPtr.i;
+
+ TcKeyReq * tcKey = (TcKeyReq*)signal->getDataPtrSend();
+ //ndbout << "*** 6 ***"<< endl; pop->print();
+ memcpy(tcKey, &pop->tckey, pop->tckeyLenInBytes);
+ //ndbout << "*** 6b ***"<< endl;
+ //printTCKEYREQ(stdout, signal->getDataPtrSend(),
+ // pop->tckeyLenInBytes >> 2, 0);
+ tcKey->apiConnectPtr = transPtr.p->connectPtr;
+ tcKey->senderData = opI;
+ tcKey->transId1 = transPtr.p->transId[0];
+ tcKey->transId2 = transPtr.p->transId[1];
+ tcKey->requestInfo |= start;
+
+#if 0 //def EVENT_DEBUG
+ // Debugging
+ printf("DbUtil::runOperation: KEYINFO\n");
+ op->keyInfo.print(stdout);
+ printf("DbUtil::runOperation: ATTRINFO\n");
+ op->attrInfo.print(stdout);
+#endif
+
+ /**
+ * Key Info
+ */
+ //KeyInfoBuffer::DataBufferIterator kit;
+ KeyInfoIterator kit;
+ op->keyInfo.first(kit);
+ Uint32 *keyDst = ((Uint32*)tcKey) + pop->keyDataPos;
+ for(Uint32 i = 0; i<8 && kit.curr.i != RNIL; i++, op->keyInfo.next(kit)){
+ keyDst[i] = * kit.data;
+ }
+ //ndbout << "*** 7 ***" << endl;
+ //printTCKEYREQ(stdout, signal->getDataPtrSend(),
+ // pop->tckeyLenInBytes >> 2, 0);
+
+#if 0 //def EVENT_DEBUG
+ printf("DbUtil::runOperation: sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, %d , JBB)\n", pop->tckeyLenInBytes >> 2);
+ printTCKEYREQ(stdout, signal->getDataPtr(), pop->tckeyLenInBytes >> 2,0);
+#endif
+ sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, pop->tckeyLenInBytes >> 2, JBB);
+
+ /**
+ * More than 8 words of key info not implemented
+ */
+ // ndbrequire(kit.curr.i == RNIL); // Yes it is
+
+ /**
+ * KeyInfo
+ */
+ KeyInfo* keyInfo = (KeyInfo *)signal->getDataPtrSend();
+ keyInfo->connectPtr = transPtr.p->connectPtr;
+ keyInfo->transId[0] = transPtr.p->transId[0];
+ keyInfo->transId[1] = transPtr.p->transId[1];
+ sendKeyInfo(signal, keyInfo, op->keyInfo, kit);
+
+ /**
+ * AttrInfo
+ */
+ AttrInfo* attrInfo = (AttrInfo *)signal->getDataPtrSend();
+ attrInfo->connectPtr = transPtr.p->connectPtr;
+ attrInfo->transId[0] = transPtr.p->transId[0];
+ attrInfo->transId[1] = transPtr.p->transId[1];
+
+ AttrInfoIterator ait;
+ pop->attrInfo.first(ait);
+ sendAttrInfo(signal, attrInfo, pop->attrInfo, ait);
+
+ op->attrInfo.first(ait);
+ sendAttrInfo(signal, attrInfo, op->attrInfo, ait);
+}
+
+void
+DbUtil::sendKeyInfo(Signal* signal,
+ KeyInfo* keyInfo,
+ const KeyInfoBuffer & keyBuf,
+ KeyInfoIterator & kit)
+{
+ while(kit.curr.i != RNIL) {
+ Uint32 *keyDst = keyInfo->keyData;
+ Uint32 keyDataLen = 0;
+ for(Uint32 i = 0; i<KeyInfo::DataLength && kit.curr.i != RNIL;
+ i++, keyBuf.next(kit)){
+ keyDst[i] = * kit.data;
+ keyDataLen++;
+ }
+#if 0 //def EVENT_DEBUG
+ printf("DbUtil::sendKeyInfo: sendSignal(DBTC_REF, GSN_KEYINFO, signal, %d , JBB)\n", KeyInfo::HeaderLength + keyDataLen);
+#endif
+ sendSignal(DBTC_REF, GSN_KEYINFO, signal,
+ KeyInfo::HeaderLength + keyDataLen, JBB);
+ }
+}
+
+void
+DbUtil::sendAttrInfo(Signal* signal,
+ AttrInfo* attrInfo,
+ const AttrInfoBuffer & attrBuf,
+ AttrInfoIterator & ait)
+{
+ while(ait.curr.i != RNIL) {
+ Uint32 *attrDst = attrInfo->attrData;
+ Uint32 i = 0;
+ for(i = 0; i<AttrInfo::DataLength && ait.curr.i != RNIL;
+ i++, attrBuf.next(ait)){
+ attrDst[i] = * ait.data;
+ }
+#if 0 //def EVENT_DEBUG
+ printf("DbUtil::sendAttrInfo: sendSignal(DBTC_REF, GSN_ATTRINFO, signal, %d , JBB)\n", AttrInfo::HeaderLength + i);
+#endif
+ sendSignal(DBTC_REF, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + i, JBB);
+ }
+}
+
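+/**
+ * initResultSet
+ *
+ * Copies the prepared AttributeHeaders from the ResultSetInfoBuffer into the
+ * operation's result buffer, advancing by each attribute's data size plus one
+ * word between headers.
+ */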
+void
+DbUtil::initResultSet(ResultSetBuffer & rs,
+ const ResultSetInfoBuffer & rsi){
+
+ ResultSetBuffer::DataBufferIterator rsit;
+ rs.first(rsit);
+
+ ResultSetInfoBuffer::ConstDataBufferIterator rsiit;
+ for(rsi.first(rsiit); rsiit.curr.i != RNIL; rsi.next(rsiit)){
+ ndbrequire(rsit.curr.i != RNIL);
+
+ rsit.data[0] = rsiit.data[0];
+#if 0 //def EVENT_DEBUG
+ printf("Init resultset %u, sz %d\n",
+ rsit.curr.i,
+ ((AttributeHeader*)&rsit.data[0])->getDataSize() + 1);
+#endif
+ rs.next(rsit, ((AttributeHeader*)&rsit.data[0])->getDataSize() + 1);
+ }
+}
+
+void
+DbUtil::getTransId(Transaction * transP){
+
+ Uint32 tmp[2];
+ tmp[0] = c_transId[0];
+ tmp[1] = c_transId[1];
+
+ transP->transId[0] = tmp[0];
+ transP->transId[1] = tmp[1];
+
+ c_transId[1] = tmp[1] + 1;
+}
+
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Post Execute
+ * ------------------------------------------------------------------------
+ *
+ * Handles result from a sent transaction
+ **************************************************************************/
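+
+/*
+ * TRANSID_AI appends received data to the operation's result set, TCKEYCONF
+ * records the expected result lengths and completed operations, and TCKEYREF
+ * and TCROLLBACKREP record the error code; TCROLLBACKREP retries error codes
+ * 266, 410 and 1204 while noOfRetries remains. Once all operations are
+ * complete, finishTransaction() reports back according to the transaction's
+ * originating GSN.
+ */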
+
+/**
+ * execTRANSID_AI
+ *
+ * Receive result from transaction
+ *
+ * NOTE: This code assumes that
+ * TransidAI::DataLength = ResultSetBuffer::getSegmentSize() * n
+ */
+void
+DbUtil::execTRANSID_AI(Signal* signal){
+ jamEntry();
+#if 0 //def EVENT_DEBUG
+ ndbout_c("File: %s line: %u",__FILE__,__LINE__);
+#endif
+
+ const Uint32 opI = signal->theData[0];
+ const Uint32 transId1 = signal->theData[1];
+ const Uint32 transId2 = signal->theData[2];
+ const Uint32 dataLen = signal->length() - 3;
+
+ Operation * opP = c_operationPool.getPtr(opI);
+ TransactionPtr transPtr;
+ c_runningTransactions.getPtr(transPtr, opP->transPtrI);
+
+ ndbrequire(transId1 == transPtr.p->transId[0] &&
+ transId2 == transPtr.p->transId[1]);
+ opP->rsRecv += dataLen;
+
+ /**
+ * Save result
+ */
+ const Uint32 *src = &signal->theData[3];
+ ResultSetBuffer::DataBufferIterator rs = opP->rsIterator;
+
+ ndbrequire(opP->rs.import(rs,src,dataLen));
+ opP->rs.next(rs, dataLen);
+ opP->rsIterator = rs;
+
+ if(!opP->complete()){
+ jam();
+ return;
+ }
+
+ transPtr.p->recv++;
+ if(!transPtr.p->complete()){
+ jam();
+ return;
+ }
+
+ finishTransaction(signal, transPtr);
+}
+
+void
+DbUtil::execTCKEYCONF(Signal* signal){
+ jamEntry();
+#if 0 //def EVENT_DEBUG
+ ndbout_c("File: %s line: %u",__FILE__,__LINE__);
+#endif
+
+ TcKeyConf * keyConf = (TcKeyConf*)signal->getDataPtr();
+
+ //const Uint32 gci = keyConf->gci;
+ const Uint32 transI = keyConf->apiConnectPtr >> 1;
+ const Uint32 confInfo = keyConf->confInfo;
+ const Uint32 transId1 = keyConf->transId1;
+ const Uint32 transId2 = keyConf->transId2;
+
+ Uint32 recv = 0;
+ const Uint32 ops = TcKeyConf::getNoOfOperations(confInfo);
+ for(Uint32 i = 0; i<ops; i++){
+ OperationPtr opPtr;
+ c_operationPool.getPtr(opPtr, keyConf->operations[i].apiOperationPtr);
+
+ ndbrequire(opPtr.p->transPtrI == transI);
+ opPtr.p->rsExpect += keyConf->operations[i].attrInfoLen;
+ if(opPtr.p->complete()){
+ recv++;
+ }
+ }
+
+ /**
+ * Check commit ack marker flag
+ */
+ if (TcKeyConf::getMarkerFlag(confInfo)){
+ signal->theData[0] = transId1;
+ signal->theData[1] = transId2;
+ sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB);
+ }//if
+
+ TransactionPtr transPtr;
+ c_runningTransactions.getPtr(transPtr, transI);
+ ndbrequire(transId1 == transPtr.p->transId[0] &&
+ transId2 == transPtr.p->transId[1]);
+
+ transPtr.p->recv += recv;
+ if(!transPtr.p->complete()){
+ jam();
+ return;
+ }
+ finishTransaction(signal, transPtr);
+}
+
+void
+DbUtil::execTCKEYREF(Signal* signal){
+ jamEntry();
+#if 0 //def EVENT_DEBUG
+ ndbout_c("File: %s line: %u",__FILE__,__LINE__);
+#endif
+
+ const Uint32 transI = signal->theData[0] >> 1;
+ const Uint32 transId1 = signal->theData[1];
+ const Uint32 transId2 = signal->theData[2];
+ const Uint32 errCode = signal->theData[3];
+
+ TransactionPtr transPtr;
+ c_runningTransactions.getPtr(transPtr, transI);
+ ndbrequire(transId1 == transPtr.p->transId[0] &&
+ transId2 == transPtr.p->transId[1]);
+
+ //if(getClassification(errCode) == PermanentError){
+ //}
+
+ //ndbout << "Transaction error (code: " << errCode << ")" << endl;
+
+ transPtr.p->errorCode = errCode;
+ finishTransaction(signal, transPtr);
+}
+
+void
+DbUtil::execTCROLLBACKREP(Signal* signal){
+ jamEntry();
+#if 0 //def EVENT_DEBUG
+ ndbout_c("File: %s line: %u",__FILE__,__LINE__);
+#endif
+
+ const Uint32 transI = signal->theData[0] >> 1;
+ const Uint32 transId1 = signal->theData[1];
+ const Uint32 transId2 = signal->theData[2];
+ const Uint32 errCode = signal->theData[3];
+
+ TransactionPtr transPtr;
+ c_runningTransactions.getPtr(transPtr, transI);
+ ndbrequire(transId1 == transPtr.p->transId[0] &&
+ transId2 == transPtr.p->transId[1]);
+
+ //if(getClassification(errCode) == PermanentError){
+ //}
+
+#if 0 //def EVENT_DEBUG
+ ndbout << "Transaction error (code: " << errCode << ")" << endl;
+#endif
+
+ if(transPtr.p->noOfRetries > 0){
+ transPtr.p->noOfRetries--;
+ switch(errCode){
+ case 266:
+ case 410:
+ case 1204:
+#if 0
+ ndbout_c("errCode: %d noOfRetries: %d -> retry",
+ errCode, transPtr.p->noOfRetries);
+#endif
+ runTransaction(signal, transPtr);
+ return;
+ }
+ }
+
+ transPtr.p->errorCode = errCode;
+ finishTransaction(signal, transPtr);
+}
+
+void
+DbUtil::finishTransaction(Signal* signal, TransactionPtr transPtr){
+#if 0 //def EVENT_DEBUG
+ ndbout_c("Transaction %x %x completed %s",
+ transPtr.p->transId[0],
+ transPtr.p->transId[1],
+ transPtr.p->errorCode == 0 ? "OK" : "FAILED");
+#endif
+
+ /*
+ How to find the correct RS? Could we have multi-RS/transaction?
+
+ Operation * opP = c_operationPool.getPtr(opI);
+
+ ResultSetBuffer::DataBufferIterator rsit;
+ ndbrequire(opP->rs.first(rsit));
+ ndbout << "F Result: " << rsit.data << endl;
+
+ while (opP->rs.next(rsit)) {
+ ndbout << "R Result: " << rsit.data << endl;
+ }
+ */
+
+ switch(transPtr.p->gsn){
+ case GSN_UTIL_SEQUENCE_REQ:
+ jam();
+ reportSequence(signal, transPtr.p);
+ break;
+ case GSN_UTIL_EXECUTE_REQ:
+ if (transPtr.p->errorCode) {
+ UtilExecuteRef * ret = (UtilExecuteRef *)signal->getDataPtrSend();
+ ret->senderData = transPtr.p->clientData;
+ ret->errorCode = UtilExecuteRef::TCError;
+ ret->TCErrorCode = transPtr.p->errorCode;
+ sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_REF, signal,
+ UtilExecuteRef::SignalLength, JBB);
+ } else {
+ struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections];
+ UtilExecuteConf * ret = (UtilExecuteConf *)signal->getDataPtrSend();
+ ret->senderData = transPtr.p->clientData;
+ if (getResultSet(signal, transPtr.p, sectionsPtr)) {
+#if 0 //def EVENT_DEBUG
+ for (int j = 0; j < 2; j++) {
+ printf("Result set %u %u\n", j,sectionsPtr[j].sz);
+ for (int i=0; i < sectionsPtr[j].sz; i++)
+ printf("H'%.8x ", sectionsPtr[j].p[i]);
+ printf("\n");
+ }
+#endif
+ sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_CONF, signal,
+ UtilExecuteConf::SignalLength, JBB,
+ sectionsPtr, UtilExecuteReq::NoOfSections);
+ } else
+ sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_CONF, signal,
+ UtilExecuteConf::SignalLength, JBB);
+ }
+ break;
+ default:
+ ndbrequire(0);
+ break;
+ }
+ releaseTransaction(transPtr);
+}
+
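+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: Lock service
+ * ------------------------------------------------------------------------
+ *
+ * Each lock id maps to a FIFO queue of requestors: a LOCK_REQ is confirmed
+ * immediately if the queue was empty and is otherwise queued (TryLock
+ * instead fails with LockAlreadyHeld); UNLOCK releases the queue head and
+ * grants the lock to the next waiter. Requests from other nodes are
+ * refused (DistributedLockNotSupported).
+ **************************************************************************/
+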
+void
+DbUtil::execUTIL_LOCK_REQ(Signal * signal){
+ jamEntry();
+ UtilLockReq * req = (UtilLockReq*)signal->getDataPtr();
+ const Uint32 lockId = req->lockId;
+
+ LockQueuePtr lockQPtr;
+ if(!c_lockQueues.find(lockQPtr, lockId)){
+ jam();
+ sendLOCK_REF(signal, req, UtilLockRef::NoSuchLock);
+ return;
+ }
+
+// const Uint32 requestInfo = req->requestInfo;
+ const Uint32 senderNode = refToNode(req->senderRef);
+ if(senderNode != getOwnNodeId() && senderNode != 0){
+ jam();
+ sendLOCK_REF(signal, req, UtilLockRef::DistributedLockNotSupported);
+ return;
+ }
+
+ LocalDLFifoList<LockQueueElement> queue(c_lockElementPool,
+ lockQPtr.p->m_queue);
+ if(req->requestInfo & UtilLockReq::TryLock && !queue.isEmpty()){
+ jam();
+ sendLOCK_REF(signal, req, UtilLockRef::LockAlreadyHeld);
+ return;
+ }
+
+ LockQueueElementPtr lockEPtr;
+ if(!c_lockElementPool.seize(lockEPtr)){
+ jam();
+ sendLOCK_REF(signal, req, UtilLockRef::OutOfLockRecords);
+ return;
+ }
+
+ lockEPtr.p->m_senderRef = req->senderRef;
+ lockEPtr.p->m_senderData = req->senderData;
+
+ if(queue.isEmpty()){
+ jam();
+ sendLOCK_CONF(signal, lockQPtr.p, lockEPtr.p);
+ }
+
+ queue.add(lockEPtr);
+}
+
+void
+DbUtil::execUTIL_UNLOCK_REQ(Signal* signal){
+ jamEntry();
+
+ UtilUnlockReq * req = (UtilUnlockReq*)signal->getDataPtr();
+ const Uint32 lockId = req->lockId;
+
+ LockQueuePtr lockQPtr;
+ if(!c_lockQueues.find(lockQPtr, lockId)){
+ jam();
+ sendUNLOCK_REF(signal, req, UtilUnlockRef::NoSuchLock);
+ return;
+ }
+
+ LocalDLFifoList<LockQueueElement> queue(c_lockElementPool,
+ lockQPtr.p->m_queue);
+ LockQueueElementPtr lockEPtr;
+ if(!queue.first(lockEPtr)){
+ jam();
+ sendUNLOCK_REF(signal, req, UtilUnlockRef::NotLockOwner);
+ return;
+ }
+
+ if(lockQPtr.p->m_lockKey != req->lockKey){
+ jam();
+ sendUNLOCK_REF(signal, req, UtilUnlockRef::NotLockOwner);
+ return;
+ }
+
+ sendUNLOCK_CONF(signal, lockQPtr.p, lockEPtr.p);
+ queue.release(lockEPtr);
+
+ if(queue.first(lockEPtr)){
+ jam();
+ sendLOCK_CONF(signal, lockQPtr.p, lockEPtr.p);
+ return;
+ }
+}
+
+void
+DbUtil::sendLOCK_REF(Signal* signal,
+ const UtilLockReq * req, UtilLockRef::ErrorCode err){
+ const Uint32 senderData = req->senderData;
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 lockId = req->lockId;
+
+ UtilLockRef * ref = (UtilLockRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->lockId = lockId;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_UTIL_LOCK_REF, signal,
+ UtilLockRef::SignalLength, JBB);
+}
+
+void
+DbUtil::sendLOCK_CONF(Signal* signal,
+ LockQueue * lockQP,
+ LockQueueElement * lockEP){
+ const Uint32 senderData = lockEP->m_senderData;
+ const Uint32 senderRef = lockEP->m_senderRef;
+ const Uint32 lockId = lockQP->m_lockId;
+ const Uint32 lockKey = ++lockQP->m_lockKey;
+
+ UtilLockConf * conf = (UtilLockConf*)signal->getDataPtrSend();
+ conf->senderData = senderData;
+ conf->senderRef = reference();
+ conf->lockId = lockId;
+ conf->lockKey = lockKey;
+ sendSignal(senderRef, GSN_UTIL_LOCK_CONF, signal,
+ UtilLockConf::SignalLength, JBB);
+}
+
+void
+DbUtil::sendUNLOCK_REF(Signal* signal,
+ const UtilUnlockReq* req, UtilUnlockRef::ErrorCode err){
+
+ const Uint32 senderData = req->senderData;
+ const Uint32 senderRef = req->senderRef;
+ const Uint32 lockId = req->lockId;
+
+ UtilUnlockRef * ref = (UtilUnlockRef*)signal->getDataPtrSend();
+ ref->senderData = senderData;
+ ref->senderRef = reference();
+ ref->lockId = lockId;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_UTIL_UNLOCK_REF, signal,
+ UtilUnlockRef::SignalLength, JBB);
+}
+
+void
+DbUtil::sendUNLOCK_CONF(Signal* signal,
+ LockQueue * lockQP,
+ LockQueueElement * lockEP){
+ const Uint32 senderData = lockEP->m_senderData;
+ const Uint32 senderRef = lockEP->m_senderRef;
+ const Uint32 lockId = lockQP->m_lockId;
+ ++lockQP->m_lockKey;
+
+ UtilUnlockConf * conf = (UtilUnlockConf*)signal->getDataPtrSend();
+ conf->senderData = senderData;
+ conf->senderRef = reference();
+ conf->lockId = lockId;
+ sendSignal(senderRef, GSN_UTIL_UNLOCK_CONF, signal,
+ UtilUnlockConf::SignalLength, JBB);
+}
+
+void
+DbUtil::execUTIL_CREATE_LOCK_REQ(Signal* signal){
+ jamEntry();
+ UtilCreateLockReq req = * (UtilCreateLockReq*)signal->getDataPtr();
+
+ UtilCreateLockRef::ErrorCode err = UtilCreateLockRef::OK;
+
+ do {
+ LockQueuePtr lockQPtr;
+ if(c_lockQueues.find(lockQPtr, req.lockId)){
+ jam();
+ err = UtilCreateLockRef::LockIdAlreadyUsed;
+ break;
+ }
+
+ if(req.lockType != UtilCreateLockReq::Mutex){
+ jam();
+ err = UtilCreateLockRef::UnsupportedLockType;
+ break;
+ }
+
+ if(!c_lockQueues.seize(lockQPtr)){
+ jam();
+ err = UtilCreateLockRef::OutOfLockQueueRecords;
+ break;
+ }
+
+ new (lockQPtr.p) LockQueue(req.lockId);
+ c_lockQueues.add(lockQPtr);
+
+ UtilCreateLockConf * conf = (UtilCreateLockConf*)signal->getDataPtrSend();
+ conf->senderData = req.senderData;
+ conf->senderRef = reference();
+ conf->lockId = req.lockId;
+
+ sendSignal(req.senderRef, GSN_UTIL_CREATE_LOCK_CONF, signal,
+ UtilCreateLockConf::SignalLength, JBB);
+ return;
+ } while(false);
+
+ UtilCreateLockRef * ref = (UtilCreateLockRef*)signal->getDataPtrSend();
+ ref->senderData = req.senderData;
+ ref->senderRef = reference();
+ ref->lockId = req.lockId;
+ ref->errorCode = err;
+
+ sendSignal(req.senderRef, GSN_UTIL_CREATE_LOCK_REF, signal,
+ UtilCreateLockRef::SignalLength, JBB);
+}
+
+void
+DbUtil::execUTIL_DESTORY_LOCK_REQ(Signal* signal){
+ jamEntry();
+
+ UtilDestroyLockReq req = * (UtilDestroyLockReq*)signal->getDataPtr();
+ UtilDestroyLockRef::ErrorCode err = UtilDestroyLockRef::OK;
+ do {
+ LockQueuePtr lockQPtr;
+ if(!c_lockQueues.find(lockQPtr, req.lockId)){
+ jam();
+ err = UtilDestroyLockRef::NoSuchLock;
+ break;
+ }
+
+ LocalDLFifoList<LockQueueElement> queue(c_lockElementPool,
+ lockQPtr.p->m_queue);
+ LockQueueElementPtr lockEPtr;
+ if(!queue.first(lockEPtr)){
+ jam();
+ err = UtilDestroyLockRef::NotLockOwner;
+ break;
+ }
+
+ if(lockQPtr.p->m_lockKey != req.lockKey){
+ jam();
+ err = UtilDestroyLockRef::NotLockOwner;
+ break;
+ }
+
+ /**
+ * OK
+ */
+
+ // Inform all in lock queue that queue has been destroyed
+ UtilLockRef * ref = (UtilLockRef*)signal->getDataPtrSend();
+ ref->lockId = req.lockId;
+ ref->errorCode = UtilLockRef::NoSuchLock;
+ ref->senderRef = reference();
+ LockQueueElementPtr loopPtr = lockEPtr;
+ for(queue.next(loopPtr); !loopPtr.isNull(); queue.next(loopPtr)){
+ jam();
+ ref->senderData = loopPtr.p->m_senderData;
+ const Uint32 senderRef = loopPtr.p->m_senderRef;
+ sendSignal(senderRef, GSN_UTIL_LOCK_REF, signal,
+ UtilLockRef::SignalLength, JBB);
+ }
+ queue.release();
+ c_lockQueues.release(lockQPtr);
+
+ // Send Destroy conf
+ UtilDestroyLockConf* conf=(UtilDestroyLockConf*)signal->getDataPtrSend();
+ conf->senderData = req.senderData;
+ conf->senderRef = reference();
+ conf->lockId = req.lockId;
+ sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_CONF, signal,
+ UtilDestroyLockConf::SignalLength, JBB);
+ return;
+ } while(false);
+
+ UtilDestroyLockRef * ref = (UtilDestroyLockRef*)signal->getDataPtrSend();
+ ref->senderData = req.senderData;
+ ref->senderRef = reference();
+ ref->lockId = req.lockId;
+ ref->errorCode = err;
+ sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_REF, signal,
+ UtilDestroyLockRef::SignalLength, JBB);
+}
+
+template class ArrayPool<DbUtil::Page32>;
diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
new file mode 100644
index 00000000000..5499970fde3
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
@@ -0,0 +1,485 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef DBUTIL_H
+#define DBUTIL_H
+
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+#include <NodeBitmask.hpp>
+
+#include <ArrayList.hpp>
+#include <ArrayPool.hpp>
+#include <SLList.hpp>
+#include <DLList.hpp>
+#include <DLFifoList.hpp>
+#include <DataBuffer.hpp>
+#include <KeyTable.hpp>
+
+#include <signaldata/KeyInfo.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/UtilExecute.hpp>
+#include <signaldata/UtilLock.hpp>
+#include <SimpleProperties.hpp>
+
+#define UTIL_WORDS_PER_PAGE 1023
+
+/**
+ * @class DbUtil
+ * @brief Database utilities
+ *
+ * This block implements transactional services which can be used by other
+ * blocks.
+ *
+ * @section secSequence Module: The Sequence Service
+ *
+ * A sequence is a variable stored in the database. Each time it is
+ * requested with "NextVal" it returns a unique number. If requested
+ * with "CurrVal" it returns the current number.
+ *
+ * - Request: SEQUENCE_REQ
+ * Requests the 'NextVal' or 'CurrVal' for sequence variable 'sequenceId'.
+ *
+ * - Response: SEQUENCE_CONF / REF (if failure)
+ * Returns value requested.
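+ *
+ * Illustrative flow (a sketch based on the handlers declared below;
+ * the exact signal layout is defined by the UTIL_SEQUENCE signal data,
+ * not by this comment):
+ *
+ *   Client --UTIL_SEQUENCE_REQ (sequenceId, NextVal/CurrVal)--> DbUtil
+ *   Client <--UTIL_SEQUENCE_CONF (value requested)------------- DbUtil
+ *   Client <--UTIL_SEQUENCE_REF (on failure)------------------- DbUtil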
+ */
+class DbUtil : public SimulatedBlock
+{
+public:
+ DbUtil(const class Configuration & conf);
+ virtual ~DbUtil();
+ BLOCK_DEFINES(DbUtil);
+
+protected:
+ /**
+ * Startup & Misc
+ */
+ void execSTTOR(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ /**
+ * Sequence Service : Public interface
+ */
+ void execUTIL_SEQUENCE_REQ(Signal* signal);
+ void execUTIL_SEQUENCE_REF(Signal* signal);
+ void execUTIL_SEQUENCE_CONF(Signal* signal);
+
+ /**
+ * Prepare Service : Public interface
+ */
+ void execUTIL_PREPARE_REQ(Signal* signal);
+ void execUTIL_PREPARE_CONF(Signal* signal);
+ void execUTIL_PREPARE_REF(Signal* signal);
+
+ /**
+ * Delete Service : Public interface
+ */
+ void execUTIL_DELETE_REQ(Signal* signal);
+ void execUTIL_DELETE_REF(Signal* signal);
+ void execUTIL_DELETE_CONF(Signal* signal);
+
+ /**
+ * Execute Service : Public interface
+ */
+ void execUTIL_EXECUTE_REQ(Signal* signal);
+ void execUTIL_EXECUTE_REF(Signal* signal);
+ void execUTIL_EXECUTE_CONF(Signal* signal);
+
+ /**
+ * Prepare Release Service : Public interface
+ */
+ void execUTIL_RELEASE_REQ(Signal* signal);
+ void execUTIL_RELEASE_CONF(Signal* signal);
+ void execUTIL_RELEASE_REF(Signal* signal);
+
+ /**
+ * Backend interface to a used TC service
+ */
+ void execTCSEIZECONF(Signal* signal);
+ void execTCKEYCONF(Signal* signal);
+ void execTCKEYREF(Signal* signal);
+ void execTCROLLBACKREP(Signal* signal);
+ void execTCKEY_FAILCONF(Signal* signal);
+ void execTCKEY_FAILREF(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+
+ /**
+ * Backend interface to a used DICT service
+ */
+ void execGET_TABINFOREF(Signal*);
+ void execGET_TABINFO_CONF(Signal* signal);
+
+private:
+
+public:
+ struct PreparedOperation;
+
+ typedef DataBuffer<11> KeyInfoBuffer;
+ typedef KeyInfoBuffer::ConstDataBufferIterator KeyInfoIterator;
+ typedef DataBuffer<11> AttrInfoBuffer;
+ typedef AttrInfoBuffer::ConstDataBufferIterator AttrInfoIterator;
+ typedef DataBuffer<11> ResultSetBuffer;
+ typedef DataBuffer<11> ResultSetInfoBuffer;
+ typedef DataBuffer<1> AttrMappingBuffer;
+
+ /**
+ * @struct Page32
+ * @brief For storing SimpleProperties objects and similar temporary data
+ */
+ struct Page32 {
+ Uint32 data[UTIL_WORDS_PER_PAGE];
+ Uint32 nextPool; // Note: This is used as data when seized
+ };
+
+ /**
+ * @struct Prepare
+ * @brief Info regarding prepare request (contains a prepared operation)
+ *
+ * The prepare phase interprets the table and attribute names sent
+ * in the prepare request from the client and asks DICT for meta
+ * information.
+ */
+ struct Prepare {
+ Prepare(ArrayPool<Page32> & ap) : preparePages(ap) {}
+
+ /*** Client info ***/
+ Uint32 clientRef;
+ Uint32 clientData;
+
+ /**
+ * SimpleProp sent in UTIL_PREPARE_REQ
+ *
+ * Example format:
+ * - UtilPrepareReq::NoOfOperations=1
+ * - UtilPrepareReq::OperationType=UtilPrepareReq::Delete
+ * - UtilPrepareReq::TableName="SYSTAB_0"
+ * - UtilPrepareReq::AttributeName="SYSKEY_0"
+ */
+ Uint32 prepDataLen;
+ Array<Page32> preparePages;
+
+ /*** PreparedOperation constructed in Prepare phase ***/
+ Ptr<PreparedOperation> prepOpPtr;
+
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+
+ void print() const {
+ ndbout << "[-Prepare-" << endl
+ << " clientRef: " << clientRef
+ << ", clientData: " << clientData
+ << "]" << endl;
+ }
+ };
+
+ /**
+ * @struct PreparedOperation
+ * @brief Contains instantiated TcKeyReq signaldata for operation
+ *
+ * The prepare phase is finished by storing the request in a
+ * PreparedOperation record.
+ */
+ struct PreparedOperation {
+ PreparedOperation(AttrMappingBuffer::DataBufferPool & am,
+ AttrInfoBuffer::DataBufferPool & ai,
+ ResultSetInfoBuffer::DataBufferPool & rs) :
+ releaseFlag(false), attrMapping(am), attrInfo(ai), rsInfo(rs)
+ {
+ pkBitmask.clear();
+ }
+
+ /*** Various Operation Info ***/
+ Uint32 keyLen; // Length of primary key (fixed size is assumed)
+ Uint32 rsLen; // Size of result set
+ Uint32 noOfKeyAttr; // Number of key attributes
+ Uint32 noOfAttr; // Number of attributes
+ bool releaseFlag; // flag: release operation after completion
+
+ /**
+ * Attribute Mapping
+ *
+ * This datastructure (buffer of AttributeHeader:s) is used to map
+ * each execute request to a TCKEYREQ train of signals.
+ *
+ * The datastructure contains (AttributeId, Position) pairs, where
+ * - AttributeId is id used in database, and
+ * - Position is position of attribute value in TCKEYREQ keyinfo
+ * part of the train of signals which will be sent to TC.
+ * Position == 0x3fff means it should *not* be sent
+ * in keyinfo part.
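+ *
+ * Illustration (hypothetical attribute ids, for explanation only):
+ * an operation on a key attribute with id 0 stored at keyinfo
+ * position 0 and a non-key attribute with id 2 would be described
+ * by the pairs (0, 0) and (2, 0x3fff).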
+ */
+ AttrMappingBuffer attrMapping;
+
+ /*** First signal in tckeyreq train ***/
+ Uint32 tckeyLenInBytes; // TcKeyReq total signal length (in bytes)
+ Uint32 keyDataPos; // Where to store keydata[] in tckey signal
+ // (in #words from base in tckey signal)
+ TcKeyReq tckey; // Signaldata for first signal in train
+
+ /*** Attrinfo signals sent to TC (part of tckeyreq train) ***/
+ AttrInfoBuffer attrInfo;
+
+ /*** Result of executed operation ***/
+ ResultSetInfoBuffer rsInfo;
+
+ Bitmask<MAX_ATTRIBUTES_IN_TABLE> pkBitmask;
+
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+
+ void print() const {
+ ndbout << "[-PreparedOperation-" << endl
+ << " keyLen: " << keyLen
+ << ", rsLen: " << rsLen
+ << ", noOfKeyAttr: " << noOfKeyAttr
+ << ", noOfAttr: " << noOfAttr
+ << ", tckeyLenInBytes: " << tckeyLenInBytes
+ << ", keyDataPos: " << keyDataPos << endl
+ << "-AttrMapping- (AttrId, KeyPos)-pairs "
+ << "(Pos=3fff if non-key attr):" << endl;
+ attrMapping.print(stdout);
+ ndbout << "[-tckey- ";
+ printTCKEYREQ(stdout, (Uint32*)&tckey, 8, 0);
+ ndbout << "[-attrInfo- ";
+ attrInfo.print(stdout);
+ ndbout << "[-rsInfo- ";
+ rsInfo.print(stdout);
+ ndbout << "]]]]" << endl;
+ }
+ };
+
+ /**
+ * @struct Operation
+ * @brief Used in execution (contains resultset and buffers for result)
+ */
+ struct Operation {
+ Operation(KeyInfoBuffer::DataBufferPool & ki,
+ AttrInfoBuffer::DataBufferPool & ai,
+ ResultSetBuffer::DataBufferPool & _rs) :
+ prepOp_i(RNIL), keyInfo(ki), attrInfo(ai), rs(_rs) {}
+
+ PreparedOperation * prepOp;
+ Uint32 prepOp_i;
+ KeyInfoBuffer keyInfo;
+ AttrInfoBuffer attrInfo;
+ ResultSetBuffer rs;
+ ResultSetBuffer::DataBufferIterator rsIterator;
+
+ Uint32 transPtrI;
+
+ Uint32 rsRecv;
+ Uint32 rsExpect;
+ inline bool complete() const { return rsRecv == rsExpect; }
+
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+
+ void print() const {
+ ndbout << "[-Operation-" << endl
+ << " transPtrI: " << transPtrI
+ << ", rsRecv: " << rsRecv;
+ ndbout << "[-PreparedOperation-" << endl;
+ prepOp->print();
+ ndbout << "[-keyInfo-" << endl;
+ keyInfo.print(stdout);
+ ndbout << "[-attrInfo-" << endl;
+ attrInfo.print(stdout);
+ ndbout << "]]" << endl;
+ }
+ };
+
+ /**
+ * @struct Transaction
+ * @brief Used in execution (contains list of operations)
+ */
+ struct Transaction {
+ Transaction(ArrayPool<Page32> & ap, ArrayPool<Operation> & op) :
+ executePages(ap), operations(op) {}
+
+ Uint32 clientRef;
+ Uint32 clientData;
+ Array<Page32> executePages;
+
+ Uint32 gsn; // Request type (SEQUENCE, DELETE, etc)
+ union {
+ /**
+ * Sequence transaction
+ */
+ struct {
+ Uint32 sequenceId;
+ Uint32 requestType;
+ } sequence;
+ };
+
+ Uint32 connectPtr;
+ Uint32 transId[2];
+ SLList<Operation> operations;
+
+ Uint32 errorCode;
+ Uint32 noOfRetries;
+ Uint32 sent; // No of operations sent
+ Uint32 recv; // No of completed operations received
+ inline bool complete() const { return sent == recv; };
+
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+
+ void print() const {
+ ndbout << "[-Transaction-" << endl
+ << " clientRef: " << clientRef
+ << ", clientData: " << clientData
+ << ", gsn: " << gsn
+ << ", errorCode: " << errorCode
+ << endl
+ << " sent: " << sent << " operations"
+ << ", recv: " << recv << " completed operations";
+ OperationPtr opPtr;
+ this->operations.first(opPtr);
+ while(opPtr.i != RNIL){
+ ndbout << "[-Operation-" << endl;
+ opPtr.p->print();
+ this->operations.next(opPtr);
+ }
+ ndbout << "]" << endl;
+ }
+ };
+
+ typedef Ptr<Page32> Page32Ptr;
+ typedef Ptr<Prepare> PreparePtr;
+ typedef Ptr<Transaction> TransactionPtr;
+ typedef Ptr<Operation> OperationPtr;
+ typedef Ptr<PreparedOperation> PreparedOperationPtr;
+
+ Uint32 c_transId[2];
+ ArrayPool<Page32> c_pagePool;
+ ArrayPool<Prepare> c_preparePool;
+ ArrayPool<Operation> c_operationPool;
+ ArrayPool<PreparedOperation> c_preparedOperationPool;
+ ArrayPool<Transaction> c_transactionPool;
+
+ DataBuffer<1>::DataBufferPool c_attrMappingPool;
+ DataBuffer<11>::DataBufferPool c_dataBufPool;
+ DLList<Prepare> c_runningPrepares;
+ DLList<PreparedOperation> c_runningPreparedOperations;
+ DLList<Transaction> c_seizingTransactions; // Being seized at TC
+ DLList<Transaction> c_runningTransactions; // Seized and now exec.
+
+ void getTransId(Transaction *);
+ void initResultSet(ResultSetBuffer &, const ResultSetInfoBuffer &);
+ void runTransaction(Signal* signal, TransactionPtr);
+ void runOperation(Signal* signal, TransactionPtr &, OperationPtr &, Uint32);
+ void sendKeyInfo(Signal* signal,
+ KeyInfo* keyInfo,
+ const KeyInfoBuffer & keyBuf,
+ KeyInfoIterator & kit);
+ void sendAttrInfo(Signal*,
+ AttrInfo* attrInfo,
+ const AttrInfoBuffer &,
+ AttrInfoIterator & ait);
+ int getResultSet(Signal* signal, const Transaction * transP,
+ struct LinearSectionPtr sectionsPtr[]);
+ void finishTransaction(Signal*, TransactionPtr);
+ void releaseTransaction(TransactionPtr transPtr);
+ void hardcodedPrepare();
+ void connectTc(Signal* signal);
+ void reportSequence(Signal*, const Transaction *);
+ void readPrepareProps(Signal* signal,
+ SimpleProperties::Reader* reader,
+ Uint32 senderData);
+ void prepareOperation(Signal*, PreparePtr);
+ void sendUtilPrepareRef(Signal*, UtilPrepareRef::ErrorCode, Uint32, Uint32);
+ void sendUtilExecuteRef(Signal*, UtilExecuteRef::ErrorCode,
+ Uint32, Uint32, Uint32);
+ void releasePrepare(PreparePtr);
+ void releasePreparedOperation(PreparedOperationPtr);
+
+ /***************************************************************************
+ * Lock manager
+ */
+ struct LockQueueElement {
+ Uint32 m_senderData;
+ Uint32 m_senderRef;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ typedef Ptr<LockQueueElement> LockQueueElementPtr;
+
+ struct LockQueue {
+ LockQueue(){}
+ LockQueue(Uint32 id) : m_queue() { m_lockId = id; m_lockKey = 0;}
+ union {
+ Uint32 m_lockId;
+ Uint32 key;
+ };
+ Uint32 m_lockKey;
+ DLFifoList<LockQueueElement>::Head m_queue;
+ union {
+ Uint32 nextHash;
+ Uint32 nextPool;
+ };
+ Uint32 prevHash;
+
+ Uint32 hashValue() const {
+ return m_lockId;
+ }
+ bool equal(const LockQueue & rec) const {
+ return m_lockId == rec.m_lockId;
+ }
+ };
+ typedef Ptr<LockQueue> LockQueuePtr;
+
+
+ ArrayPool<LockQueue> c_lockQueuePool;
+ ArrayPool<LockQueueElement> c_lockElementPool;
+ KeyTable<LockQueue> c_lockQueues;
+
+ void execUTIL_CREATE_LOCK_REQ(Signal* signal);
+ void execUTIL_DESTORY_LOCK_REQ(Signal* signal);
+ void execUTIL_LOCK_REQ(Signal* signal);
+ void execUTIL_UNLOCK_REQ(Signal* signal);
+
+ void sendLOCK_REF(Signal*, const UtilLockReq * req, UtilLockRef::ErrorCode);
+ void sendLOCK_CONF(Signal*, LockQueue *, LockQueueElement *);
+
+ void sendUNLOCK_REF(Signal*, const UtilUnlockReq*, UtilUnlockRef::ErrorCode);
+ void sendUNLOCK_CONF(Signal*, LockQueue *, LockQueueElement *);
+
+ // For testing of mutex:es
+ void mutex_created(Signal* signal, Uint32 mutexId, Uint32 retVal);
+ void mutex_destroyed(Signal* signal, Uint32 mutexId, Uint32 retVal);
+ void mutex_locked(Signal* signal, Uint32 mutexId, Uint32 retVal);
+ void mutex_unlocked(Signal* signal, Uint32 mutexId, Uint32 retVal);
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt
new file mode 100644
index 00000000000..cc8c1985009
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt
@@ -0,0 +1,68 @@
+UTIL Protocols
+--------------
+Transactions are executed in two phases:
+1) PREPARE
+2) EXECUTE
+
+
+PREPARE PHASE
+-------------
+1) ** REQUEST **
+ Client (any block) requests prepare service from Util:
+
+ Client --UTIL_PREPARE_REQ--> Util
+ ...
+ Client --UTIL_PREPARE_REQ--> Util
+
+2) ** DICTINFO **
+ Util requests Dict for information about table:
+
+ Util --GET_TABINFOREQ--> Dict
+
+ Util <--DICTTABINFO-- Dict
+ ...
+ Util <--DICTTABINFO-- Dict
+
+3) ** PREPARE **
+ Operation (= transaction) is prepared (DbUtil::prepareOperation)
+
+ a) AttrMapping is created (a map used to read the
+ actual execute request attribute values and put them in KEYINFO)
+
+ b) TC Signal train is prepared
+
+4) ** CONFIRM **
+ Request is confirmed
+
+ Client <--UTIL_PREPARE_CONF-- Util
+
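+ Example of the property list carried in UTIL_PREPARE_REQ (a sketch
+ taken from the example in DbUtil.hpp; the real request is encoded
+ with SimpleProperties):
+
+   UtilPrepareReq::NoOfOperations = 1
+   UtilPrepareReq::OperationType  = UtilPrepareReq::Delete
+   UtilPrepareReq::TableName      = "SYSTAB_0"
+   UtilPrepareReq::AttributeName  = "SYSKEY_0"
+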
+
+EXECUTE PHASE
+-------------
+1) Client (any block) requests execute service from Util:
+ (Execute can be INSERT, DELETE,...)
+
+ Client --UTIL_EXECUTE_REQ--> Util (Multi-signals not yet implemented)
+ ...
+ Client --UTIL_EXECUTE_REQ--> Util
+
+2) Util --TCKEYREQ--> tc
+
+ Util --KEYINFO--> tc (sometimes) (Not yet implemented)
+ ...
+ Util --KEYINFO--> tc
+
+ Util --ATTRINFO--> tc (sometimes)
+ ...
+ Util --ATTRINFO--> tc
+
+3) Util <--TCKEYCONF-- tc
+
+ Util --TC_COMMIT_ACK-->tc (sometimes)
+
+ (in parallel with)
+
+ Util <--TRANSID_AI-- tc (sometimes)
+ ...
+ Util <--TRANSID_AI-- tc
+
diff --git a/storage/ndb/src/kernel/blocks/dbutil/Makefile.am b/storage/ndb/src/kernel/blocks/dbutil/Makefile.am
new file mode 100644
index 00000000000..925356c2f76
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/dbutil/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libdbutil.a
+
+libdbutil_a_SOURCES = DbUtil.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libdbutil.dsp
+
+libdbutil.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbutil_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/grep/Grep.cpp b/storage/ndb/src/kernel/blocks/grep/Grep.cpp
new file mode 100644
index 00000000000..0527c5415ab
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/Grep.cpp
@@ -0,0 +1,2010 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Grep.hpp"
+#include <ndb_version.h>
+
+#include <NdbTCP.h>
+#include <Bitmask.hpp>
+
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/GrepImpl.hpp>
+#include <signaldata/RepImpl.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <GrepEvent.hpp>
+#include <AttributeHeader.hpp>
+
+#define CONTINUEB_DELAY 500
+#define SSREPBLOCKNO 2
+#define PSREPBLOCKNO 2
+
+//#define DEBUG_GREP
+//#define DEBUG_GREP_SUBSCRIPTION
+//#define DEBUG_GREP_TRANSFER
+//#define DEBUG_GREP_APPLY
+//#define DEBUG_GREP_DELETE
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: STARTUP of GREP Block, etc
+ * ------------------------------------------------------------------------
+ **************************************************************************/
+static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE;
+void
+Grep::getNodeGroupMembers(Signal* signal) {
+ jam();
+ /**
+ * Ask DIH for nodeGroupMembers
+ */
+ CheckNodeGroups * sd = (CheckNodeGroups*)signal->getDataPtrSend();
+ sd->blockRef = reference();
+ sd->requestType =
+ CheckNodeGroups::Direct |
+ CheckNodeGroups::GetNodeGroupMembers;
+ sd->nodeId = getOwnNodeId();
+ EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+
+ c_nodeGroup = sd->output;
+ c_noNodesInGroup = 0;
+ for (int i = 0; i < MAX_NDB_NODES; i++) {
+ if (sd->mask.get(i)) {
+ if (i == getOwnNodeId()) c_idInNodeGroup = c_noNodesInGroup;
+ c_nodesInGroup[c_noNodesInGroup] = i;
+ c_noNodesInGroup++;
+ }
+ }
+ ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup
+
+#ifdef NODEFAIL_DEBUG
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ ndbout_c ("Grep: NodeGroup %u, me %u, me in group %u, member[%u] %u",
+ c_nodeGroup, getOwnNodeId(), c_idInNodeGroup,
+ i, c_nodesInGroup[i]);
+ }
+#endif
+}
+
+
+void
+Grep::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ const Uint32 startphase = signal->theData[1];
+ const Uint32 typeOfStart = signal->theData[7];
+ if (startphase == 3)
+ {
+ jam();
+ signal->theData[0] = reference();
+ g_TypeOfStart = typeOfStart;
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+ }
+ if(startphase == 5) {
+ jam();
+ /**
+ * we don't want any log/meta records coming to us
+ * until we are done with the recovery.
+ */
+ if (g_TypeOfStart == NodeState::ST_NODE_RESTART) {
+ jam();
+ pspart.m_recoveryMode = true;
+ getNodeGroupMembers(signal);
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ Uint32 ref =numberToRef(GREP, c_nodesInGroup[i]);
+ if (ref != reference())
+ sendSignal(ref, GSN_GREP_START_ME, signal,
+ 1 /*SumaStartMe::SignalLength*/, JBB);
+ }
+ } else pspart.m_recoveryMode = false;
+
+ }
+
+ if(startphase == 7) {
+ jam();
+ if (g_TypeOfStart == NodeState::ST_NODE_RESTART) {
+ pspart.m_recoveryMode = false;
+ }
+ }
+
+ sendSTTORRY(signal);
+}
+
+
+void
+Grep::PSPart::execSTART_ME(Signal* signal)
+{
+ jamEntry();
+ GrepStartMe * me =(GrepStartMe*)signal->getDataPtr();
+ BlockReference ref = me->senderRef;
+ GrepAddSubReq* const addReq = (GrepAddSubReq *)signal->getDataPtr();
+
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.first(c_subPtr);
+ for(; !c_subPtr.isNull(); c_subscriptions.next(c_subPtr)) {
+ jam();
+ subPtr.i = c_subPtr.curr.i;
+ subPtr.p = c_subscriptions.getPtr(subPtr.i);
+ addReq->subscriptionId = subPtr.p->m_subscriptionId;
+ addReq->subscriptionKey = subPtr.p->m_subscriptionKey;
+ addReq->subscriberData = subPtr.p->m_subscriberData;
+ addReq->subscriptionType = subPtr.p->m_subscriptionType;
+ addReq->senderRef = subPtr.p->m_coordinatorRef;
+ addReq->subscriberRef =subPtr.p->m_subscriberRef;
+
+ sendSignal(ref,
+ GSN_GREP_ADD_SUB_REQ,
+ signal,
+ GrepAddSubReq::SignalLength,
+ JBB);
+ }
+
+ addReq->subscriptionId = 0;
+ addReq->subscriptionKey = 0;
+ addReq->subscriberData = 0;
+ addReq->subscriptionType = 0;
+ addReq->senderRef = 0;
+ addReq->subscriberRef = 0;
+
+ sendSignal(ref,
+ GSN_GREP_ADD_SUB_REQ,
+ signal,
+ GrepAddSubReq::SignalLength,
+ JBB);
+}
+
+void
+Grep::PSPart::execGREP_ADD_SUB_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepAddSubReq * const grepReq = (GrepAddSubReq *)signal->getDataPtr();
+ const Uint32 subId = grepReq->subscriptionId;
+ const Uint32 subKey = grepReq->subscriptionKey;
+ const Uint32 subData = grepReq->subscriberData;
+ const Uint32 subType = grepReq->subscriptionType;
+ const Uint32 coordinatorRef = grepReq->senderRef;
+
+ /**
+ * this is ref to the REP node for this subscription.
+ */
+ const Uint32 subRef = grepReq->subscriberRef;
+
+ if(subId!=0 && subKey!=0) {
+ jam();
+ SubscriptionPtr subPtr;
+ ndbrequire( c_subscriptionPool.seize(subPtr));
+ subPtr.p->m_coordinatorRef = coordinatorRef;
+ subPtr.p->m_subscriptionId = subId;
+ subPtr.p->m_subscriptionKey = subKey;
+ subPtr.p->m_subscriberRef = subRef;
+ subPtr.p->m_subscriberData = subData;
+ subPtr.p->m_subscriptionType = subType;
+
+ c_subscriptions.add(subPtr);
+ }
+ else {
+ jam();
+ GrepAddSubConf * conf = (GrepAddSubConf *)grepReq;
+ conf->noOfSub =
+ c_subscriptionPool.getSize()-c_subscriptionPool.getNoOfFree();
+ sendSignal(signal->getSendersBlockRef(),
+ GSN_GREP_ADD_SUB_CONF,
+ signal,
+ GrepAddSubConf::SignalLength,
+ JBB);
+ }
+}
+
+void
+Grep::PSPart::execGREP_ADD_SUB_REF(Signal* signal)
+{
+ /**
+ * @todo fix error stuff
+ */
+}
+
+void
+Grep::PSPart::execGREP_ADD_SUB_CONF(Signal* signal)
+{
+ jamEntry();
+ GrepAddSubConf* const conf = (GrepAddSubConf *)signal->getDataPtr();
+ Uint32 noOfSubscriptions = conf->noOfSub;
+ Uint32 noOfRestoredSubscriptions =
+ c_subscriptionPool.getSize()-c_subscriptionPool.getNoOfFree();
+ if(noOfSubscriptions!=noOfRestoredSubscriptions) {
+ jam();
+ /**
+ *@todo send ref signal
+ */
+ ndbrequire(false);
+ }
+}
+
+void
+Grep::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+ ReadNodesConf * conf = (ReadNodesConf *)signal->getDataPtr();
+
+#if 0
+ ndbout_c("Grep: Recd READ_NODESCONF");
+#endif
+
+ /******************************
+ * Check which REP nodes exist
+ ******************************/
+ Uint32 i;
+ for (i = 1; i < MAX_NODES; i++)
+ {
+ jam();
+#if 0
+ ndbout_c("Grep: Found node %d of type %d", i, getNodeInfo(i).getType());
+#endif
+ if (getNodeInfo(i).getType() == NodeInfo::REP)
+ {
+ jam();
+ /**
+ * @todo This should work for more than ONE rep node!
+ */
+ pscoord.m_repRef = numberToRef(PSREPBLOCKNO, i);
+ pspart.m_repRef = numberToRef(PSREPBLOCKNO, i);
+#if 0
+ ndbout_c("Grep: REP node %d detected", i);
+#endif
+ }
+ }
+
+ /*****************************
+ * Check which DB nodes exist
+ *****************************/
+ m_aliveNodes.clear();
+
+ Uint32 count = 0;
+ for(i = 0; i<MAX_NDB_NODES; i++)
+ {
+ if (NodeBitmask::get(conf->allNodes, i))
+ {
+ jam();
+ count++;
+
+ NodePtr node;
+ ndbrequire(m_nodes.seize(node));
+
+ node.p->nodeId = i;
+ if (NodeBitmask::get(conf->inactiveNodes, i))
+ {
+ node.p->alive = 0;
+ }
+ else
+ {
+ node.p->alive = 1;
+ m_aliveNodes.set(i);
+ }
+ }
+ }
+ m_masterNodeId = conf->masterNodeId;
+ ndbrequire(count == conf->noOfNodes);
+ sendSTTORRY(signal);
+}
+
+void
+Grep::sendSTTORRY(Signal* signal)
+{
+ signal->theData[0] = 0;
+ signal->theData[3] = 1;
+ signal->theData[4] = 3;
+ signal->theData[5] = 5;
+ signal->theData[6] = 7;
+ signal->theData[7] = 255; // No more start phases from missra
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 8, JBB);
+}
+
+void
+Grep::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+}
+
+void
+Grep::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+ //Uint32 tCase = signal->theData[0];
+
+#if 0
+ if(sscoord.m_repRef == 0)
+ {
+ ndbout << "Grep: Recd DUMP signal but has no connection with REP node"
+ << endl;
+ return;
+ }
+#endif
+
+ /*
+ switch (tCase)
+ {
+ case 8100: sscoord.grepReq(signal, GrepReq::START_SUBSCR); break;
+ case 8102: sscoord.grepReq(signal, GrepReq::START_METALOG); break;
+ case 8104: sscoord.grepReq(signal, GrepReq::START_METASCAN); break;
+ case 8106: sscoord.grepReq(signal, GrepReq::START_DATALOG); break;
+ case 8108: sscoord.grepReq(signal, GrepReq::START_DATASCAN); break;
+ case 8110: sscoord.grepReq(signal, GrepReq::STOP_SUBSCR); break;
+ case 8500: sscoord.grepReq(signal, GrepReq::REMOVE_BUFFERS); break;
+ case 8300: sscoord.grepReq(signal, GrepReq::SLOWSTOP); break;
+ case 8400: sscoord.grepReq(signal, GrepReq::FASTSTOP); break;
+ case 8600: sscoord.grepReq(signal, GrepReq::CREATE_SUBSCR); break;
+ case 8700: sscoord.dropTable(signal,(Uint32)signal->theData[1]);break;
+ default: break;
+ }
+ */
+}
+
+/**
+ * Signal received when REP node has failed
+ */
+void
+Grep::execAPI_FAILREQ(Signal* signal)
+{
+ jamEntry();
+ //Uint32 failedApiNode = signal->theData[0];
+ //BlockReference retRef = signal->theData[1];
+
+ /**
+ * @todo We should probably do something smart if the
+ * PS REP node fails???? /Lars
+ */
+
+#if 0
+ ndbout_c("Grep: API_FAILREQ received for API node %d.", failedApiNode);
+#endif
+
+ /**
+ * @note On receiving this signal we are NOT allowed to send any CONF
+ * signal, since this would screw up TC/DICT to API
+ * "connections".
+ */
+}
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: GREP Control
+ * ------------------------------------------------------------------------
+ **************************************************************************/
+void
+Grep::execGREP_REQ(Signal* signal)
+{
+ jamEntry();
+
+ //GrepReq * req = (GrepReq *)signal->getDataPtr();
+
+ /**
+ * @todo Fix so that request is redirected to REP Server
+ * Obsolete?
+ * Was: sscoord.grepReq(signal, req->request);
+ */
+ ndbout_c("Warning! REP commands can only be executed at REP SERVER prompt!");
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: NODE STATE HANDLING
+ * ------------------------------------------------------------------------
+ **************************************************************************/
+void
+Grep::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+ NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+ bool changed = false;
+
+ NodePtr nodePtr;
+ for(m_nodes.first(nodePtr); nodePtr.i != RNIL; m_nodes.next(nodePtr))
+ {
+ jam();
+ if (NodeBitmask::get(rep->theNodes, nodePtr.p->nodeId))
+ {
+ jam();
+
+ if (nodePtr.p->alive)
+ {
+ jam();
+ ndbassert(m_aliveNodes.get(nodePtr.p->nodeId));
+ changed = true;
+ }
+ else
+ {
+ ndbassert(!m_aliveNodes.get(nodePtr.p->nodeId));
+ }
+
+ nodePtr.p->alive = 0;
+ m_aliveNodes.clear(nodePtr.p->nodeId);
+ }
+ }
+
+
+ /**
+ * Problem: Handle a node failure while a protocol is running
+ *
+ * 1. Coordinator node of a protocol dies
+ * - Elect a new coordinator
+ * - send ref to user
+ *
+ * 2. Non-coordinator dies.
+ * - make coordinator aware of this
+ * so that coordinator does not wait for
+ * conf from faulty node
+ * - node recovery will restore the non-coordinator.
+ *
+ */
+}
+
+void
+Grep::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+
+ //const Uint32 senderRef = signal->theData[0];
+ const Uint32 inclNode = signal->theData[1];
+
+ NodePtr node;
+ for(m_nodes.first(node); node.i != RNIL; m_nodes.next(node))
+ {
+ jam();
+ const Uint32 nodeId = node.p->nodeId;
+ if (inclNode == nodeId) {
+ jam();
+
+ ndbrequire(node.p->alive == 0);
+ ndbassert(!m_aliveNodes.get(nodeId));
+
+ node.p->alive = 1;
+ m_aliveNodes.set(nodeId);
+
+ break;
+ }
+ }
+
+ /**
+ * @todo: if we include this DIH's got to be prepared, later if needed...
+ */
+#if 0
+ signal->theData[0] = reference();
+
+ sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB);
+#endif
+}
+
+
+/**
+ * Helper methods
+ */
+void
+Grep::PSCoord::prepareOperationRec(SubCoordinatorPtr subPtr,
+ BlockReference subscriber,
+ Uint32 subId,
+ Uint32 subKey,
+ Uint32 request)
+{
+ subPtr.p->m_coordinatorRef = reference();
+ subPtr.p->m_subscriberRef = subscriber;
+ subPtr.p->m_subscriberData = subPtr.i;
+ subPtr.p->m_subscriptionId = subId;
+ subPtr.p->m_subscriptionKey = subKey;
+ subPtr.p->m_outstandingRequest = request;
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: CREATE SUBSCRIPTION ID
+ * ------------------------------------------------------------------------
+ *
+ * Requests SUMA to create a unique subscription id
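+ *
+ * Signal flow (a sketch derived from the handlers below):
+ *
+ *   SS --GREP_CREATE_SUBID_REQ--> PSCoord
+ *   PSCoord --CREATE_SUBID_REQ--> SUMA
+ *   PSCoord <--CREATE_SUBID_CONF/REF-- SUMA
+ *   SS <--GREP_CREATE_SUBID_CONF-- PSCoord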
+ **************************************************************************/
+
+void
+Grep::PSCoord::execGREP_CREATE_SUBID_REQ(Signal* signal)
+{
+ jamEntry();
+
+ CreateSubscriptionIdReq * req =
+ (CreateSubscriptionIdReq*)signal->getDataPtr();
+ BlockReference ref = signal->getSendersBlockRef();
+
+ SubCoordinatorPtr subPtr;
+ if( !c_subCoordinatorPool.seize(subPtr)) {
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef = ref;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sendRefToSS(signal, sub, GrepError::SUBSCRIPTION_ID_NOMEM );
+ return;
+ }
+ prepareOperationRec(subPtr,
+ ref,
+ 0,0,
+ GSN_CREATE_SUBID_REQ);
+
+
+ ndbout_c("SUBID_REQ Ref %d",ref);
+ req->senderData=subPtr.p->m_subscriberData;
+
+ sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal,
+ SubCreateReq::SignalLength, JBB);
+
+#if 1 //def DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSCoord: Sent CREATE_SUBID_REQ to SUMA");
+#endif
+}
+
+void
+Grep::PSCoord::execCREATE_SUBID_CONF(Signal* signal)
+{
+ jamEntry();
+ CreateSubscriptionIdConf const * conf =
+ (CreateSubscriptionIdConf *)signal->getDataPtr();
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 subData = conf->subscriberData;
+
+#if 1 //def DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSCoord: Recd GREP_SUBID_CONF (subId:%d, subKey:%d)",
+ subId, subKey);
+#endif
+
+ SubCoordinatorPtr subPtr;
+ c_subCoordinatorPool.getPtr(subPtr, subData);
+ BlockReference repRef = subPtr.p->m_subscriberRef;
+
+ { // Check that id/key is unique
+ SubCoordinator key;
+ SubCoordinatorPtr tmp;
+ key.m_subscriptionId = subId;
+ key.m_subscriptionKey = subKey;
+ if(c_runningSubscriptions.find(tmp, key)){
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef=repRef;
+ sub.m_subscriptionId = subId;
+ sub.m_subscriptionKey = subKey;
+ sendRefToSS(signal,sub, GrepError::SUBSCRIPTION_ID_NOT_UNIQUE );
+ return;
+ }
+ }
+
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_CREATE_SUBID_CONF, signal,
+ CreateSubscriptionIdConf::SignalLength, JBB);
+ c_subCoordinatorPool.release(subData);
+
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionInfo,
+ GrepEvent::GrepPS_CreateSubIdConf,
+ subId,
+ subKey,
+ (Uint32)GrepError::GE_NO_ERROR);
+}
+
+void
+Grep::PSCoord::execCREATE_SUBID_REF(Signal* signal) {
+ jamEntry();
+ CreateSubscriptionIdRef const * ref =
+ (CreateSubscriptionIdRef *)signal->getDataPtr();
+ Uint32 subData = ref->subscriberData;
+ GrepError::GE_Code err;
+
+ Uint32 sendersBlockRef = signal->getSendersBlockRef();
+ if(sendersBlockRef == SUMA_REF)
+ {
+ jam();
+ err = GrepError::SUBSCRIPTION_ID_SUMA_FAILED_CREATE;
+ } else {
+ jam();
+ ndbrequire(false); /* Added since errorcode err unhandled
+ * TODO: fix correct errorcode
+ */
+ err= GrepError::GE_NO_ERROR; // remove compiler warning
+ }
+
+ SubCoordinatorPtr subPtr;
+ c_runningSubscriptions.getPtr(subPtr, subData);
+ BlockReference repref = subPtr.p->m_subscriberRef;
+
+ SubCoordinator sub;
+ sub.m_subscriberRef = repref;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sendRefToSS(signal,sub, err);
+
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: CREATE SUBSCRIPTION
+ * ------------------------------------------------------------------------
+ *
+ * Creates a subscription for every GREP to its local SUMA.
+ * GREP node that executes createSubscription becomes the GREP Coord.
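+ *
+ * Signal flow (a sketch derived from the handlers below):
+ *
+ *   SS --GREP_SUB_CREATE_REQ--> PSCoord
+ *   PSCoord --GREP_CREATE_REQ--> PSPart (all alive participants)
+ *   PSPart --SUB_CREATE_REQ--> local SUMA
+ *   PSPart <--SUB_CREATE_CONF/REF-- local SUMA
+ *   PSCoord <--GREP_CREATE_CONF/REF-- PSPart
+ *   SS <--GREP_SUB_CREATE_CONF-- PSCoord (when all participants have confirmed)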
+ **************************************************************************/
+
+/**
+ * Request to create a subscription (sent from SS)
+ */
+void
+Grep::PSCoord::execGREP_SUB_CREATE_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepSubCreateReq const * grepReq = (GrepSubCreateReq *)signal->getDataPtr();
+ Uint32 subId = grepReq->subscriptionId;
+ Uint32 subKey = grepReq->subscriptionKey;
+ Uint32 subType = grepReq->subscriptionType;
+ BlockReference rep = signal->getSendersBlockRef();
+
+ GrepCreateReq * req =(GrepCreateReq*)grepReq;
+
+ SubCoordinatorPtr subPtr;
+
+ if( !c_subCoordinatorPool.seize(subPtr)) {
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef = rep;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sub.m_outstandingRequest = GSN_GREP_CREATE_REQ;
+ sendRefToSS(signal, sub, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+ prepareOperationRec(subPtr,
+ numberToRef(PSREPBLOCKNO, refToNode(rep)), subId, subKey,
+ GSN_GREP_CREATE_REQ);
+
+ /* Get the payload of the signal.
+ */
+ SegmentedSectionPtr selectedTablesPtr;
+ if(subType == SubCreateReq::SelectiveTableSnapshot) {
+ jam();
+ ndbrequire(signal->getNoOfSections()==1);
+ signal->getSection(selectedTablesPtr,0);
+ signal->header.m_noOfSections = 0;
+ }
+ /**
+ * Prepare the signal to be sent to Grep participants
+ */
+ subPtr.p->m_subscriptionType = subType;
+ req->senderRef = reference();
+ req->subscriberRef = numberToRef(PSREPBLOCKNO, refToNode(rep));
+ req->subscriberData = subPtr.p->m_subscriberData;
+ req->subscriptionId = subId;
+ req->subscriptionKey = subKey;
+ req->subscriptionType = subType;
+
+ /*add payload if it is a selectivetablesnap*/
+ if(subType == SubCreateReq::SelectiveTableSnapshot) {
+ jam();
+ signal->setSection(selectedTablesPtr, 0);
+ }
+
+ /******************************
+ * Send to all PS participants
+ ******************************/
+ NodeReceiverGroup rg(GREP, m_grep->m_aliveNodes);
+ subPtr.p->m_outstandingParticipants = rg;
+ sendSignal(rg,
+ GSN_GREP_CREATE_REQ, signal,
+ GrepCreateReq::SignalLength, JBB);
+
+
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSCoord: Sent GREP_CREATE_REQ "
+ "(subId:%d, subKey:%d, subData:%d, subType:%d) to parts",
+ subId, subKey, subPtr.p->m_subscriberData, subType);
+#endif
+}
+
+void
+Grep::PSPart::execGREP_CREATE_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepCreateReq * const grepReq = (GrepCreateReq *)signal->getDataPtr();
+ const Uint32 subId = grepReq->subscriptionId;
+ const Uint32 subKey = grepReq->subscriptionKey;
+ const Uint32 subData = grepReq->subscriberData;
+ const Uint32 subType = grepReq->subscriptionType;
+ const Uint32 coordinatorRef = grepReq->senderRef;
+ const Uint32 subRef = grepReq->subscriberRef; //this is ref to the
+ //REP node for this
+ //subscription.
+
+ SubscriptionPtr subPtr;
+ ndbrequire( c_subscriptionPool.seize(subPtr));
+ subPtr.p->m_coordinatorRef = coordinatorRef;
+ subPtr.p->m_subscriptionId = subId;
+ subPtr.p->m_subscriptionKey = subKey;
+ subPtr.p->m_subscriberRef = subRef;
+ subPtr.p->m_subscriberData = subPtr.i;
+ subPtr.p->m_subscriptionType = subType;
+ subPtr.p->m_outstandingRequest = GSN_GREP_CREATE_REQ;
+ subPtr.p->m_operationPtrI = subData;
+
+ c_subscriptions.add(subPtr);
+
+ SegmentedSectionPtr selectedTablesPtr;
+ if(subType == SubCreateReq::SelectiveTableSnapshot) {
+ jam();
+ ndbrequire(signal->getNoOfSections()==1);
+ signal->getSection(selectedTablesPtr,0);// SubCreateReq::TABLE_LIST);
+ signal->header.m_noOfSections = 0;
+ }
+
+ /**
+ * Prepare signal to be sent to SUMA
+ */
+ SubCreateReq * sumaReq = (SubCreateReq *)grepReq;
+ sumaReq->subscriberRef = GREP_REF;
+ sumaReq->subscriberData = subPtr.p->m_subscriberData;
+ sumaReq->subscriptionId = subPtr.p->m_subscriptionId;
+ sumaReq->subscriptionKey = subPtr.p->m_subscriptionKey;
+ sumaReq->subscriptionType = subPtr.p->m_subscriptionType;
+ /*add payload if it is a selectivetablesnap*/
+ if(subType == SubCreateReq::SelectiveTableSnapshot) {
+ jam();
+ signal->setSection(selectedTablesPtr, 0);
+ }
+ sendSignal(SUMA_REF,
+ GSN_SUB_CREATE_REQ,
+ signal,
+ SubCreateReq::SignalLength,
+ JBB);
+}
+
+void
+Grep::PSPart::execSUB_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+
+ SubCreateConf * const conf = (SubCreateConf *)signal->getDataPtr();
+ Uint32 subData = conf->subscriberData;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ /**
+ @todo check why this can fuck up -johan
+
+ ndbrequire(subPtr.p->m_subscriptionId == conf->subscriptionId);
+ ndbrequire(subPtr.p->m_subscriptionKey == conf->subscriptionKey);
+ */
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSPart: Recd SUB_CREATE_CONF "
+ "(subId:%d, subKey:%d) from SUMA",
+ conf->subscriptionId, conf->subscriptionKey);
+#endif
+
+ /*********************
+ * Send conf to coord
+ *********************/
+ GrepCreateConf * grepConf = (GrepCreateConf*)conf;
+ grepConf->senderNodeId = getOwnNodeId();
+ grepConf->senderData = subPtr.p->m_operationPtrI;
+ sendSignal(subPtr.p->m_coordinatorRef, GSN_GREP_CREATE_CONF, signal,
+ GrepCreateConf::SignalLength, JBB);
+ subPtr.p->m_outstandingRequest = 0;
+}
+
+/**
+ * Handle errors that either occurred in:
+ * 1) PSPart
+ * or
+ * 2) were propagated from the local SUMA
+ */
+void
+Grep::PSPart::execSUB_CREATE_REF(Signal* signal)
+{
+ jamEntry();
+ SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
+ Uint32 subData = ref->subscriberData;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ sendRefToPSCoord(signal, *subPtr.p, err /*error*/);
+ subPtr.p->m_outstandingRequest = 0;
+}
+
+void
+Grep::PSCoord::execGREP_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+ GrepCreateConf const * conf = (GrepCreateConf *)signal->getDataPtr();
+ Uint32 subData = conf->senderData;
+ Uint32 nodeId = conf->senderNodeId;
+
+ SubCoordinatorPtr subPtr;
+ c_subCoordinatorPool.getPtr(subPtr, subData);
+
+ ndbrequire(subPtr.p->m_outstandingRequest == GSN_GREP_CREATE_REQ);
+
+ subPtr.p->m_outstandingParticipants.clearWaitingFor(nodeId);
+
+ if(!subPtr.p->m_outstandingParticipants.done()) return;
+ /********************************
+ * All participants have CONF:ed
+ ********************************/
+ Uint32 subId = subPtr.p->m_subscriptionId;
+ Uint32 subKey = subPtr.p->m_subscriptionKey;
+
+ GrepSubCreateConf * grepConf = (GrepSubCreateConf *)signal->getDataPtr();
+ grepConf->subscriptionId = subId;
+ grepConf->subscriptionKey = subKey;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_SUB_CREATE_CONF, signal,
+ GrepSubCreateConf::SignalLength, JBB);
+
+ /**
+ * Send event report
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionInfo,
+ GrepEvent::GrepPS_SubCreateConf,
+ subId,
+ subKey,
+ (Uint32)GrepError::GE_NO_ERROR);
+
+ c_subCoordinatorPool.release(subPtr);
+
+}
+
+/**
+ * Handle errors that either occurred in:
+ * 1) PSCoord
+ * or
+ * 2) were propagated from PSPart
+ */
+void
+Grep::PSCoord::execGREP_CREATE_REF(Signal* signal)
+{
+ jamEntry();
+ GrepCreateRef * const ref = (GrepCreateRef *)signal->getDataPtr();
+ Uint32 subData = ref->senderData;
+ Uint32 err = ref->err;
+ SubCoordinatorPtr subPtr;
+ c_runningSubscriptions.getPtr(subPtr, subData);
+
+ sendRefToSS(signal, *subPtr.p, (GrepError::GE_Code)err /*error*/);
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: START SUBSCRIPTION
+ * ------------------------------------------------------------------------
+ *
+ * Starts a subscription at SUMA.
+ * Each participant starts its own subscription.
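+ *
+ * Signal flow (a sketch derived from the handlers below):
+ *
+ *   SS --GREP_SUB_START_REQ--> PSCoord
+ *   PSCoord --GREP_START_REQ--> PSPart (all alive participants)
+ *   PSPart --SUB_START_REQ--> local SUMA
+ *   PSPart <--SUB_START_CONF/REF-- local SUMA
+ *   PSCoord <--GREP_START_CONF/REF-- PSPart
+ *   SS <--GREP_SUB_START_CONF-- PSCoord (when all participants have confirmed)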
+ **************************************************************************/
+
+/**
+ * Request to start subscription (Sent from SS)
+ */
+void
+Grep::PSCoord::execGREP_SUB_START_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepSubStartReq * const subReq = (GrepSubStartReq *)signal->getDataPtr();
+ SubscriptionData::Part part = (SubscriptionData::Part) subReq->part;
+ Uint32 subId = subReq->subscriptionId;
+ Uint32 subKey = subReq->subscriptionKey;
+ BlockReference rep = signal->getSendersBlockRef();
+
+ SubCoordinatorPtr subPtr;
+
+ if(!c_subCoordinatorPool.seize(subPtr)) {
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef = rep;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sub.m_outstandingRequest = GSN_GREP_START_REQ;
+ sendRefToSS(signal, sub, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+
+ prepareOperationRec(subPtr,
+ numberToRef(PSREPBLOCKNO, refToNode(rep)),
+ subId, subKey,
+ GSN_GREP_START_REQ);
+
+ GrepStartReq * const req = (GrepStartReq *) subReq;
+ req->part = (Uint32) part;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->senderData = subPtr.p->m_subscriberData;
+
+ /***************************
+ * Send to all participants
+ ***************************/
+ NodeReceiverGroup rg(GREP, m_grep->m_aliveNodes);
+ subPtr.p->m_outstandingParticipants = rg;
+ sendSignal(rg,
+ GSN_GREP_START_REQ,
+ signal,
+ GrepStartReq::SignalLength, JBB);
+
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSCoord: Sent GREP_START_REQ "
+ "(subId:%d, subKey:%d, senderData:%d, part:%d) to all participants",
+ req->subscriptionId, req->subscriptionKey, req->senderData, part);
+#endif
+}
+
+
+void
+Grep::PSPart::execGREP_START_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepStartReq * const grepReq = (GrepStartReq *) signal->getDataPtr();
+ SubscriptionData::Part part = (SubscriptionData::Part)grepReq->part;
+ Uint32 subId = grepReq->subscriptionId;
+ Uint32 subKey = grepReq->subscriptionKey;
+ Uint32 operationPtrI = grepReq->senderData;
+
+ Subscription key;
+ key.m_subscriptionId = subId;
+ key.m_subscriptionKey = subKey;
+ SubscriptionPtr subPtr;
+ ndbrequire(c_subscriptions.find(subPtr, key));
+ subPtr.p->m_outstandingRequest = GSN_GREP_START_REQ;
+ subPtr.p->m_operationPtrI = operationPtrI;
+ /**
+ * send SUB_START_REQ to local SUMA
+ */
+ SubStartReq * sumaReq = (SubStartReq *) grepReq;
+ sumaReq->subscriptionId = subId;
+ sumaReq->subscriptionKey = subKey;
+ sumaReq->subscriberData = subPtr.i;
+ sumaReq->part = (Uint32) part;
+
+ sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal,
+ SubStartReq::SignalLength, JBB);
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSPart: Sent SUB_START_REQ (subId:%d, subKey:%d, part:%d)",
+ subId, subKey, (Uint32)part);
+#endif
+}
+
+
+void
+Grep::PSPart::execSUB_START_CONF(Signal* signal)
+{
+ jamEntry();
+
+ SubStartConf * const conf = (SubStartConf *) signal->getDataPtr();
+ SubscriptionData::Part part = (SubscriptionData::Part)conf->part;
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 subData = conf->subscriberData;
+ Uint32 firstGCI = conf->firstGCI;
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSPart: Recd SUB_START_CONF "
+ "(subId:%d, subKey:%d, subData:%d)",
+ subId, subKey, subData);
+#endif
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ ndbrequire(subPtr.p->m_subscriptionId == subId);
+ ndbrequire(subPtr.p->m_subscriptionKey == subKey);
+
+ GrepStartConf * grepConf = (GrepStartConf *)conf;
+ grepConf->senderData = subPtr.p->m_operationPtrI;
+ grepConf->part = (Uint32) part;
+ grepConf->subscriptionKey = subKey;
+ grepConf->subscriptionId = subId;
+ grepConf->firstGCI = firstGCI;
+ grepConf->senderNodeId = getOwnNodeId();
+ sendSignal(subPtr.p->m_coordinatorRef, GSN_GREP_START_CONF, signal,
+ GrepStartConf::SignalLength, JBB);
+ subPtr.p->m_outstandingRequest = 0;
+
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSPart: Sent GREP_START_CONF "
+ "(subId:%d, subKey:%d, subData:%d, part:%d)",
+ subId, subKey, subData, part);
+#endif
+}
+
+
+/**
+ * Handle errors that either occurred in:
+ * 1) PSPart
+ * or
+ * 2) were propagated from the local SUMA
+ *
+ * Propagates REF signal to PSCoord
+ */
+void
+Grep::PSPart::execSUB_START_REF(Signal* signal)
+{
+ SubStartRef * const ref = (SubStartRef *)signal->getDataPtr();
+ Uint32 subData = ref->subscriberData;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
+ SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ sendRefToPSCoord(signal, *subPtr.p, err /*error*/, part);
+ subPtr.p->m_outstandingRequest = 0;
+}
+
+
+/**
+ * Logging has started... (says PS Participant)
+ */
+void
+Grep::PSCoord::execGREP_START_CONF(Signal* signal)
+{
+ jamEntry();
+
+ GrepStartConf * const conf = (GrepStartConf *) signal->getDataPtr();
+ Uint32 subData = conf->senderData;
+ SubscriptionData::Part part = (SubscriptionData::Part)conf->part;
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 firstGCI = conf->firstGCI;
+
+ SubCoordinatorPtr subPtr;
+ c_subCoordinatorPool.getPtr(subPtr, subData);
+ ndbrequire(subPtr.p->m_outstandingRequest == GSN_GREP_START_REQ);
+
+ subPtr.p->m_outstandingParticipants.clearWaitingFor(conf->senderNodeId);
+
+ if(!subPtr.p->m_outstandingParticipants.done()) return;
+ jam();
+
+ /*************************
+ * All participants ready
+ *************************/
+ GrepSubStartConf * grepConf = (GrepSubStartConf *) conf;
+ grepConf->part = part;
+ grepConf->subscriptionId = subId;
+ grepConf->subscriptionKey = subKey;
+ grepConf->firstGCI = firstGCI;
+
+ bool ok = false;
+ switch(part) {
+ case SubscriptionData::MetaData:
+ ok = true;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_SUB_START_CONF, signal,
+ GrepSubStartConf::SignalLength, JBB);
+
+ /**
+ * Send event report
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionInfo,
+ GrepEvent::GrepPS_SubStartMetaConf,
+ subId, subKey,
+ (Uint32)GrepError::GE_NO_ERROR);
+
+ c_subCoordinatorPool.release(subPtr);
+ break;
+ case SubscriptionData::TableData:
+ ok = true;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_SUB_START_CONF, signal,
+ GrepSubStartConf::SignalLength, JBB);
+
+ /**
+ * Send event report
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionInfo,
+ GrepEvent::GrepPS_SubStartDataConf,
+ subId, subKey,
+ (Uint32)GrepError::GE_NO_ERROR);
+
+
+ c_subCoordinatorPool.release(subPtr);
+ break;
+ }
+ ndbrequire(ok);
+
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSCoord: Recd SUB_START_CONF (subId:%d, subKey:%d, part:%d) "
+ "from all slaves",
+ subId, subKey, (Uint32)part);
+#endif
+}
+
+/**
+ * Handle errors that either occurred in:
+ * 1) PSCoord
+ * or
+ * 2) were propagated from PSPart
+ */
+void
+Grep::PSCoord::execGREP_START_REF(Signal* signal)
+{
+ jamEntry();
+ GrepStartRef * const ref = (GrepStartRef *)signal->getDataPtr();
+ Uint32 subData = ref->senderData;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
+ SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
+
+ SubCoordinatorPtr subPtr;
+ c_runningSubscriptions.getPtr(subPtr, subData);
+ sendRefToSS(signal, *subPtr.p, err /*error*/, part);
+}
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: REMOVE SUBSCRIPTION
+ * ------------------------------------------------------------------------
+ *
+ * Remove a subscription at SUMA.
+ * Each participant removes its own subscription.
+ * We start by deleting the subscription inside the requestor,
+ * since we don't know if nodes (REP nodes or DB nodes)
+ * have disconnected after we sent out this request, and
+ * if we don't delete the sub in the requestor now,
+ * we won't be able to create a new subscription.
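+ *
+ * Signal flow (a sketch derived from the handlers below):
+ *
+ *   SS --GREP_SUB_REMOVE_REQ--> PSCoord
+ *   PSCoord --GREP_REMOVE_REQ--> PSPart (all alive participants)
+ *   PSPart --SUB_REMOVE_REQ--> local SUMA
+ *   PSPart <--SUB_REMOVE_CONF/REF-- local SUMA
+ *   PSCoord <--GREP_REMOVE_CONF/REF-- PSPart
+ *   SS <--GREP_SUB_REMOVE_CONF-- PSCoord (when all participants have confirmed)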
+ **************************************************************************/
+
+/**
+ * Request to abort subscription (Sent from SS)
+ */
+void
+Grep::PSCoord::execGREP_SUB_REMOVE_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepSubRemoveReq * const subReq = (GrepSubRemoveReq *)signal->getDataPtr();
+ Uint32 subId = subReq->subscriptionId;
+ Uint32 subKey = subReq->subscriptionKey;
+ BlockReference rep = signal->getSendersBlockRef();
+
+ SubCoordinatorPtr subPtr;
+ if( !c_subCoordinatorPool.seize(subPtr)) {
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef = rep;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sub.m_outstandingRequest = GSN_GREP_REMOVE_REQ;
+ sendRefToSS(signal, sub, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+
+
+ prepareOperationRec(subPtr,
+ numberToRef(PSREPBLOCKNO, refToNode(rep)),
+ subId, subKey,
+ GSN_GREP_REMOVE_REQ);
+
+ c_runningSubscriptions.add(subPtr);
+
+ GrepRemoveReq * req = (GrepRemoveReq *) subReq;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->senderData = subPtr.p->m_subscriberData;
+ req->senderRef = subPtr.p->m_coordinatorRef;
+
+ /***************************
+ * Send to all participants
+ ***************************/
+ NodeReceiverGroup rg(GREP, m_grep->m_aliveNodes);
+ subPtr.p->m_outstandingParticipants = rg;
+ sendSignal(rg,
+ GSN_GREP_REMOVE_REQ, signal,
+ GrepRemoveReq::SignalLength, JBB);
+}
+
+
+void
+Grep::PSPart::execGREP_REMOVE_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepRemoveReq * const grepReq = (GrepRemoveReq *) signal->getDataPtr();
+ Uint32 subId = grepReq->subscriptionId;
+ Uint32 subKey = grepReq->subscriptionKey;
+ Uint32 subData = grepReq->senderData;
+ Uint32 coordinator = grepReq->senderRef;
+
+ Subscription key;
+ key.m_subscriptionId = subId;
+ key.m_subscriptionKey = subKey;
+ SubscriptionPtr subPtr;
+
+ if(!c_subscriptions.find(subPtr, key))
+ {
+ /**
+ * The subscription was not found, so it must already have been removed.
+ * Send CONF back, since it does not exist (thus, it is as good as removed).
+ */
+ GrepRemoveConf * grepConf = (GrepRemoveConf *)grepReq;
+ grepConf->subscriptionKey = subKey;
+ grepConf->subscriptionId = subId;
+ grepConf->senderData = subData;
+ grepConf->senderNodeId = getOwnNodeId();
+ sendSignal(coordinator, GSN_GREP_REMOVE_CONF, signal,
+ GrepRemoveConf::SignalLength, JBB);
+ return;
+ }
+
+ subPtr.p->m_operationPtrI = subData;
+ subPtr.p->m_coordinatorRef = coordinator;
+ subPtr.p->m_outstandingRequest = GSN_GREP_REMOVE_REQ;
+
+ /**
+ * send SUB_REMOVE_REQ to local SUMA
+ */
+ SubRemoveReq * sumaReq = (SubRemoveReq *) grepReq;
+ sumaReq->subscriptionId = subId;
+ sumaReq->subscriptionKey = subKey;
+ sumaReq->senderData = subPtr.i;
+ sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal,
+ SubStartReq::SignalLength, JBB);
+}
+
+
+/**
+ * SUB_REMOVE_CONF (from local SUMA)
+ */
+void
+Grep::PSPart::execSUB_REMOVE_CONF(Signal* signal)
+{
+ jamEntry();
+ SubRemoveConf * const conf = (SubRemoveConf *) signal->getDataPtr();
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 subData = conf->subscriberData;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ ndbrequire(subPtr.p->m_subscriptionId == subId);
+ ndbrequire(subPtr.p->m_subscriptionKey == subKey);
+ subPtr.p->m_outstandingRequest = 0;
+ GrepRemoveConf * grepConf = (GrepRemoveConf *)conf;
+ grepConf->subscriptionKey = subKey;
+ grepConf->subscriptionId = subId;
+ grepConf->senderData = subPtr.p->m_operationPtrI;
+ grepConf->senderNodeId = getOwnNodeId();
+ sendSignal(subPtr.p->m_coordinatorRef, GSN_GREP_REMOVE_CONF, signal,
+ GrepRemoveConf::SignalLength, JBB);
+ c_subscriptions.release(subPtr);
+
+}
+
+
+/**
+ * SUB_REMOVE_REF (from local SUMA)
+ */
+void
+Grep::PSPart::execSUB_REMOVE_REF(Signal* signal)
+{
+ jamEntry();
+ SubRemoveRef * const ref = (SubRemoveRef *)signal->getDataPtr();
+ Uint32 subData = ref->subscriberData;
+ /* GrepError::GE_Code err = (GrepError::GE_Code)ref->err;*/
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+
+ //sendSubRemoveRef_PSCoord(signal, *subPtr.p, err /*error*/);
+}
+
+
+/**
+ * Aborting has been carried out (says Participants)
+ */
+void
+Grep::PSCoord::execGREP_REMOVE_CONF(Signal* signal)
+{
+ jamEntry();
+ GrepRemoveConf * const conf = (GrepRemoveConf *) signal->getDataPtr();
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 senderNodeId = conf->senderNodeId;
+ Uint32 subData = conf->senderData;
+ SubCoordinatorPtr subPtr;
+ c_subCoordinatorPool.getPtr(subPtr, subData);
+
+ ndbrequire(subPtr.p->m_outstandingRequest == GSN_GREP_REMOVE_REQ);
+
+ subPtr.p->m_outstandingParticipants.clearWaitingFor(senderNodeId);
+
+ if(!subPtr.p->m_outstandingParticipants.done()) {
+ jam();
+ return;
+ }
+ jam();
+
+ /*************************
+ * All participants ready
+ *************************/
+
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionInfo,
+ GrepEvent::GrepPS_SubRemoveConf,
+ subId, subKey,
+ GrepError::GE_NO_ERROR);
+
+ GrepSubRemoveConf * grepConf = (GrepSubRemoveConf *) conf;
+ grepConf->subscriptionId = subId;
+ grepConf->subscriptionKey = subKey;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_SUB_REMOVE_CONF, signal,
+ GrepSubRemoveConf::SignalLength, JBB);
+
+ c_subCoordinatorPool.release(subPtr);
+}
+
+
+
+void
+Grep::PSCoord::execGREP_REMOVE_REF(Signal* signal)
+{
+ jamEntry();
+ GrepRemoveRef * const ref = (GrepRemoveRef *)signal->getDataPtr();
+ Uint32 subData = ref->senderData;
+ Uint32 err = ref->err;
+ SubCoordinatorPtr subPtr;
+
+ /**
+ * Get the operation record matching subData and remove it. Subsequent
+ * execGREP_REMOVE_REF signals will simply be ignored at this stage.
+ */
+ for( c_runningSubscriptions.first(c_subPtr);
+ !c_subPtr.isNull(); c_runningSubscriptions.next(c_subPtr)) {
+ jam();
+ subPtr.i = c_subPtr.curr.i;
+ subPtr.p = c_runningSubscriptions.getPtr(subPtr.i);
+ if(subData == subPtr.i)
+ {
+ sendRefToSS(signal, *subPtr.p, (GrepError::GE_Code)err /*error*/);
+ c_runningSubscriptions.release(subPtr);
+ return;
+ }
+ }
+ return;
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: LOG RECORDS (COMING IN FROM LOCAL SUMA)
+ * ------------------------------------------------------------------------
+ *
+ * After the subscription is started, we get log records from SUMA.
+ * Both table data and meta data log records are received.
+ *
+ * TODO:
+ * @todo Changes in meta data are currently not
+ * allowed during global replication
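+ *
+ * Data flow (a sketch derived from the handlers below):
+ *
+ *   PSPart <--SUB_META_DATA--  local SUMA  (forwarded to the REP node)
+ *   PSPart <--SUB_TABLE_DATA-- local SUMA  (scan or log data, forwarded
+ *                                           to the REP node)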
+ **************************************************************************/
+
+void
+Grep::PSPart::execSUB_META_DATA(Signal* signal)
+{
+ jamEntry();
+ if(m_recoveryMode) {
+ jam();
+ return;
+ }
+ /**
+ * METASCAN and METALOG
+ */
+ SubMetaData * data = (SubMetaData *) signal->getDataPtrSend();
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, data->subscriberData);
+
+ /***************************
+ * Forward data to REP node
+ ***************************/
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_META_DATA, signal,
+ SubMetaData::SignalLength, JBB);
+#ifdef DEBUG_GREP_SUBSCRIPTION
+ ndbout_c("Grep::PSPart: Sent SUB_META_DATA to REP "
+ "(TableId: %d, SenderData: %d, GCI: %d)",
+ data->tableId, data->senderData, data->gci);
+#endif
+}
+
+/**
+ * Receive table data from SUMA and dispatches it to REP node.
+ */
+void
+Grep::PSPart::execSUB_TABLE_DATA(Signal* signal)
+{
+ jamEntry();
+ if(m_recoveryMode) {
+ jam();
+ return;
+ }
+ ndbrequire(m_repRef!=0);
+
+ if(!assembleFragments(signal)) { jam(); return; }
+
+ /**
+ * Check if it is SCAN or LOG data that has arrived
+ */
+ if(signal->getNoOfSections() == 2)
+ {
+ jam();
+ /**
+ * DATASCAN - Not marked with GCI, so mark with latest seen GCI
+ */
+ if(m_firstScanGCI == 1 && m_lastScanGCI == 0) {
+ m_firstScanGCI = m_latestSeenGCI;
+ m_lastScanGCI = m_latestSeenGCI;
+ }
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();
+ Uint32 subData = data->senderData;
+ data->gci = m_latestSeenGCI;
+ data->logType = SubTableData::SCAN;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB);
+#ifdef DEBUG_GREP
+ ndbout_c("Grep::PSPart: Sent SUB_TABLE_DATA (Scan, GCI: %d)",
+ data->gci);
+#endif
+ }
+ else
+ {
+ jam();
+ /**
+ * DATALOG (TRIGGER) - Already marked with GCI
+ */
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();
+ data->logType = SubTableData::LOG;
+ Uint32 subData = data->senderData;
+ if (data->gci > m_latestSeenGCI) m_latestSeenGCI = data->gci;
+
+ // Reformat to sections and send to replication node.
+ LinearSectionPtr ptr[3];
+ ptr[0].p = signal->theData + 25;
+ ptr[0].sz = data->noOfAttributes;
+ ptr[1].p = signal->theData + 25 + MAX_ATTRIBUTES_IN_TABLE;
+ ptr[1].sz = data->dataSize;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_TABLE_DATA,
+ signal, SubTableData::SignalLength, JBB, ptr, 2);
+#ifdef DEBUG_GREP
+ ndbout_c("Grep::PSPart: Sent SUB_TABLE_DATA (Log, GCI: %d)",
+ data->gci);
+#endif
+ }
+}
+
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: START SYNCHRONIZATION
+ * ------------------------------------------------------------------------
+ *
+ *
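+ * Starts synchronization of a subscription via the local SUMA
+ * (description and flow are a sketch derived from the handlers below):
+ *
+ *   SS --GREP_SUB_SYNC_REQ--> PSCoord
+ *   PSCoord --GREP_SYNC_REQ--> PSPart (all alive participants)
+ *   PSPart --SUB_SYNC_REQ--> local SUMA
+ *   PSPart <--SUB_SYNC_CONF/REF-- local SUMA
+ *   PSCoord <--GREP_SYNC_CONF-- PSPart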
+ **************************************************************************/
+
+/**
+ * Request to start sync (from Rep SS)
+ */
+void
+Grep::PSCoord::execGREP_SUB_SYNC_REQ(Signal* signal)
+{
+ jamEntry();
+ GrepSubSyncReq * const subReq = (GrepSubSyncReq*)signal->getDataPtr();
+ SubscriptionData::Part part = (SubscriptionData::Part) subReq->part;
+ Uint32 subId = subReq->subscriptionId;
+ Uint32 subKey = subReq->subscriptionKey;
+ BlockReference rep = signal->getSendersBlockRef();
+
+ SubCoordinatorPtr subPtr;
+ if( !c_subCoordinatorPool.seize(subPtr)) {
+ jam();
+ SubCoordinator sub;
+ sub.m_subscriberRef = rep;
+ sub.m_subscriptionId = 0;
+ sub.m_subscriptionKey = 0;
+ sub.m_outstandingRequest = GSN_GREP_SYNC_REQ;
+ sendRefToSS(signal, sub, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+
+ prepareOperationRec(subPtr,
+ numberToRef(PSREPBLOCKNO, refToNode(rep)),
+ subId, subKey,
+ GSN_GREP_SYNC_REQ);
+
+ GrepSyncReq * req = (GrepSyncReq *)subReq;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->senderData = subPtr.p->m_subscriberData;
+ req->part = (Uint32)part;
+
+ /***************************
+ * Send to all participants
+ ***************************/
+ NodeReceiverGroup rg(GREP, m_grep->m_aliveNodes);
+ subPtr.p->m_outstandingParticipants = rg;
+ sendSignal(rg,
+ GSN_GREP_SYNC_REQ, signal, GrepSyncReq::SignalLength, JBB);
+}
+
+
+/**
+ * Sync request from Grep::PSCoord to a PS participant
+ */
+void
+Grep::PSPart::execGREP_SYNC_REQ(Signal* signal)
+{
+ jamEntry();
+
+ GrepSyncReq * const grepReq = (GrepSyncReq *) signal->getDataPtr();
+ Uint32 part = grepReq->part;
+ Uint32 subId = grepReq->subscriptionId;
+ Uint32 subKey = grepReq->subscriptionKey;
+ Uint32 subData = grepReq->senderData;
+
+ Subscription key;
+ key.m_subscriptionId = subId;
+ key.m_subscriptionKey = subKey;
+ SubscriptionPtr subPtr;
+ ndbrequire(c_subscriptions.find(subPtr, key));
+ subPtr.p->m_operationPtrI = subData;
+ subPtr.p->m_outstandingRequest = GSN_GREP_SYNC_REQ;
+ /**********************************
+ * Send SUB_SYNC_REQ to local SUMA
+ **********************************/
+ SubSyncReq * sumaReq = (SubSyncReq *)grepReq;
+ sumaReq->subscriptionId = subId;
+ sumaReq->subscriptionKey = subKey;
+ sumaReq->subscriberData = subPtr.i;
+ sumaReq->part = part;
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal,
+ SubSyncReq::SignalLength, JBB);
+}
+
+
+/**
+ * SYNC conf from SUMA
+ */
+void
+Grep::PSPart::execSUB_SYNC_CONF(Signal* signal)
+{
+ jamEntry();
+
+ SubSyncConf * const conf = (SubSyncConf *) signal->getDataPtr();
+ Uint32 part = conf->part;
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 subData = conf->subscriberData;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+
+ ndbrequire(subPtr.p->m_subscriptionId == subId);
+ ndbrequire(subPtr.p->m_subscriptionKey == subKey);
+
+ GrepSyncConf * grepConf = (GrepSyncConf *)conf;
+ grepConf->senderNodeId = getOwnNodeId();
+ grepConf->part = part;
+ grepConf->firstGCI = m_firstScanGCI;
+ grepConf->lastGCI = m_lastScanGCI;
+ grepConf->subscriptionId = subId;
+ grepConf->subscriptionKey = subKey;
+ grepConf->senderData = subPtr.p->m_operationPtrI;
+ sendSignal(subPtr.p->m_coordinatorRef, GSN_GREP_SYNC_CONF, signal,
+ GrepSyncConf::SignalLength, JBB);
+
+ m_firstScanGCI = 1;
+ m_lastScanGCI = 0;
+ subPtr.p->m_outstandingRequest = 0;
+}
+
+/**
+ * Handles errors that either occurred in:
+ * 1) PSPart
+ * or
+ * 2) were propagated from local SUMA
+ *
+ * Propagates a REF signal to PSCoord
+ */
+void
+Grep::PSPart::execSUB_SYNC_REF(Signal* signal) {
+ jamEntry();
+ SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
+ Uint32 subData = ref->subscriberData;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
+ SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subData);
+ sendRefToPSCoord(signal, *subPtr.p, err /*error*/ ,part);
+ subPtr.p->m_outstandingRequest = 0;
+}
+
+/**
+ * Syncing has started... (says PS Participant)
+ */
+void
+Grep::PSCoord::execGREP_SYNC_CONF(Signal* signal)
+{
+ jamEntry();
+
+ GrepSyncConf const * conf = (GrepSyncConf *)signal->getDataPtr();
+ Uint32 part = conf->part;
+ Uint32 firstGCI = conf->firstGCI;
+ Uint32 lastGCI = conf->lastGCI;
+ Uint32 subId = conf->subscriptionId;
+ Uint32 subKey = conf->subscriptionKey;
+ Uint32 subData = conf->senderData;
+
+ SubCoordinatorPtr subPtr;
+ c_subCoordinatorPool.getPtr(subPtr, subData);
+ ndbrequire(subPtr.p->m_outstandingRequest == GSN_GREP_SYNC_REQ);
+
+ subPtr.p->m_outstandingParticipants.clearWaitingFor(conf->senderNodeId);
+ if(!subPtr.p->m_outstandingParticipants.done()) return;
+
+ /**
+ * Send event
+ */
+ GrepEvent::Subscription event;
+ if(part == SubscriptionData::MetaData)
+ event = GrepEvent::GrepPS_SubSyncMetaConf;
+ else
+ event = GrepEvent::GrepPS_SubSyncDataConf;
+
+ /* @todo Johan: Add firstGCI here. /Lars */
+ m_grep->sendEventRep(signal, NDB_LE_GrepSubscriptionInfo,
+ event, subId, subKey,
+ (Uint32)GrepError::GE_NO_ERROR,
+ lastGCI);
+
+ /*************************
+ * All participants ready
+ *************************/
+ GrepSubSyncConf * grepConf = (GrepSubSyncConf *)conf;
+ grepConf->part = part;
+ grepConf->firstGCI = firstGCI;
+ grepConf->lastGCI = lastGCI;
+ grepConf->subscriptionId = subId;
+ grepConf->subscriptionKey = subKey;
+
+ sendSignal(subPtr.p->m_subscriberRef, GSN_GREP_SUB_SYNC_CONF, signal,
+ GrepSubSyncConf::SignalLength, JBB);
+ c_subCoordinatorPool.release(subPtr);
+}
+
+/**
+ * Handles errors that either occurred in:
+ * 1) PSCoord
+ * or
+ * 2) were propagated from PSPart
+ */
+void
+Grep::PSCoord::execGREP_SYNC_REF(Signal* signal) {
+ jamEntry();
+ GrepSyncRef * const ref = (GrepSyncRef *)signal->getDataPtr();
+ Uint32 subData = ref->senderData;
+ SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
+ SubCoordinatorPtr subPtr;
+ c_runningSubscriptions.getPtr(subPtr, subData);
+ sendRefToSS(signal, *subPtr.p, err /*error*/, part);
+}
+
+
+
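+/**
+ * Builds the REF signal that matches the outstanding request
+ * (CREATE_SUBID, CREATE, SYNC, START or REMOVE), sends it to the
+ * subscriber and finally reports a GrepSubscriptionAlert event.
+ */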
+void
+Grep::PSCoord::sendRefToSS(Signal * signal,
+ SubCoordinator sub,
+ GrepError::GE_Code err,
+ SubscriptionData::Part part) {
+ /**
+
+ GrepCreateRef * ref = (GrepCreateRef*)signal->getDataPtrSend();
+ ref->senderData = sub.m_subscriberData;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->err = err;
+ sendSignal(sub.m_coordinatorRef, GSN_GREP_CREATE_REF, signal,
+ GrepCreateRef::SignalLength, JBB);
+*/
+
+ jam();
+ GrepEvent::Subscription event;
+ switch(sub.m_outstandingRequest) {
+ case GSN_GREP_CREATE_SUBID_REQ:
+ {
+ jam();
+ CreateSubscriptionIdRef * ref =
+ (CreateSubscriptionIdRef*)signal->getDataPtrSend();
+ ref->err = (Uint32)err;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ sendSignal(sub.m_subscriberRef,
+ GSN_GREP_CREATE_SUBID_REF,
+ signal,
+ CreateSubscriptionIdRef::SignalLength,
+ JBB);
+ event = GrepEvent::GrepPS_CreateSubIdRef;
+ }
+ break;
+ case GSN_GREP_CREATE_REQ:
+ {
+ jam();
+ GrepSubCreateRef * ref = (GrepSubCreateRef*)signal->getDataPtrSend();
+ ref->err = (Uint32)err;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ sendSignal(sub.m_subscriberRef, GSN_GREP_SUB_CREATE_REF, signal,
+ GrepSubCreateRef::SignalLength, JBB);
+ event = GrepEvent::GrepPS_SubCreateRef;
+ }
+ break;
+ case GSN_GREP_SYNC_REQ:
+ {
+ jam();
+ GrepSubSyncRef * ref = (GrepSubSyncRef*)signal->getDataPtrSend();
+ ref->err = (Uint32)err;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->part = (SubscriptionData::Part) part;
+ sendSignal(sub.m_subscriberRef,
+ GSN_GREP_SUB_SYNC_REF,
+ signal,
+ GrepSubSyncRef::SignalLength,
+ JBB);
+ if(part == SubscriptionData::MetaData)
+ event = GrepEvent::GrepPS_SubSyncMetaRef;
+ else
+ event = GrepEvent::GrepPS_SubSyncDataRef;
+ }
+ break;
+ case GSN_GREP_START_REQ:
+ {
+ jam();
+ GrepSubStartRef * ref = (GrepSubStartRef*)signal->getDataPtrSend();
+ ref->err = (Uint32)err;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+
+ sendSignal(sub.m_subscriberRef, GSN_GREP_SUB_START_REF,
+ signal, GrepSubStartRef::SignalLength, JBB);
+ if(part == SubscriptionData::MetaData)
+ event = GrepEvent::GrepPS_SubStartMetaRef;
+ else
+ event = GrepEvent::GrepPS_SubStartDataRef;
+ /**
+ * Send event report
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionAlert,
+ event,
+ sub.m_subscriptionId,
+ sub.m_subscriptionKey,
+ (Uint32)err);
+ }
+ break;
+ case GSN_GREP_REMOVE_REQ:
+ {
+ jam();
+ GrepSubRemoveRef * ref = (GrepSubRemoveRef*)signal->getDataPtrSend();
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->err = (Uint32)err;
+
+ sendSignal(sub.m_subscriberRef,
+ GSN_GREP_SUB_REMOVE_REF,
+ signal,
+ GrepSubRemoveRef::SignalLength,
+ JBB);
+
+ event = GrepEvent::GrepPS_SubRemoveRef;
+ }
+ break;
+ default:
+ ndbrequire(false);
+ event= GrepEvent::Rep_Disconnect; // remove compiler warning
+ }
+ /**
+ * Finally, send an event.
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionAlert,
+ event,
+ sub.m_subscriptionId,
+ sub.m_subscriptionKey,
+ err);
+
+}
+
+
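+/**
+ * Builds the REF signal that matches the outstanding request and sends it
+ * to the coordinator (PSCoord), then reports a GrepSubscriptionAlert event.
+ */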
+void
+Grep::PSPart::sendRefToPSCoord(Signal * signal,
+ Subscription sub,
+ GrepError::GE_Code err,
+ SubscriptionData::Part part) {
+
+ jam();
+ GrepEvent::Subscription event;
+ switch(sub.m_outstandingRequest) {
+
+ case GSN_GREP_CREATE_REQ:
+ {
+ GrepCreateRef * ref = (GrepCreateRef*)signal->getDataPtrSend();
+ ref->senderData = sub.m_subscriberData;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->err = err;
+ sendSignal(sub.m_coordinatorRef, GSN_GREP_CREATE_REF, signal,
+ GrepCreateRef::SignalLength, JBB);
+
+ event = GrepEvent::GrepPS_SubCreateRef;
+ }
+ break;
+ case GSN_GREP_SYNC_REQ:
+ {
+ GrepSyncRef * ref = (GrepSyncRef*)signal->getDataPtrSend();
+ ref->senderData = sub.m_subscriberData;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->part = part;
+ ref->err = err;
+ sendSignal(sub.m_coordinatorRef,
+ GSN_GREP_SYNC_REF, signal,
+ GrepSyncRef::SignalLength, JBB);
+ if(part == SubscriptionData::MetaData)
+ event = GrepEvent::GrepPS_SubSyncMetaRef;
+ else
+ event = GrepEvent::GrepPS_SubSyncDataRef;
+ }
+ break;
+ case GSN_GREP_START_REQ:
+ {
+ jam();
+ GrepStartRef * ref = (GrepStartRef*)signal->getDataPtrSend();
+ ref->senderData = sub.m_subscriberData;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->part = (Uint32) part;
+ ref->err = err;
+ sendSignal(sub.m_coordinatorRef, GSN_GREP_START_REF, signal,
+ GrepStartRef::SignalLength, JBB);
+ if(part == SubscriptionData::MetaData)
+ event = GrepEvent::GrepPS_SubStartMetaRef;
+ else
+ event = GrepEvent::GrepPS_SubStartDataRef;
+ }
+ break;
+
+ case GSN_GREP_REMOVE_REQ:
+ {
+ jamEntry();
+ GrepRemoveRef * ref = (GrepRemoveRef*)signal->getDataPtrSend();
+ ref->senderData = sub.m_operationPtrI;
+ ref->subscriptionId = sub.m_subscriptionId;
+ ref->subscriptionKey = sub.m_subscriptionKey;
+ ref->err = err;
+ sendSignal(sub.m_coordinatorRef, GSN_GREP_REMOVE_REF, signal,
+ GrepRemoveRef::SignalLength, JBB);
+ event = GrepEvent::GrepPS_SubRemoveRef;
+ }
+ break;
+ default:
+ ndbrequire(false);
+ event= GrepEvent::Rep_Disconnect; // remove compiler warning
+ }
+
+ /**
+ * Finally, send an event.
+ */
+ m_grep->sendEventRep(signal,
+ NDB_LE_GrepSubscriptionAlert,
+ event,
+ sub.m_subscriptionId,
+ sub.m_subscriptionKey,
+ err);
+
+}
+
+/**************************************************************************
+ * ------------------------------------------------------------------------
+ * MODULE: GREP PS Coordinator GCP
+ * ------------------------------------------------------------------------
+ *
+ *
+ **************************************************************************/
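+/*
+ * SUB_GCP_COMPLETE_REP from the local SUMA is forwarded to every known
+ * subscriber, and the highest GCI seen so far is remembered in
+ * m_latestSeenGCI (used to stamp scan data in execSUB_TABLE_DATA).
+ */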
+
+void
+Grep::PSPart::execSUB_GCP_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+ if(m_recoveryMode) {
+ jam();
+ return;
+ }
+ SubGcpCompleteRep * rep = (SubGcpCompleteRep *)signal->getDataPtrSend();
+ rep->senderRef = reference();
+
+ if (rep->gci > m_latestSeenGCI) m_latestSeenGCI = rep->gci;
+ SubscriptionPtr subPtr;
+ c_subscriptions.first(c_subPtr);
+ for(; !c_subPtr.isNull(); c_subscriptions.next(c_subPtr)) {
+
+ subPtr.i = c_subPtr.curr.i;
+ subPtr.p = c_subscriptions.getPtr(subPtr.i);
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+ }
+
+#ifdef DEBUG_GREP
+ ndbout_c("Grep::PSPart: Recd SUB_GCP_COMPLETE_REP "
+ "(GCI: %d, nodeId: %d) from SUMA",
+ rep->gci, refToNode(rep->senderRef));
+#endif
+}
+
+
+void
+Grep::PSPart::execSUB_SYNC_CONTINUE_REQ(Signal* signal)
+{
+ jamEntry();
+ SubSyncContinueReq * const req = (SubSyncContinueReq*)signal->getDataPtr();
+ Uint32 subData = req->subscriberData;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr,subData);
+
+ /**
+ * @todo Figure out how to control how much data we can receive.
+ */
+ SubSyncContinueConf * conf = (SubSyncContinueConf*)req;
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_CONTINUE_CONF, signal,
+ SubSyncContinueConf::SignalLength, JBB);
+}
+
+void
+Grep::sendEventRep(Signal * signal,
+ Ndb_logevent_type type,
+ GrepEvent::Subscription event,
+ Uint32 subId,
+ Uint32 subKey,
+ Uint32 err,
+ Uint32 other) {
+ jam();
+ signal->theData[0] = type;
+ signal->theData[1] = event;
+ signal->theData[2] = subId;
+ signal->theData[3] = subKey;
+ signal->theData[4] = err;
+
+ if(other==0)
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 5 ,JBB);
+ else {
+ signal->theData[5] = other;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6 ,JBB);
+ }
+}
diff --git a/storage/ndb/src/kernel/blocks/grep/Grep.hpp b/storage/ndb/src/kernel/blocks/grep/Grep.hpp
new file mode 100644
index 00000000000..a14143294e1
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/Grep.hpp
@@ -0,0 +1,535 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef GREP_HPP
+#define GREP_HPP
+
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+
+#include <NodeBitmask.hpp>
+#include <SignalCounter.hpp>
+#include <SLList.hpp>
+
+#include <DLList.hpp>
+
+#include <GrepError.hpp>
+#include <GrepEvent.hpp>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/SumaImpl.hpp>
+
+
+/**
+ * Module in block (Should be placed elsewhere)
+ */
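+/*
+ * BlockComponent is a thin forwarding wrapper: it gives the PSCoord and
+ * PSPart sub-components access to the owning SimulatedBlock's
+ * sendSignal/progError/assembleFragments facilities without those
+ * sub-components deriving from SimulatedBlock themselves.
+ */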
+class BlockComponent {
+public:
+ BlockComponent(SimulatedBlock *);
+ BlockReference reference() { return m_sb->reference(); };
+ BlockNumber number() { return m_sb->number(); };
+
+ void sendSignal(NodeReceiverGroup rg,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf ) const {
+ m_sb->sendSignal(rg, gsn, signal, length, jbuf);
+ }
+
+ void sendSignal(BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf ) const {
+ m_sb->sendSignal(ref, gsn, signal, length, jbuf);
+ }
+
+ void sendSignal(BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 length,
+ JobBufferLevel jbuf,
+ LinearSectionPtr ptr[3],
+ Uint32 noOfSections) const {
+ m_sb->sendSignal(ref, gsn, signal, length, jbuf, ptr, noOfSections);
+ }
+
+ void sendSignalWithDelay(BlockReference ref,
+ GlobalSignalNumber gsn,
+ Signal* signal,
+ Uint32 delayInMilliSeconds,
+ Uint32 length) const {
+
+ m_sb->sendSignalWithDelay(ref, gsn, signal, delayInMilliSeconds, length);
+ }
+
+ NodeId getOwnNodeId() const {
+ return m_sb->getOwnNodeId();
+ }
+
+ bool assembleFragments(Signal * signal) {
+ return m_sb->assembleFragments(signal);
+ }
+
+ void progError(int line, int err_code, const char* extra) {
+ m_sb->progError(line, err_code, extra);
+ }
+
+private:
+ SimulatedBlock * m_sb;
+};
+
+
+
+/**
+ * Participant of GREP Protocols (not necessarily a protocol coordinator)
+ *
+ * This object is only used on the primary system
+ */
+#if 0
+class GrepParticipant : public SimulatedBlock
+{
+protected:
+ GrepParticipant(const Configuration & conf);
+ virtual ~GrepParticipant();
+ BLOCK_DEFINES(GrepParticipant);
+
+protected:
+ /***************************************************************************
+ * SUMA Signal Interface
+ ***************************************************************************/
+ void execSUB_CREATE_CONF(Signal*);
+ void execSUB_STARTCONF(Signal*);
+ void execSUB_REMOVE_CONF(Signal*);
+
+ void execSUB_META_DATA(Signal*);
+ void execSUB_TABLE_DATA(Signal*);
+
+ void execSUB_SYNC_CONF(Signal*);
+
+ void execSUB_GCP_COMPLETE_REP(Signal*);
+ void execSUB_SYNC_CONTINUE_REQ(Signal*);
+
+ /***************************************************************************
+ * GREP Coordinator Signal Interface
+ ***************************************************************************/
+ void execGREP_CREATE_REQ(Signal*);
+ void execGREP_START_REQ(Signal*);
+ void execGREP_SYNC_REQ(Signal*);
+ void execGREP_REMOVE_REQ(Signal*);
+
+
+protected:
+ BlockReference m_repRef; ///< Replication node (only one rep node per grep)
+
+private:
+ BlockReference m_coordinator;
+ Uint32 m_latestSeenGCI;
+};
+#endif
+
+
+/**
+ * GREP Coordinator
+ */
+class Grep : public SimulatedBlock //GrepParticipant
+{
+ BLOCK_DEFINES(Grep);
+
+public:
+ Grep(const Configuration & conf);
+ virtual ~Grep();
+
+private:
+ /***************************************************************************
+ * General Signal Receivers
+ ***************************************************************************/
+ void execSTTOR(Signal*);
+ void sendSTTORRY(Signal*);
+ void execNDB_STTOR(Signal*);
+ void execDUMP_STATE_ORD(Signal*);
+ void execREAD_NODESCONF(Signal*);
+ void execNODE_FAILREP(Signal*);
+ void execINCL_NODEREQ(Signal*);
+ void execGREP_REQ(Signal*);
+ void execAPI_FAILREQ(Signal*);
+ /**
+ * Forwarded to PSCoord
+ */
+ //CONF
+ void fwdGREP_CREATE_CONF(Signal* s) {
+ pscoord.execGREP_CREATE_CONF(s); };
+ void fwdGREP_START_CONF(Signal* s) {
+ pscoord.execGREP_START_CONF(s); };
+ void fwdGREP_SYNC_CONF(Signal* s) {
+ pscoord.execGREP_SYNC_CONF(s); };
+ void fwdGREP_REMOVE_CONF(Signal* s) {
+ pscoord.execGREP_REMOVE_CONF(s); };
+ void fwdCREATE_SUBID_CONF(Signal* s) {
+ pscoord.execCREATE_SUBID_CONF(s); };
+
+ //REF
+
+ void fwdGREP_CREATE_REF(Signal* s) {
+ pscoord.execGREP_CREATE_REF(s); };
+ void fwdGREP_START_REF(Signal* s) {
+ pscoord.execGREP_START_REF(s); };
+ void fwdGREP_SYNC_REF(Signal* s) {
+ pscoord.execGREP_SYNC_REF(s); };
+
+ void fwdGREP_REMOVE_REF(Signal* s) {
+ pscoord.execGREP_REMOVE_REF(s); };
+
+ void fwdCREATE_SUBID_REF(Signal* s) {
+ pscoord.execCREATE_SUBID_REF(s); };
+
+ //REQ
+ void fwdGREP_SUB_CREATE_REQ(Signal* s) {
+ pscoord.execGREP_SUB_CREATE_REQ(s); };
+ void fwdGREP_SUB_START_REQ(Signal* s) {
+ pscoord.execGREP_SUB_START_REQ(s); };
+ void fwdGREP_SUB_SYNC_REQ(Signal* s) {
+ pscoord.execGREP_SUB_SYNC_REQ(s); };
+ void fwdGREP_SUB_REMOVE_REQ(Signal* s) {
+ pscoord.execGREP_SUB_REMOVE_REQ(s); };
+ void fwdGREP_CREATE_SUBID_REQ(Signal* s) {
+ pscoord.execGREP_CREATE_SUBID_REQ(s); };
+
+ /**
+ * Forwarded to PSPart
+ */
+
+ void fwdSTART_ME(Signal* s){
+ pspart.execSTART_ME(s);
+ };
+ void fwdGREP_ADD_SUB_REQ(Signal* s){
+ pspart.execGREP_ADD_SUB_REQ(s);
+ };
+ void fwdGREP_ADD_SUB_REF(Signal* s){
+ pspart.execGREP_ADD_SUB_REF(s);
+ };
+ void fwdGREP_ADD_SUB_CONF(Signal* s){
+ pspart.execGREP_ADD_SUB_CONF(s);
+ };
+
+ //CONF
+ void fwdSUB_CREATE_CONF(Signal* s) {
+ pspart.execSUB_CREATE_CONF(s); };
+ void fwdSUB_START_CONF(Signal* s) {
+ pspart.execSUB_START_CONF(s); };
+ void fwdSUB_REMOVE_CONF(Signal* s) {
+ pspart.execSUB_REMOVE_CONF(s); };
+ void fwdSUB_SYNC_CONF(Signal* s) {
+ pspart.execSUB_SYNC_CONF(s); };
+
+ //REF
+
+ void fwdSUB_CREATE_REF(Signal* s) {
+ pspart.execSUB_CREATE_REF(s); };
+ void fwdSUB_START_REF(Signal* s) {
+ pspart.execSUB_START_REF(s); };
+ void fwdSUB_REMOVE_REF(Signal* s) {
+ pspart.execSUB_REMOVE_REF(s); };
+ void fwdSUB_SYNC_REF(Signal* s) {
+ pspart.execSUB_SYNC_REF(s); };
+
+ //REQ
+ void fwdSUB_SYNC_CONTINUE_REQ(Signal* s) {
+ pspart.execSUB_SYNC_CONTINUE_REQ(s); };
+ void fwdGREP_CREATE_REQ(Signal* s) {
+ pspart.execGREP_CREATE_REQ(s); };
+ void fwdGREP_START_REQ(Signal* s) {
+ pspart.execGREP_START_REQ(s); };
+ void fwdGREP_SYNC_REQ(Signal* s) {
+ pspart.execGREP_SYNC_REQ(s); };
+ void fwdGREP_REMOVE_REQ(Signal* s) {
+ pspart.execGREP_REMOVE_REQ(s); };
+
+ void fwdSUB_META_DATA(Signal* s) {
+ pspart.execSUB_META_DATA(s); };
+ void fwdSUB_TABLE_DATA(Signal* s) {
+ pspart.execSUB_TABLE_DATA(s); };
+
+ void fwdSUB_GCP_COMPLETE_REP(Signal* s) {
+ pspart.execSUB_GCP_COMPLETE_REP(s); };
+
+ void sendEventRep(Signal * signal,
+ Ndb_logevent_type type,
+ GrepEvent::Subscription event,
+ Uint32 subId,
+ Uint32 subKey,
+ Uint32 err,
+ Uint32 gci=0);
+
+ void getNodeGroupMembers(Signal* signal);
+
+
+ /***************************************************************************
+ * Block Data
+ ***************************************************************************/
+ struct Node {
+ Uint32 nodeId;
+ Uint32 alive;
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+ };
+ typedef Ptr<Node> NodePtr;
+
+ NodeId m_masterNodeId;
+ SLList<Node> m_nodes;
+ NdbNodeBitmask m_aliveNodes;
+ ArrayPool<Node> m_nodePool;
+
+ /**
+ * for all SUMAs to keep track of other SUMAs in the node group
+ */
+ Uint32 c_nodeGroup;
+ Uint32 c_noNodesInGroup;
+ Uint32 c_idInNodeGroup;
+ NodeId c_nodesInGroup[4];
+
+
+public:
+ /***************************************************************************
+ * GREP PS Coordinator
+ ***************************************************************************/
+ class PSCoord : public BlockComponent {
+
+ private:
+
+ struct SubCoordinator {
+ Uint32 m_subscriberRef;
+ Uint32 m_subscriberData;
+ Uint32 m_coordinatorRef;
+ Uint32 m_subscriptionId;
+ Uint32 m_subscriptionKey;
+ Uint32 m_subscriptionType;
+ NdbNodeBitmask m_participants;
+ Uint32 m_outstandingRequest;
+ SignalCounter m_outstandingParticipants;
+
+ Uint32 nextHash;
+ union { Uint32 prevHash; Uint32 nextPool; };
+
+ Uint32 hashValue() const {
+ return m_subscriptionId + m_subscriptionKey;
+ }
+
+ bool equal(const SubCoordinator & s) const {
+ return
+ m_subscriptionId == s.m_subscriptionId &&
+ m_subscriptionKey == s.m_subscriptionKey;
+ }
+
+ };
+
+ typedef Ptr<SubCoordinator> SubCoordinatorPtr;
+ ArrayPool<SubCoordinator> c_subCoordinatorPool;
+ DLHashTable<SubCoordinator>::Iterator c_subPtr;
+ DLHashTable<SubCoordinator> c_runningSubscriptions;
+
+ void prepareOperationRec(SubCoordinatorPtr ptr,
+ BlockReference subscriber,
+ Uint32 subId,
+ Uint32 subKey,
+ Uint32 request);
+
+ public:
+ PSCoord(class Grep *);
+
+ void execGREP_CREATE_CONF(Signal*);
+ void execGREP_START_CONF(Signal*);
+ void execGREP_SYNC_CONF(Signal*);
+ void execGREP_REMOVE_CONF(Signal*);
+
+ void execGREP_CREATE_REF(Signal*);
+ void execGREP_START_REF(Signal*);
+ void execGREP_SYNC_REF(Signal*);
+ void execGREP_REMOVE_REF(Signal*);
+
+
+ void execCREATE_SUBID_CONF(Signal*); //comes from SUMA
+ void execGREP_CREATE_SUBID_REQ(Signal*);
+
+ void execGREP_SUB_CREATE_REQ(Signal*);
+ void execGREP_SUB_START_REQ(Signal*);
+ void execGREP_SUB_SYNC_REQ(Signal*);
+ void execGREP_SUB_REMOVE_REQ(Signal*);
+
+
+
+ void execCREATE_SUBID_REF(Signal*);
+
+
+
+ void sendCreateSubIdRef_SS(Signal * signal,
+ Uint32 subId,
+ Uint32 subKey,
+ BlockReference to,
+ GrepError::GE_Code err);
+
+
+ void sendSubRemoveRef_SS(Signal * signal,
+ SubCoordinator sub,
+ GrepError::GE_Code err);
+
+ void sendRefToSS(Signal * signal,
+ SubCoordinator sub,
+ GrepError::GE_Code err,
+ SubscriptionData::Part part = (SubscriptionData::Part)0);
+
+ void setRepRef(BlockReference rr) { m_repRef = rr; };
+ //void setAliveNodes(NdbNodeBitmask an) { m_aliveNodes = an; };
+
+ BlockReference m_repRef; ///< Rep node (only one rep node per grep)
+ // NdbNodeBitmask m_aliveNodes;
+
+ Uint32 m_outstandingRequest;
+ SignalCounter m_outstandingParticipants;
+
+ Grep * m_grep;
+ } pscoord;
+ friend class PSCoord;
+
+ /***************************************************************************
+ * GREP PS Participant
+ ***************************************************************************
+ * Participant of GREP Protocols (not necessarily a protocol coordinator)
+ *
+ * This object is only used on the primary system
+ ***************************************************************************/
+ class PSPart: public BlockComponent
+ {
+ //protected:
+ //GrepParticipant(const Configuration & conf);
+ //virtual ~GrepParticipant();
+ //BLOCK_DEFINES(GrepParticipant);
+
+ struct Subscription {
+ Uint32 m_subscriberRef;
+ Uint32 m_subscriberData;
+ Uint32 m_subscriptionId;
+ Uint32 m_subscriptionKey;
+ Uint32 m_subscriptionType;
+ Uint32 m_coordinatorRef;
+ Uint32 m_outstandingRequest;
+ Uint32 m_operationPtrI;
+ Uint32 nextHash;
+ union { Uint32 prevHash; Uint32 nextPool; };
+
+ Uint32 hashValue() const {
+ return m_subscriptionId + m_subscriptionKey;
+ }
+
+ bool equal(const Subscription & s) const {
+ return
+ m_subscriptionId == s.m_subscriptionId &&
+ m_subscriptionKey == s.m_subscriptionKey;
+ }
+
+ };
+ typedef Ptr<Subscription> SubscriptionPtr;
+
+ DLHashTable<Subscription> c_subscriptions;
+ DLHashTable<Subscription>::Iterator c_subPtr;
+ ArrayPool<Subscription> c_subscriptionPool;
+
+ public:
+ PSPart(class Grep *);
+
+
+ //protected:
+ /*************************************************************************
+ * SUMA Signal Interface
+ *************************************************************************/
+ void execSUB_CREATE_CONF(Signal*);
+ void execSUB_START_CONF(Signal*);
+ void execSUB_SYNC_CONF(Signal*);
+ void execSUB_REMOVE_CONF(Signal*);
+
+ void execSUB_CREATE_REF(Signal*);
+ void execSUB_START_REF(Signal*);
+ void execSUB_SYNC_REF(Signal*);
+ void execSUB_REMOVE_REF(Signal*);
+
+
+ void execSUB_META_DATA(Signal*);
+ void execSUB_TABLE_DATA(Signal*);
+
+
+ void execSUB_GCP_COMPLETE_REP(Signal*);
+ void execSUB_SYNC_CONTINUE_REQ(Signal*);
+
+ /*************************************************************************
+ * GREP Coordinator Signal Interface
+ *************************************************************************/
+ void execGREP_CREATE_REQ(Signal*);
+ void execGREP_START_REQ(Signal*);
+ void execGREP_SYNC_REQ(Signal*);
+ void execGREP_REMOVE_REQ(Signal*);
+
+ /**
+ * NR/NF signals
+ */
+ void execSTART_ME(Signal *);
+ void execGREP_ADD_SUB_REQ(Signal *);
+ void execGREP_ADD_SUB_REF(Signal *);
+ void execGREP_ADD_SUB_CONF(Signal *);
+
+ /*************************************************************************
+ * GREP Coordinator error handling interface
+ *************************************************************************/
+
+ void sendRefToPSCoord(Signal * signal,
+ Subscription sub,
+ GrepError::GE_Code err,
+ SubscriptionData::Part part = (SubscriptionData::Part)0);
+
+ //protected:
+ BlockReference m_repRef; ///< Replication node
+ ///< (only one rep node per grep)
+ bool m_recoveryMode;
+
+ private:
+ BlockReference m_coordinator;
+ Uint32 m_firstScanGCI;
+ Uint32 m_lastScanGCI;
+ Uint32 m_latestSeenGCI;
+ Grep * m_grep;
+ } pspart;
+ friend class PSPart;
+
+ /***************************************************************************
+ * AddRecSignal Stuff (should maybe be generalized)
+ ***************************************************************************/
+ typedef void (Grep::* ExecSignalLocal1) (Signal* signal);
+ typedef void (Grep::PSCoord::* ExecSignalLocal2) (Signal* signal);
+ typedef void (Grep::PSPart::* ExecSignalLocal4) (Signal* signal);
+};
+
+
+/*************************************************************************
+ * Requestor
+ *
+ * The following methods are callbacks (registered functions)
+ * for the Requestor. The Requestor calls these when it needs
+ * something to be done.
+ *************************************************************************/
+void startSubscription(void * cbObj, Signal*, int type);
+void scanSubscription(void * cbObj, Signal*, int type);
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/grep/GrepInit.cpp b/storage/ndb/src/kernel/blocks/grep/GrepInit.cpp
new file mode 100644
index 00000000000..d764fb1f473
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/GrepInit.cpp
@@ -0,0 +1,164 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Grep.hpp"
+#include <Properties.hpp>
+#include <Configuration.hpp>
+
+/*****************************************************************************
+ * Grep Participant
+ *****************************************************************************/
+#if 0
+GrepParticipant::GrepParticipant(const Configuration & conf) :
+ SimulatedBlock(GREP, conf)
+{
+ BLOCK_CONSTRUCTOR(Grep);
+ //m_repRef = 0;
+ m_latestSeenGCI = 0;
+}
+
+GrepParticipant::~GrepParticipant()
+{
+}
+
+BLOCK_FUNCTIONS(GrepParticipant);
+#endif
+
+/*****************************************************************************
+ * Grep Coordinator
+ *****************************************************************************/
+Grep::Grep(const Configuration & conf) :
+ // GrepParticipant(conf),
+ SimulatedBlock(GREP, conf),
+ m_nodes(m_nodePool),
+ pscoord(this),
+ pspart(this)
+{
+ m_nodePool.setSize(MAX_NDB_NODES);
+ m_masterNodeId = getOwnNodeId();
+
+ /***************************************************************************
+ * General Signals
+ ***************************************************************************/
+ addRecSignal(GSN_STTOR, &Grep::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Grep::execNDB_STTOR);
+ addRecSignal(GSN_DUMP_STATE_ORD, &Grep::execDUMP_STATE_ORD);
+ addRecSignal(GSN_READ_NODESCONF, &Grep::execREAD_NODESCONF);
+ addRecSignal(GSN_NODE_FAILREP, &Grep::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Grep::execINCL_NODEREQ);
+
+ addRecSignal(GSN_GREP_REQ, &Grep::execGREP_REQ);
+ addRecSignal(GSN_API_FAILREQ, &Grep::execAPI_FAILREQ);
+
+
+ /***************************************************************************
+ * Grep::PSCoord Signal Interface
+ ***************************************************************************/
+ /**
+ * From Grep::PSPart
+ */
+ addRecSignal(GSN_GREP_CREATE_CONF, &Grep::fwdGREP_CREATE_CONF);
+ addRecSignal(GSN_GREP_START_CONF, &Grep::fwdGREP_START_CONF);
+ addRecSignal(GSN_GREP_SYNC_CONF, &Grep::fwdGREP_SYNC_CONF);
+ addRecSignal(GSN_GREP_REMOVE_CONF, &Grep::fwdGREP_REMOVE_CONF);
+
+ addRecSignal(GSN_GREP_CREATE_REF, &Grep::fwdGREP_CREATE_REF);
+ addRecSignal(GSN_GREP_START_REF, &Grep::fwdGREP_START_REF);
+ addRecSignal(GSN_GREP_REMOVE_REF, &Grep::fwdGREP_REMOVE_REF);
+
+ /**
+ * From Grep::SSCoord to Grep::PSCoord
+ */
+ addRecSignal(GSN_GREP_SUB_START_REQ, &Grep::fwdGREP_SUB_START_REQ);
+ addRecSignal(GSN_GREP_SUB_CREATE_REQ, &Grep::fwdGREP_SUB_CREATE_REQ);
+ addRecSignal(GSN_GREP_SUB_SYNC_REQ, &Grep::fwdGREP_SUB_SYNC_REQ);
+ addRecSignal(GSN_GREP_SUB_REMOVE_REQ, &Grep::fwdGREP_SUB_REMOVE_REQ);
+ addRecSignal(GSN_GREP_CREATE_SUBID_REQ, &Grep::fwdGREP_CREATE_SUBID_REQ);
+
+ /****************************************************************************
+ * PSPart
+ ***************************************************************************/
+ /**
+ * From SUMA to GREP PS Participant (if SUMA is not a coordinator)
+ */
+ addRecSignal(GSN_SUB_START_CONF, &Grep::fwdSUB_START_CONF);
+ addRecSignal(GSN_SUB_CREATE_CONF, &Grep::fwdSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_SYNC_CONF, &Grep::fwdSUB_SYNC_CONF);
+ addRecSignal(GSN_SUB_REMOVE_CONF, &Grep::fwdSUB_REMOVE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Grep::fwdSUB_CREATE_REF);
+ addRecSignal(GSN_SUB_START_REF, &Grep::fwdSUB_START_REF);
+ addRecSignal(GSN_SUB_SYNC_REF, &Grep::fwdSUB_SYNC_REF);
+ addRecSignal(GSN_SUB_REMOVE_REF, &Grep::fwdSUB_REMOVE_REF);
+
+ addRecSignal(GSN_SUB_SYNC_CONTINUE_REQ,
+ &Grep::fwdSUB_SYNC_CONTINUE_REQ);
+
+ /**
+ * From Suma to Grep::PSPart. Data signals.
+ */
+ addRecSignal(GSN_SUB_META_DATA, &Grep::fwdSUB_META_DATA);
+ addRecSignal(GSN_SUB_TABLE_DATA, &Grep::fwdSUB_TABLE_DATA);
+ addRecSignal(GSN_SUB_GCP_COMPLETE_REP, &Grep::fwdSUB_GCP_COMPLETE_REP);
+
+ /**
+ * From Grep::PSCoord to Grep::PSPart
+ */
+ addRecSignal(GSN_GREP_CREATE_REQ, &Grep::fwdGREP_CREATE_REQ);
+ addRecSignal(GSN_GREP_START_REQ, &Grep::fwdGREP_START_REQ);
+ addRecSignal(GSN_GREP_REMOVE_REQ, &Grep::fwdGREP_REMOVE_REQ);
+ addRecSignal(GSN_GREP_SYNC_REQ, &Grep::fwdGREP_SYNC_REQ);
+ addRecSignal(GSN_CREATE_SUBID_CONF, &Grep::fwdCREATE_SUBID_CONF);
+ addRecSignal(GSN_GREP_START_ME, &Grep::fwdSTART_ME);
+ addRecSignal(GSN_GREP_ADD_SUB_REQ, &Grep::fwdGREP_ADD_SUB_REQ);
+ addRecSignal(GSN_GREP_ADD_SUB_REF, &Grep::fwdGREP_ADD_SUB_REF);
+ addRecSignal(GSN_GREP_ADD_SUB_CONF, &Grep::fwdGREP_ADD_SUB_CONF);
+}
+
+Grep::~Grep()
+{
+}
+
+BLOCK_FUNCTIONS(Grep)
+
+Grep::PSPart::PSPart(Grep * sb) :
+ BlockComponent(sb),
+ c_subscriptions(c_subscriptionPool)
+{
+ m_grep = sb;
+
+ m_firstScanGCI = 1; // Empty interval = [1,0]
+ m_lastScanGCI = 0;
+
+ m_latestSeenGCI = 0;
+
+ c_subscriptions.setSize(10);
+ c_subscriptionPool.setSize(10);
+}
+
+Grep::PSCoord::PSCoord(Grep * sb) :
+ BlockComponent(sb),
+ c_runningSubscriptions(c_subCoordinatorPool)
+{
+ m_grep = sb;
+ c_runningSubscriptions.setSize(10);
+ c_subCoordinatorPool.setSize(2);
+}
+
+//BLOCK_FUNCTIONS(Grep::PSCoord);
+
+BlockComponent::BlockComponent(SimulatedBlock * sb) {
+ m_sb = sb;
+}
diff --git a/storage/ndb/src/kernel/blocks/grep/Makefile.am b/storage/ndb/src/kernel/blocks/grep/Makefile.am
new file mode 100644
index 00000000000..6d2b422784b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libgrep.a
+
+libgrep_a_SOURCES = Grep.cpp GrepInit.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libgrep.dsp
+
+libgrep.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libgrep_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/grep/systab_test/Makefile b/storage/ndb/src/kernel/blocks/grep/systab_test/Makefile
new file mode 100644
index 00000000000..bd69e0f3799
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/systab_test/Makefile
@@ -0,0 +1,12 @@
+include .defs.mk
+
+TYPE := kernel
+
+BIN_TARGET := grep_systab_test
+BIN_TARGET_ARCHIVES := portlib general
+
+CCFLAGS_LOC += -I..
+
+SOURCES = ../GrepSystemTable.cpp grep_systab_test.cpp
+
+include $(NDB_TOP)/Epilogue.mk
diff --git a/storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp b/storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp
new file mode 100644
index 00000000000..e3a77af4e4e
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/grep/systab_test/grep_systab_test.cpp
@@ -0,0 +1,138 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ * Unit Test for GrepSystemTable
+ */
+
+#include "../GrepSystemTable.hpp"
+#include <SimulatedBlock.hpp>
+
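+/*
+ * EXEC prints a newline and the statement text (via the # stringizing
+ * operator) before evaluating the expression, so every test step is
+ * echoed in the program output.
+ */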
+#define EXEC(X) ( ndbout << endl, ndbout_c(#X), X )
+
+int
+main () {
+ GrepSystemTable st;
+
+ Uint32 f, l;
+
+ ndbout_c("*************************************");
+ ndbout_c("* GrepSystemTable Unit Test Program *");
+ ndbout_c("*************************************");
+
+ ndbout_c("--------------------------------------------------------");
+ ndbout_c("Test 1: Clear");
+ ndbout_c("--------------------------------------------------------");
+
+ EXEC(st.set(GrepSystemTable::PS, 22, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+
+ EXEC(st.clear(GrepSystemTable::PS, 20, 24));
+ st.print();
+ st.require(GrepSystemTable::PS, 25, 26);
+
+ EXEC(st.clear(GrepSystemTable::PS, 0, 100));
+ st.print();
+ st.require(GrepSystemTable::PS, 1, 0);
+
+ EXEC(st.set(GrepSystemTable::PS, 22, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+
+ EXEC(st.clear(GrepSystemTable::PS, 24, 28));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 23);
+
+ EXEC(st.clear(GrepSystemTable::PS, 0, 100));
+ st.print();
+ st.require(GrepSystemTable::PS, 1, 0);
+
+ EXEC(st.set(GrepSystemTable::PS, 22, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+
+ EXEC(st.clear(GrepSystemTable::PS, 24, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 23);
+
+ EXEC(st.clear(GrepSystemTable::PS, 0, 100));
+ st.print();
+ st.require(GrepSystemTable::PS, 1, 0);
+
+ EXEC(st.set(GrepSystemTable::PS, 22, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+
+ EXEC(st.clear(GrepSystemTable::PS, 22, 24));
+ st.print();
+ st.require(GrepSystemTable::PS, 25, 26);
+
+ ndbout_c("--------------------------------------------------------");
+ ndbout_c("Test 2: PS --> SSreq");
+ ndbout_c("--------------------------------------------------------");
+
+ EXEC(st.set(GrepSystemTable::PS, 22, 26));
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+ st.require(GrepSystemTable::SSReq, 1, 0);
+
+ if (!EXEC(st.copy(GrepSystemTable::PS, GrepSystemTable::SSReq, 3, &f, &l)))
+ ndbout_c("%s:%d: Illegal copy!", __FILE__, __FILE__);
+ ndbout_c("f=%d, l=%d", f, l);
+ st.print();
+ st.require(GrepSystemTable::PS, 22, 26);
+ st.require(GrepSystemTable::SSReq, 22, 24);
+
+ EXEC(st.clear(GrepSystemTable::PS, 22, 22));
+ st.print();
+ st.require(GrepSystemTable::PS, 23, 26);
+ st.require(GrepSystemTable::SSReq, 22, 24);
+
+ if (!EXEC(st.copy(GrepSystemTable::PS, GrepSystemTable::SSReq, 2, &f, &l)))
+ ndbout_c("%s:%d: Illegal copy!", __FILE__, __LINE__);
+ ndbout_c("f=%d, l=%d", f, l);
+ st.print();
+ st.require(GrepSystemTable::PS, 23, 26);
+ st.require(GrepSystemTable::SSReq, 22, 26);
+
+ st.set(GrepSystemTable::SS, 7, 9);
+ st.set(GrepSystemTable::InsReq, 7, 9);
+ if (EXEC(st.movable(GrepSystemTable::SS, GrepSystemTable::InsReq)))
+ ndbout_c("%s:%d: Illegal move!", __FILE__, __LINE__);
+ st.print();
+ st.require(GrepSystemTable::SS, 7, 9);
+ st.require(GrepSystemTable::InsReq, 7, 9);
+
+ EXEC(st.intervalMinus(7, 9, 7, 7, &f, &l));
+ ndbout_c("f=%d, l=%d", f, l);
+
+ st.clear(GrepSystemTable::InsReq, 8, 9);
+ st.require(GrepSystemTable::SS, 7, 9);
+ st.require(GrepSystemTable::InsReq, 7, 7);
+ if (EXEC(st.movable(GrepSystemTable::SS, GrepSystemTable::InsReq)) != 2)
+ ndbout_c("%s:%d: Illegal move!", __FILE__, __LINE__);
+ st.print();
+
+ EXEC(st.copy(GrepSystemTable::SS, GrepSystemTable::InsReq, &f));
+ st.print();
+ st.require(GrepSystemTable::SS, 7, 9);
+ st.require(GrepSystemTable::InsReq, 7, 8);
+
+ ndbout_c("--------------------------------------------------------");
+ ndbout_c("Test completed");
+ ndbout_c("--------------------------------------------------------");
+}
diff --git a/storage/ndb/src/kernel/blocks/mutexes.hpp b/storage/ndb/src/kernel/blocks/mutexes.hpp
new file mode 100644
index 00000000000..5c0276fc4fa
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/mutexes.hpp
@@ -0,0 +1,39 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef KERNEL_MUTEXES_HPP
+#define KERNEL_MUTEXES_HPP
+
+#include <ndb_types.h>
+
+/**
+ * This mutex is used by:
+ * DIH - before sending START_LCP to all participants
+ * DICT - before committing a CREATE TABLE
+ * BACKUP - before sending DEFINE_BACKUP
+ */
+#define DIH_START_LCP_MUTEX 0
+#define DICT_COMMIT_TABLE_MUTEX 0
+
+/**
+ * This mutex is used by
+ * DIH - before switching primary replica
+ * BACKUP - before sending DEFINE_BACKUP
+ */
+#define DIH_SWITCH_PRIMARY_MUTEX 1
+#define BACKUP_DEFINE_MUTEX 1
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am b/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am
new file mode 100644
index 00000000000..3f24675b2b3
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Makefile.am
@@ -0,0 +1,26 @@
+noinst_LIBRARIES = libndbcntr.a
+
+libndbcntr_a_SOURCES = \
+ NdbcntrInit.cpp \
+ NdbcntrSysTable.cpp \
+ NdbcntrMain.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libndbcntr.dsp
+
+libndbcntr.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libndbcntr_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
new file mode 100644
index 00000000000..639d300d6df
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
@@ -0,0 +1,376 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDBCNTR_H
+#define NDBCNTR_H
+
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include <ndb_limits.h>
+#include <signaldata/StopReq.hpp>
+#include <signaldata/ResumeReq.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CntrStart.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+
+#include <signaldata/UpgradeStartup.hpp>
+
+#include <NodeState.hpp>
+#include <NdbTick.h>
+
+#ifdef NDBCNTR_C
+/*
+2.1 GLOBAL SYMBOLS
+------------------
+*/
+/*
+2.2 LOCAL SYMBOLS
+-----------------
+*/
+#define ZNO_NDB_BLOCKS 6 /* ACC, DICT, DIH, LQH, TC, TUP */
+
+#define ZNOT_AVAILABLE 913
+
+//------- OTHERS ---------------------------------------------
+#define ZSTARTUP 1
+#define ZSHUTDOWN 2
+
+#define ZSIZE_NDB_BLOCKS_REC 16 /* MAX BLOCKS IN NDB */
+#define ZSIZE_SYSTAB 2048
+#define ZSTART_PHASE_1 1
+#define ZSTART_PHASE_2 2
+#define ZSTART_PHASE_3 3
+#define ZSTART_PHASE_4 4
+#define ZSTART_PHASE_5 5
+#define ZSTART_PHASE_6 6
+#define ZSTART_PHASE_7 7
+#define ZSTART_PHASE_8 8
+#define ZSTART_PHASE_9 9
+#define ZSTART_PHASE_END 255
+#define ZWAITPOINT_4_1 1
+#define ZWAITPOINT_4_2 2
+#define ZWAITPOINT_5_1 3
+#define ZWAITPOINT_5_2 4
+#define ZWAITPOINT_6_1 5
+#define ZWAITPOINT_6_2 6
+#define ZWAITPOINT_7_1 7
+#define ZWAITPOINT_7_2 8
+#define ZSYSTAB_VERSION 1
+#endif
+
+class Ndbcntr: public SimulatedBlock {
+public:
+// Records
+
+/* FSREADREQ FSWRITEREQ */
+/**
+ * 2.3 RECORDS AND FILESIZES
+ * ------------------------------------------------------------
+ */
+
+ struct StartRecord {
+ Uint64 m_startTime;
+
+ void reset();
+ NdbNodeBitmask m_starting;
+ NdbNodeBitmask m_waiting; // == (m_withLog | m_withoutLog)
+ NdbNodeBitmask m_withLog;
+ NdbNodeBitmask m_withoutLog;
+ Uint32 m_lastGci;
+ Uint32 m_lastGciNodeId;
+
+ Uint64 m_startPartialTimeout;
+ Uint64 m_startPartitionedTimeout;
+ Uint64 m_startFailureTimeout;
+ struct {
+ Uint32 m_nodeId;
+ Uint32 m_lastGci;
+ } m_logNodes[MAX_NDB_NODES];
+ Uint32 m_logNodesCount;
+ } c_start;
+
+ struct NdbBlocksRec {
+ BlockReference blockref;
+ }; /* p2c: size = 2 bytes */
+
+ typedef Ptr<NdbBlocksRec> NdbBlocksRecPtr;
+
+ /**
+ * Ndbcntr creates and initializes system tables on initial system start.
+ * The tables are defined in static structs in NdbcntrSysTable.cpp.
+ */
+ struct SysColumn {
+ unsigned pos;
+ const char* name;
+ // DictTabInfo
+ DictTabInfo::ExtType type;
+ Uint32 length;
+ bool keyFlag;
+ bool nullable;
+ };
+ struct SysTable {
+ const char* name;
+ unsigned columnCount;
+ const SysColumn* columnList;
+ // DictTabInfo
+ DictTabInfo::TableType tableType;
+ DictTabInfo::FragmentType fragmentType;
+ bool tableLoggedFlag;
+ // saved table id
+ mutable Uint32 tableId;
+ };
+ struct SysIndex {
+ const char* name;
+ const SysTable* primaryTable;
+ Uint32 columnCount;
+ Uint32 columnList[4];
+ // DictTabInfo
+ DictTabInfo::TableType indexType;
+ DictTabInfo::FragmentType fragmentType;
+ bool indexLoggedFlag;
+ // saved index table id
+ mutable Uint32 indexId;
+ };
+ static const SysTable* g_sysTableList[];
+ static const unsigned g_sysTableCount;
+ // the system tables
+ static const SysTable g_sysTable_SYSTAB_0;
+ static const SysTable g_sysTable_NDBEVENTS_0;
+
+public:
+ Ndbcntr(const class Configuration &);
+ virtual ~Ndbcntr();
+
+private:
+ BLOCK_DEFINES(Ndbcntr);
+
+ // Transit signals
+ void execCONTINUEB(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execCM_ADD_REP(Signal* signal);
+ void execCNTR_START_REQ(Signal* signal);
+ void execCNTR_START_REF(Signal* signal);
+ void execCNTR_START_CONF(Signal* signal);
+ void execCNTR_START_REP(Signal* signal);
+ void execCNTR_WAITREP(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execSYSTEM_ERROR(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execTCSEIZECONF(Signal* signal);
+ void execTCSEIZEREF(Signal* signal);
+ void execTCRELEASECONF(Signal* signal);
+ void execTCRELEASEREF(Signal* signal);
+ void execTCKEYCONF(Signal* signal);
+ void execTCKEYREF(Signal* signal);
+ void execTCROLLBACKREP(Signal* signal);
+ void execGETGCICONF(Signal* signal);
+ void execDIH_RESTARTCONF(Signal* signal);
+ void execDIH_RESTARTREF(Signal* signal);
+ void execCREATE_TABLE_REF(Signal* signal);
+ void execCREATE_TABLE_CONF(Signal* signal);
+ void execNDB_STTORRY(Signal* signal);
+ void execNDB_STARTCONF(Signal* signal);
+ void execREAD_NODESREQ(Signal* signal);
+ void execNDB_STARTREF(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+
+ void execSTOP_PERM_REF(Signal* signal);
+ void execSTOP_PERM_CONF(Signal* signal);
+
+ void execSTOP_ME_REF(Signal* signal);
+ void execSTOP_ME_CONF(Signal* signal);
+
+ void execWAIT_GCP_REF(Signal* signal);
+ void execWAIT_GCP_CONF(Signal* signal);
+
+ void execSTOP_REQ(Signal* signal);
+ void execRESUME_REQ(Signal* signal);
+
+ void execCHANGE_NODE_STATE_CONF(Signal* signal);
+
+ void execABORT_ALL_REF(Signal* signal);
+ void execABORT_ALL_CONF(Signal* signal);
+
+ // Statement blocks
+ void sendCreateTabReq(Signal* signal, const char* buffer, Uint32 bufLen);
+ void startInsertTransactions(Signal* signal);
+ void initData(Signal* signal);
+ void resetStartVariables(Signal* signal);
+ void sendCntrStartReq(Signal* signal);
+ void sendCntrStartRef(Signal*, Uint32 nodeId, CntrStartRef::ErrorCode);
+ void sendNdbSttor(Signal* signal);
+ void sendSttorry(Signal* signal);
+
+ bool trySystemRestart(Signal* signal);
+ void startWaitingNodes(Signal* signal);
+ CheckNodeGroups::Output checkNodeGroups(Signal*, const NdbNodeBitmask &);
+
+ // Generated statement blocks
+ void systemErrorLab(Signal* signal);
+
+ void createSystableLab(Signal* signal, unsigned index);
+ void crSystab7Lab(Signal* signal);
+ void crSystab8Lab(Signal* signal);
+ void crSystab9Lab(Signal* signal);
+
+ void startPhase1Lab(Signal* signal);
+ void startPhase2Lab(Signal* signal);
+ void startPhase3Lab(Signal* signal);
+ void startPhase4Lab(Signal* signal);
+ void startPhase5Lab(Signal* signal);
+ // jump 2 to resync phase counters
+ void startPhase8Lab(Signal* signal);
+ void startPhase9Lab(Signal* signal);
+ void ph2ALab(Signal* signal);
+ void ph2CLab(Signal* signal);
+ void ph2ELab(Signal* signal);
+ void ph2FLab(Signal* signal);
+ void ph2GLab(Signal* signal);
+ void ph3ALab(Signal* signal);
+ void ph4ALab(Signal* signal);
+ void ph4BLab(Signal* signal);
+ void ph4CLab(Signal* signal);
+ void ph5ALab(Signal* signal);
+ void ph6ALab(Signal* signal);
+ void ph6BLab(Signal* signal);
+ void ph7ALab(Signal* signal);
+ void ph8ALab(Signal* signal);
+
+
+ void waitpoint41Lab(Signal* signal);
+ void waitpoint51Lab(Signal* signal);
+ void waitpoint52Lab(Signal* signal);
+ void waitpoint61Lab(Signal* signal);
+ void waitpoint71Lab(Signal* signal);
+
+ void updateNodeState(Signal* signal, const NodeState & newState) const ;
+ void getNodeGroup(Signal* signal);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Variables
+ /**------------------------------------------------------------------------
+ * CONTAINS INFO ABOUT ALL NODES IN THE CLUSTER. NODE_PTR IS USED AS NODE NUMBER.
+ * IF THE STATE IS ZDELETE THEN THE NODE DOESN'T EXIST. NODES ARE ALLOWED
+ * TO REGISTER (ZADD) DURING RESTART.
+ *
+ * WHEN THE SYSTEM IS RUNNING THE MASTER WILL CHECK IF ANY NODE HAS MADE
+ * A CNTR_MASTERREQ AND TAKE CARE OF THE REQUEST.
+ * TO CONFIRM THE REQ, THE MASTER DEMANDS THAT ALL RUNNING NODES HAVE VOTED
+ * FOR THE NEW NODE.
+ * NODE_PTR:MASTER_REQ IS USED DURING RESTART TO LOG
+ * POSTPONED CNTR_MASTERREQ'S
+ *------------------------------------------------------------------------*/
+ NdbBlocksRec *ndbBlocksRec;
+
+ /*
+ 2.4 COMMON STORED VARIABLES
+ */
+ UintR cgciSystab;
+ UintR ckey;
+ //UintR csystabId;
+ UintR cnoWaitrep6;
+ UintR cnoWaitrep7;
+ UintR ctcConnectionP;
+ UintR ctcReqInfo;
+ Uint8 ctransidPhase;
+ Uint16 cresponses;
+
+ Uint8 cstartPhase;
+ Uint16 cinternalStartphase;
+
+ Uint16 cmasterNodeId;
+ Uint16 cndbBlocksCount;
+ Uint16 cnoStartNodes;
+ UintR cnoWaitrep;
+ NodeState::StartType ctypeOfStart;
+ Uint16 cdynamicNodeId;
+
+ Uint32 c_fsRemoveCount;
+ Uint32 c_nodeGroup;
+ void clearFilesystem(Signal* signal);
+ void execFSREMOVEREF(Signal* signal);
+ void execFSREMOVECONF(Signal* signal);
+
+ NdbNodeBitmask c_allDefinedNodes;
+ NdbNodeBitmask c_clusterNodes; // All members of qmgr cluster
+ NdbNodeBitmask c_startedNodes; // All cntr started nodes
+
+public:
+ struct StopRecord {
+ public:
+ StopRecord(Ndbcntr & _cntr) : cntr(_cntr) {
+ stopReq.senderRef = 0;
+ }
+
+ Ndbcntr & cntr;
+ StopReq stopReq; // Signal data
+ NDB_TICKS stopInitiatedTime; // When was the stop initiated
+
+ bool checkNodeFail(Signal* signal);
+ void checkTimeout(Signal* signal);
+ void checkApiTimeout(Signal* signal);
+ void checkTcTimeout(Signal* signal);
+ void checkLqhTimeout_1(Signal* signal);
+ void checkLqhTimeout_2(Signal* signal);
+
+ BlockNumber number() const { return cntr.number(); }
+ void progError(int line, int cause, const char * extra) {
+ cntr.progError(line, cause, extra);
+ }
+ };
+private:
+ StopRecord c_stopRec;
+ friend struct StopRecord;
+
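+ /**
+ * Start-phase sequencer: steps through the blocks (currentBlockIndex)
+ * and start phases (currentStartPhase), driven by STTORRY and
+ * READ_CONFIG_CONF via sendNextSTTOR / sendNextREAD_CONFIG_REQ.
+ */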
+ struct Missra {
+ Missra(Ndbcntr & ref) : cntr(ref) { }
+
+ Uint32 currentBlockIndex;
+ Uint32 currentStartPhase;
+ Uint32 nextStartPhase[NO_OF_BLOCKS];
+
+ void execSTART_ORD(Signal* signal);
+ void execSTTORRY(Signal* signal);
+ void sendNextSTTOR(Signal* signal);
+ void execREAD_CONFIG_CONF(Signal* signal);
+ void sendNextREAD_CONFIG_REQ(Signal* signal);
+
+ BlockNumber number() const { return cntr.number(); }
+ void progError(int line, int cause, const char * extra) {
+ cntr.progError(line, cause, extra);
+ }
+ Ndbcntr & cntr;
+ };
+
+ Missra c_missra;
+ friend struct Missra;
+
+ void execSTTORRY(Signal* signal);
+ void execSTART_ORD(Signal* signal);
+ void execREAD_CONFIG_CONF(Signal*);
+
+ friend struct UpgradeStartup;
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
new file mode 100644
index 00000000000..c7b472fc91a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -0,0 +1,117 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+
+#define NDBCNTR_C
+#include "Ndbcntr.hpp"
+#include <ndb_limits.h>
+
+#define DEBUG(x) { ndbout << "Ndbcntr::" << x << endl; }
+
+
+void Ndbcntr::initData()
+{
+
+ // Records with constant sizes
+ ndbBlocksRec = new NdbBlocksRec[ZSIZE_NDB_BLOCKS_REC];
+}//Ndbcntr::initData()
+
+void Ndbcntr::initRecords()
+{
+ // Records with dynamic sizes
+}//Ndbcntr::initRecords()
+
+Ndbcntr::Ndbcntr(const class Configuration & conf):
+ SimulatedBlock(NDBCNTR, conf),
+ cnoWaitrep6(0),
+ cnoWaitrep7(0),
+ c_stopRec(* this),
+ c_missra(* this)
+{
+
+ BLOCK_CONSTRUCTOR(Ndbcntr);
+
+ // Transit signals
+ addRecSignal(GSN_CONTINUEB, &Ndbcntr::execCONTINUEB);
+ addRecSignal(GSN_READ_NODESCONF, &Ndbcntr::execREAD_NODESCONF);
+ addRecSignal(GSN_READ_NODESREF, &Ndbcntr::execREAD_NODESREF);
+ addRecSignal(GSN_CM_ADD_REP, &Ndbcntr::execCM_ADD_REP);
+ addRecSignal(GSN_CNTR_START_REQ, &Ndbcntr::execCNTR_START_REQ);
+ addRecSignal(GSN_CNTR_START_REF, &Ndbcntr::execCNTR_START_REF);
+ addRecSignal(GSN_CNTR_START_CONF, &Ndbcntr::execCNTR_START_CONF);
+ addRecSignal(GSN_CNTR_WAITREP, &Ndbcntr::execCNTR_WAITREP);
+ addRecSignal(GSN_CNTR_START_REP, &Ndbcntr::execCNTR_START_REP);
+ addRecSignal(GSN_NODE_FAILREP, &Ndbcntr::execNODE_FAILREP);
+ addRecSignal(GSN_SYSTEM_ERROR , &Ndbcntr::execSYSTEM_ERROR);
+
+ // Received signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Ndbcntr::execDUMP_STATE_ORD);
+ addRecSignal(GSN_STTOR, &Ndbcntr::execSTTOR);
+ addRecSignal(GSN_TCSEIZECONF, &Ndbcntr::execTCSEIZECONF);
+ addRecSignal(GSN_TCSEIZEREF, &Ndbcntr::execTCSEIZEREF);
+ addRecSignal(GSN_TCRELEASECONF, &Ndbcntr::execTCRELEASECONF);
+ addRecSignal(GSN_TCRELEASEREF, &Ndbcntr::execTCRELEASEREF);
+ addRecSignal(GSN_TCKEYCONF, &Ndbcntr::execTCKEYCONF);
+ addRecSignal(GSN_TCKEYREF, &Ndbcntr::execTCKEYREF);
+ addRecSignal(GSN_TCROLLBACKREP, &Ndbcntr::execTCROLLBACKREP);
+ addRecSignal(GSN_GETGCICONF, &Ndbcntr::execGETGCICONF);
+ addRecSignal(GSN_DIH_RESTARTCONF, &Ndbcntr::execDIH_RESTARTCONF);
+ addRecSignal(GSN_DIH_RESTARTREF, &Ndbcntr::execDIH_RESTARTREF);
+ addRecSignal(GSN_CREATE_TABLE_REF, &Ndbcntr::execCREATE_TABLE_REF);
+ addRecSignal(GSN_CREATE_TABLE_CONF, &Ndbcntr::execCREATE_TABLE_CONF);
+ addRecSignal(GSN_NDB_STTORRY, &Ndbcntr::execNDB_STTORRY);
+ addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
+ addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);
+ addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF);
+ addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ);
+
+ addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF);
+ addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF);
+
+ addRecSignal(GSN_STOP_ME_REF, &Ndbcntr::execSTOP_ME_REF);
+ addRecSignal(GSN_STOP_ME_CONF, &Ndbcntr::execSTOP_ME_CONF);
+
+ addRecSignal(GSN_STOP_REQ, &Ndbcntr::execSTOP_REQ);
+ addRecSignal(GSN_RESUME_REQ, &Ndbcntr::execRESUME_REQ);
+
+ addRecSignal(GSN_WAIT_GCP_REF, &Ndbcntr::execWAIT_GCP_REF);
+ addRecSignal(GSN_WAIT_GCP_CONF, &Ndbcntr::execWAIT_GCP_CONF);
+ addRecSignal(GSN_CHANGE_NODE_STATE_CONF,
+ &Ndbcntr::execCHANGE_NODE_STATE_CONF);
+
+ addRecSignal(GSN_ABORT_ALL_REF, &Ndbcntr::execABORT_ALL_REF);
+ addRecSignal(GSN_ABORT_ALL_CONF, &Ndbcntr::execABORT_ALL_CONF);
+
+ addRecSignal(GSN_START_ORD, &Ndbcntr::execSTART_ORD);
+ addRecSignal(GSN_STTORRY, &Ndbcntr::execSTTORRY);
+ addRecSignal(GSN_READ_CONFIG_CONF, &Ndbcntr::execREAD_CONFIG_CONF);
+
+ addRecSignal(GSN_FSREMOVEREF, &Ndbcntr::execFSREMOVEREF);
+ addRecSignal(GSN_FSREMOVECONF, &Ndbcntr::execFSREMOVECONF);
+
+ initData();
+ ctypeOfStart = NodeState::ST_ILLEGAL_TYPE;
+ c_start.m_startTime = NdbTick_CurrentMillisecond();
+}//Ndbcntr::Ndbcntr()
+
+Ndbcntr::~Ndbcntr()
+{
+ delete []ndbBlocksRec;
+
+}//Ndbcntr::~Ndbcntr()
+
+BLOCK_FUNCTIONS(Ndbcntr)
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
new file mode 100644
index 00000000000..524a40697bf
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -0,0 +1,2695 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define NDBCNTR_C
+#include "Ndbcntr.hpp"
+
+#include <ndb_limits.h>
+#include <ndb_version.h>
+#include <SimpleProperties.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateTable.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/TcKeyConf.hpp>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/NodeStateSignalData.hpp>
+#include <signaldata/StopPerm.hpp>
+#include <signaldata/StopMe.hpp>
+#include <signaldata/WaitGCP.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/StartOrd.hpp>
+#include <signaldata/AbortAll.hpp>
+#include <signaldata/SystemError.hpp>
+#include <signaldata/NdbSttor.hpp>
+#include <signaldata/CntrStart.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/ReadConfig.hpp>
+
+#include <AttributeHeader.hpp>
+#include <Configuration.hpp>
+#include <DebuggerNames.hpp>
+
+#include <NdbOut.hpp>
+#include <NdbTick.h>
+
+/**
+ * ALL_BLOCKS Used during start phases and while changing node state
+ *
+ * NDBFS_REF Has to be before NDBCNTR_REF (due to "ndb -i" stuff)
+ */
+struct BlockInfo {
+ BlockReference Ref; // BlockReference
+ Uint32 NextSP; // Next start phase
+ Uint32 ErrorInsertStart;
+ Uint32 ErrorInsertStop;
+};
+
+static BlockInfo ALL_BLOCKS[] = {
+ { DBTC_REF, 1 , 8000, 8035 },
+ { DBDIH_REF, 1 , 7000, 7173 },
+ { DBLQH_REF, 1 , 5000, 5030 },
+ { DBACC_REF, 1 , 3000, 3999 },
+ { DBTUP_REF, 1 , 4000, 4007 },
+ { DBDICT_REF, 1 , 6000, 6003 },
+ { NDBFS_REF, 0 , 2000, 2999 },
+ { NDBCNTR_REF, 0 , 1000, 1999 },
+ { QMGR_REF, 1 , 1, 999 },
+ { CMVMI_REF, 1 , 9000, 9999 },
+ { TRIX_REF, 1 , 0, 0 },
+ { BACKUP_REF, 1 , 10000, 10999 },
+ { DBUTIL_REF, 1 , 11000, 11999 },
+ { SUMA_REF, 1 , 13000, 13999 },
+ { GREP_REF, 1 , 0, 0 },
+ { DBTUX_REF, 1 , 12000, 12999 }
+};
+
+static const Uint32 ALL_BLOCKS_SZ = sizeof(ALL_BLOCKS)/sizeof(BlockInfo);
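+
+/**
+ * ALL_BLOCKS is also the broadcast list used by updateNodeState() further
+ * down: every node state change is reported to each listed block via
+ * NODE_STATE_REP. The ErrorInsertStart/Stop columns presumably delimit each
+ * block's error-insert number range.
+ */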
+
+/*******************************/
+/* CONTINUEB */
+/*******************************/
+void Ndbcntr::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ UintR Ttemp1 = signal->theData[0];
+ switch (Ttemp1) {
+ case ZSTARTUP:{
+ if(getNodeState().startLevel == NodeState::SL_STARTED){
+ jam();
+ return;
+ }
+
+ if(cmasterNodeId == getOwnNodeId() && c_start.m_starting.isclear()){
+ jam();
+ trySystemRestart(signal);
+ // Fall-through
+ }
+
+ Uint64 now = NdbTick_CurrentMillisecond();
+ if(now > c_start.m_startFailureTimeout)
+ {
+ jam();
+ Uint32 to_3= 0;
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3);
+ BaseString tmp;
+ tmp.append("Shutting down node as total restart time exceeds "
+ " StartFailureTimeout as set in config file ");
+ if(to_3 == 0)
+ tmp.append(" 0 (inifinite)");
+ else
+ tmp.appfmt(" %d", to_3);
+
+ progError(__LINE__, ERR_SYSTEM_ERROR, tmp.c_str());
+ }
+
+ signal->theData[0] = ZSTARTUP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
+ break;
+ }
+ case ZSHUTDOWN:
+ jam();
+ c_stopRec.checkTimeout(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Ndbcntr::execCONTINUEB()
+
+/*******************************/
+/* SYSTEM_ERROR */
+/*******************************/
+void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
+{
+ const SystemError * const sysErr = (SystemError *)signal->getDataPtr();
+ char buf[100];
+ int killingNode = refToNode(sysErr->errorRef);
+
+ jamEntry();
+ switch (sysErr->errorCode){
+ case SystemError::StartInProgressError:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "master start in progress error",
+ killingNode);
+ break;
+
+ case SystemError::GCPStopDetected:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "GCP stop was detected",
+ killingNode);
+ break;
+
+ case SystemError::ScanfragTimeout:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "a fragment scan timed out and could not be stopped",
+ killingNode);
+ break;
+
+ case SystemError::ScanfragStateError:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "the state of a fragment scan was out of sync.",
+ killingNode);
+ break;
+
+ case SystemError::CopyFragRefError:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "it could not copy a fragment during node restart",
+ killingNode);
+ break;
+
+ default:
+ BaseString::snprintf(buf, sizeof(buf), "System error %d, "
+ " this node was killed by node %d",
+ sysErr->errorCode, killingNode);
+ break;
+ }
+
+ progError(__LINE__,
+ ERR_SYSTEM_ERROR,
+ buf);
+ return;
+}//Ndbcntr::execSYSTEM_ERROR()
+
+void Ndbcntr::execSTTOR(Signal* signal)
+{
+ jamEntry();
+ cstartPhase = signal->theData[1];
+
+ NodeState newState(NodeState::SL_STARTING, cstartPhase,
+ (NodeState::StartType)ctypeOfStart);
+ updateNodeState(signal, newState);
+
+ cndbBlocksCount = 0;
+ cinternalStartphase = cstartPhase - 1;
+
+ switch (cstartPhase) {
+ case 0:
+ if(theConfiguration.getInitialStart()){
+ jam();
+ c_fsRemoveCount = 0;
+ clearFilesystem(signal);
+ return;
+ }
+ sendSttorry(signal);
+ break;
+ case ZSTART_PHASE_1:
+ jam();
+ startPhase1Lab(signal);
+ break;
+ case ZSTART_PHASE_2:
+ jam();
+ startPhase2Lab(signal);
+ break;
+ case ZSTART_PHASE_3:
+ jam();
+ startPhase3Lab(signal);
+ break;
+ case ZSTART_PHASE_4:
+ jam();
+ startPhase4Lab(signal);
+ break;
+ case ZSTART_PHASE_5:
+ jam();
+ startPhase5Lab(signal);
+ break;
+ case 6:
+ jam();
+ getNodeGroup(signal);
+ // getNodeGroup() sends the STTORRY itself
+ break;
+ case ZSTART_PHASE_8:
+ jam();
+ startPhase8Lab(signal);
+ break;
+ case ZSTART_PHASE_9:
+ jam();
+ startPhase9Lab(signal);
+ break;
+ default:
+ jam();
+ sendSttorry(signal);
+ break;
+ }//switch
+}//Ndbcntr::execSTTOR()
+
+void
+Ndbcntr::getNodeGroup(Signal* signal){
+ jam();
+ CheckNodeGroups * sd = (CheckNodeGroups*)signal->getDataPtrSend();
+ sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::GetNodeGroup;
+ EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+ c_nodeGroup = sd->output;
+ sendSttorry(signal);
+}
+
+/*******************************/
+/* NDB_STTORRY */
+/*******************************/
+void Ndbcntr::execNDB_STTORRY(Signal* signal)
+{
+ jamEntry();
+ switch (cstartPhase) {
+ case ZSTART_PHASE_2:
+ jam();
+ ph2GLab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_3:
+ jam();
+ ph3ALab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_4:
+ jam();
+ ph4BLab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_5:
+ jam();
+ ph5ALab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_6:
+ jam();
+ ph6ALab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_7:
+ jam();
+ ph6BLab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_8:
+ jam();
+ ph7ALab(signal);
+ return;
+ break;
+ case ZSTART_PHASE_9:
+ jam();
+ ph8ALab(signal);
+ return;
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+}//Ndbcntr::execNDB_STTORRY()
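+
+/**
+ * The switch above routes each NDB_STTORRY back into the per-phase labels
+ * (external phases 2..9 -> ph2GLab .. ph8ALab). Each label sends the next
+ * NDB_STTOR while cndbBlocksCount < ZNO_NDB_BLOCKS and otherwise finishes
+ * the phase with an STTORRY and/or a waitpoint report.
+ */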
+
+void Ndbcntr::startPhase1Lab(Signal* signal)
+{
+ jamEntry();
+
+ initData(signal);
+
+ cdynamicNodeId = 0;
+
+ NdbBlocksRecPtr ndbBlocksPtr;
+ ndbBlocksPtr.i = 0;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBLQH_REF;
+ ndbBlocksPtr.i = 1;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBDICT_REF;
+ ndbBlocksPtr.i = 2;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBTUP_REF;
+ ndbBlocksPtr.i = 3;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBACC_REF;
+ ndbBlocksPtr.i = 4;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBTC_REF;
+ ndbBlocksPtr.i = 5;
+ ptrAss(ndbBlocksPtr, ndbBlocksRec);
+ ndbBlocksPtr.p->blockref = DBDIH_REF;
+ sendSttorry(signal);
+ return;
+}
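+
+/**
+ * startPhase1Lab() above fixes the NDB_STTOR delivery order: ndbBlocksRec
+ * entries 0..5 are LQH, DICT, TUP, ACC, TC, DIH, and sendNdbSttor() walks
+ * them in that order using cndbBlocksCount as index.
+ */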
+
+void Ndbcntr::execREAD_NODESREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execREAD_NODESREF()
+
+
+/*******************************/
+/* NDB_STARTREF */
+/*******************************/
+void Ndbcntr::execNDB_STARTREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execNDB_STARTREF()
+
+/*******************************/
+/* STTOR */
+/*******************************/
+void Ndbcntr::startPhase2Lab(Signal* signal)
+{
+ c_start.m_lastGci = 0;
+ c_start.m_lastGciNodeId = getOwnNodeId();
+
+ signal->theData[0] = reference();
+ sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB);
+ return;
+}//Ndbcntr::startPhase2Lab()
+
+/*******************************/
+/* DIH_RESTARTCONF */
+/*******************************/
+void Ndbcntr::execDIH_RESTARTCONF(Signal* signal)
+{
+ jamEntry();
+ //cmasterDihId = signal->theData[0];
+ c_start.m_lastGci = signal->theData[1];
+ ctypeOfStart = NodeState::ST_SYSTEM_RESTART;
+ ph2ALab(signal);
+ return;
+}//Ndbcntr::execDIH_RESTARTCONF()
+
+/*******************************/
+/* DIH_RESTARTREF */
+/*******************************/
+void Ndbcntr::execDIH_RESTARTREF(Signal* signal)
+{
+ jamEntry();
+ ctypeOfStart = NodeState::ST_INITIAL_START;
+ ph2ALab(signal);
+ return;
+}//Ndbcntr::execDIH_RESTARTREF()
+
+void Ndbcntr::ph2ALab(Signal* signal)
+{
+ /******************************/
+ /* request configured nodes */
+ /* from QMGR */
+ /* READ_NODESREQ */
+ /******************************/
+ signal->theData[0] = reference();
+ sendSignal(QMGR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+}//Ndbcntr::ph2ALab()
+
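+/**
+ * A configured timeout of 0 means "wait forever"; map it to the maximum
+ * Uint64 so comparisons against the current time never fire,
+ * e.g. setTimeout(now, 0) == ~(Uint64)0.
+ */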
+inline
+Uint64
+setTimeout(Uint64 time, Uint32 timeoutValue){
+ if(timeoutValue == 0)
+ return ~(Uint64)0;
+ return time + timeoutValue;
+}
+
+/*******************************/
+/* READ_NODESCONF */
+/*******************************/
+void Ndbcntr::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+ const ReadNodesConf * readNodes = (ReadNodesConf *)&signal->theData[0];
+
+ cmasterNodeId = readNodes->masterNodeId;
+ cdynamicNodeId = readNodes->ndynamicId;
+
+ /**
+ * All defined nodes...
+ */
+ c_allDefinedNodes.assign(NdbNodeBitmask::Size, readNodes->allNodes);
+ c_clusterNodes.assign(NdbNodeBitmask::Size, readNodes->clusterNodes);
+
+ Uint32 to_1 = 30000;
+ Uint32 to_2 = 0;
+ Uint32 to_3 = 0;
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+
+ ndbrequire(p != 0);
+ ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT, &to_1);
+ ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2);
+ ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3);
+
+ c_start.m_startTime = NdbTick_CurrentMillisecond();
+ c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1);
+ c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2);
+ c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3);
+
+ UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD
+
+ sendCntrStartReq(signal);
+
+ signal->theData[0] = ZSTARTUP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
+
+ return;
+}
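+
+/**
+ * The partial, partitioned and failure timeouts above are taken from the
+ * CFG_DB_START_*_TIMEOUT parameters and anchored at m_startTime. The delayed
+ * ZSTARTUP CONTINUEB sent last re-checks the start roughly once per second
+ * until the node reaches SL_STARTED.
+ */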
+
+void
+Ndbcntr::execCM_ADD_REP(Signal* signal){
+ jamEntry();
+ c_clusterNodes.set(signal->theData[0]);
+}
+
+void
+Ndbcntr::sendCntrStartReq(Signal * signal){
+ jamEntry();
+
+ CntrStartReq * req = (CntrStartReq*)signal->getDataPtrSend();
+ req->startType = ctypeOfStart;
+ req->lastGci = c_start.m_lastGci;
+ req->nodeId = getOwnNodeId();
+ sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_START_REQ,
+ signal, CntrStartReq::SignalLength, JBB);
+}
+
+void
+Ndbcntr::execCNTR_START_REF(Signal * signal){
+ jamEntry();
+ const CntrStartRef * ref = (CntrStartRef*)signal->getDataPtr();
+
+ switch(ref->errorCode){
+ case CntrStartRef::NotMaster:
+ jam();
+ cmasterNodeId = ref->masterNodeId;
+ sendCntrStartReq(signal);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Ndbcntr::StartRecord::reset(){
+ m_starting.clear();
+ m_waiting.clear();
+ m_withLog.clear();
+ m_withoutLog.clear();
+ m_lastGci = m_lastGciNodeId = 0;
+ m_startPartialTimeout = ~0;
+ m_startPartitionedTimeout = ~0;
+ m_startFailureTimeout = ~0;
+
+ m_logNodesCount = 0;
+}
+
+void
+Ndbcntr::execCNTR_START_CONF(Signal * signal){
+ jamEntry();
+ const CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr();
+
+ cnoStartNodes = conf->noStartNodes;
+ ctypeOfStart = (NodeState::StartType)conf->startType;
+ c_start.m_lastGci = conf->startGci;
+ cmasterNodeId = conf->masterNodeId;
+ NdbNodeBitmask tmp;
+ tmp.assign(NdbNodeBitmask::Size, conf->startedNodes);
+ c_startedNodes.bitOR(tmp);
+ c_start.m_starting.assign(NdbNodeBitmask::Size, conf->startingNodes);
+ ph2GLab(signal);
+
+ UpgradeStartup::sendCmAppChg(* this, signal, 2); //START
+}
+
+/**
+ * Tried with parallel nr, but it crashed in DIH
+ * so I turned it off, as I don't want to debug DIH now...
+ * Jonas 19/11-03
+ *
+ * After trying for 2 hours, I gave up.
+ * DIH is not designed to support it, and
+ * it requires quite a lot of changes to
+ * make it work
+ * Jonas 5/12-03
+ */
+#define PARALLELL_NR 0
+
+#if PARALLELL_NR
+const bool parallellNR = true;
+#else
+const bool parallellNR = false;
+#endif
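+
+/**
+ * parallellNR decides whether startWaitingNodes() below hands CNTR_START_CONF
+ * to one waiting node at a time (the current setting) or to all waiting
+ * nodes in one go.
+ */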
+
+void
+Ndbcntr::execCNTR_START_REP(Signal* signal){
+ jamEntry();
+ Uint32 nodeId = signal->theData[0];
+ c_startedNodes.set(nodeId);
+ c_start.m_starting.clear(nodeId);
+
+ if(!c_start.m_starting.isclear()){
+ jam();
+ return;
+ }
+
+ if(cmasterNodeId != getOwnNodeId()){
+ jam();
+ c_start.reset();
+ return;
+ }
+
+ if(c_start.m_waiting.isclear()){
+ jam();
+ c_start.reset();
+ return;
+ }
+
+ startWaitingNodes(signal);
+}
+
+void
+Ndbcntr::execCNTR_START_REQ(Signal * signal){
+ jamEntry();
+ const CntrStartReq * req = (CntrStartReq*)signal->getDataPtr();
+
+ const Uint32 nodeId = req->nodeId;
+ const Uint32 lastGci = req->lastGci;
+ const NodeState::StartType st = (NodeState::StartType)req->startType;
+
+ if(cmasterNodeId == 0){
+ jam();
+ // Has not completed READNODES yet
+ sendSignalWithDelay(reference(), GSN_CNTR_START_REQ, signal, 100,
+ signal->getLength());
+ return;
+ }
+
+ if(cmasterNodeId != getOwnNodeId()){
+ jam();
+ sendCntrStartRef(signal, nodeId, CntrStartRef::NotMaster);
+ return;
+ }
+
+ const NodeState & nodeState = getNodeState();
+ switch(nodeState.startLevel){
+ case NodeState::SL_NOTHING:
+ case NodeState::SL_CMVMI:
+ jam();
+ ndbrequire(false);
+ case NodeState::SL_STARTING:
+ case NodeState::SL_STARTED:
+ jam();
+ break;
+
+ case NodeState::SL_STOPPING_1:
+ case NodeState::SL_STOPPING_2:
+ case NodeState::SL_STOPPING_3:
+ case NodeState::SL_STOPPING_4:
+ jam();
+ sendCntrStartRef(signal, nodeId, CntrStartRef::StopInProgress);
+ return;
+ }
+
+ /**
+ * Am I starting (or started)
+ */
+ const bool starting = (nodeState.startLevel != NodeState::SL_STARTED);
+
+ c_start.m_waiting.set(nodeId);
+ switch(st){
+ case NodeState::ST_INITIAL_START:
+ jam();
+ c_start.m_withoutLog.set(nodeId);
+ break;
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ c_start.m_withLog.set(nodeId);
+ if(starting && lastGci > c_start.m_lastGci){
+ jam();
+ CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend();
+ ref->errorCode = CntrStartRef::NotMaster;
+ ref->masterNodeId = nodeId;
+ NodeReceiverGroup rg (NDBCNTR, c_start.m_waiting);
+ sendSignal(rg, GSN_CNTR_START_REF, signal,
+ CntrStartRef::SignalLength, JBB);
+ return;
+ }
+ if(starting){
+ jam();
+ Uint32 i = c_start.m_logNodesCount++;
+ c_start.m_logNodes[i].m_nodeId = nodeId;
+ c_start.m_logNodes[i].m_lastGci = req->lastGci;
+ }
+ break;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ case NodeState::ST_ILLEGAL_TYPE:
+ ndbrequire(false);
+ }
+
+ const bool startInProgress = !c_start.m_starting.isclear();
+
+ if((starting && startInProgress) || (startInProgress && !parallellNR)){
+ jam();
+ // We're already starting together with a bunch of nodes
+ // Let this node wait...
+ return;
+ }
+
+ if(starting){
+ jam();
+ trySystemRestart(signal);
+ } else {
+ jam();
+ startWaitingNodes(signal);
+ }
+ return;
+}
+
+void
+Ndbcntr::startWaitingNodes(Signal * signal){
+
+#if ! PARALLELL_NR
+ const Uint32 nodeId = c_start.m_waiting.find(0);
+ const Uint32 Tref = calcNdbCntrBlockRef(nodeId);
+ ndbrequire(nodeId != c_start.m_waiting.NotFound);
+
+ NodeState::StartType nrType = NodeState::ST_NODE_RESTART;
+ if(c_start.m_withoutLog.get(nodeId)){
+ jam();
+ nrType = NodeState::ST_INITIAL_NODE_RESTART;
+ }
+
+ /**
+ * Let node perform restart
+ */
+ CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend();
+ conf->noStartNodes = 1;
+ conf->startType = nrType;
+ conf->startGci = ~0; // Not used
+ conf->masterNodeId = getOwnNodeId();
+ BitmaskImpl::clear(NdbNodeBitmask::Size, conf->startingNodes);
+ BitmaskImpl::set(NdbNodeBitmask::Size, conf->startingNodes, nodeId);
+ c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);
+ sendSignal(Tref, GSN_CNTR_START_CONF, signal,
+ CntrStartConf::SignalLength, JBB);
+
+ c_start.m_waiting.clear(nodeId);
+ c_start.m_withLog.clear(nodeId);
+ c_start.m_withoutLog.clear(nodeId);
+ c_start.m_starting.set(nodeId);
+#else
+ // Parallel nr
+
+ c_start.m_starting = c_start.m_waiting;
+ c_start.m_waiting.clear();
+
+ CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend();
+ conf->noStartNodes = 1;
+ conf->startGci = ~0; // Not used
+ conf->masterNodeId = getOwnNodeId();
+ c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes);
+ c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);
+
+ char buf[100];
+ if(!c_start.m_withLog.isclear()){
+ jam();
+ ndbout_c("Starting nodes w/ log: %s", c_start.m_withLog.getText(buf));
+
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_withLog);
+ conf->startType = NodeState::ST_NODE_RESTART;
+
+ sendSignal(rg, GSN_CNTR_START_CONF, signal,
+ CntrStartConf::SignalLength, JBB);
+ }
+
+ if(!c_start.m_withoutLog.isclear()){
+ jam();
+ ndbout_c("Starting nodes wo/ log: %s", c_start.m_withoutLog.getText(buf));
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_withoutLog);
+ conf->startType = NodeState::ST_INITIAL_NODE_RESTART;
+
+ sendSignal(rg, GSN_CNTR_START_CONF, signal,
+ CntrStartConf::SignalLength, JBB);
+ }
+
+ c_start.m_waiting.clear();
+ c_start.m_withLog.clear();
+ c_start.m_withoutLog.clear();
+#endif
+}
+
+void
+Ndbcntr::sendCntrStartRef(Signal * signal,
+ Uint32 nodeId, CntrStartRef::ErrorCode code){
+ CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend();
+ ref->errorCode = code;
+ ref->masterNodeId = cmasterNodeId;
+ sendSignal(calcNdbCntrBlockRef(nodeId), GSN_CNTR_START_REF, signal,
+ CntrStartRef::SignalLength, JBB);
+}
+
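+/**
+ * checkNodeGroups() is a synchronous helper: it fills in a CheckNodeGroups
+ * request, EXECUTE_DIRECTs it into DBDIH and reads the verdict (Win, Lose or
+ * Partitioning) straight out of the same signal object.
+ */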
+CheckNodeGroups::Output
+Ndbcntr::checkNodeGroups(Signal* signal, const NdbNodeBitmask & mask){
+ CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
+ sd->blockRef = reference();
+ sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck;
+ sd->mask = mask;
+ EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+ return (CheckNodeGroups::Output)sd->output;
+}
+
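+/**
+ * trySystemRestart() decides whether a system restart (or initial start) can
+ * begin now: all known cluster nodes must be waiting, a partial start is only
+ * allowed once the partial-start timeout has expired, and a restart with redo
+ * logs additionally requires the waiting nodes to win (or be allowed to
+ * partition in) the node group check in DIH.
+ */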
+bool
+Ndbcntr::trySystemRestart(Signal* signal){
+ /**
+ * System restart something
+ */
+ const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes);
+ const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes);
+ const Uint64 now = NdbTick_CurrentMillisecond();
+
+ if(!allClusterNodes){
+ jam();
+ return false;
+ }
+
+ if(!allNodes && c_start.m_startPartialTimeout > now){
+ jam();
+ return false;
+ }
+
+ NodeState::StartType srType = NodeState::ST_SYSTEM_RESTART;
+ if(c_start.m_waiting.equal(c_start.m_withoutLog)){
+ if(!allNodes){
+ jam();
+ return false;
+ }
+ jam();
+ srType = NodeState::ST_INITIAL_START;
+ c_start.m_starting = c_start.m_withoutLog; // Used for starting...
+ c_start.m_withoutLog.clear();
+ } else {
+
+ CheckNodeGroups::Output wLog = checkNodeGroups(signal, c_start.m_withLog);
+
+ switch (wLog) {
+ case CheckNodeGroups::Win:
+ jam();
+ break;
+ case CheckNodeGroups::Lose:
+ jam();
+ // If we lose with all nodes, then we're in trouble
+ ndbrequire(!allNodes);
+ return false;
+ case CheckNodeGroups::Partitioning:
+ jam();
+ bool allowPartition = (c_start.m_startPartitionedTimeout != (Uint64)~0);
+
+ if(allNodes){
+ if(allowPartition){
+ jam();
+ break;
+ }
+ ndbrequire(false); // All nodes -> partitioning, which is not allowed
+ }
+
+ if(c_start.m_startPartitionedTimeout > now){
+ jam();
+ return false;
+ }
+ break;
+ }
+
+ // For now only with the "logged"-ones.
+ // Let the others do node restart afterwards...
+ c_start.m_starting = c_start.m_withLog;
+ c_start.m_withLog.clear();
+ }
+
+ /**
+ * Okidoki, we try to start
+ */
+ CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr();
+ conf->noStartNodes = c_start.m_starting.count();
+ conf->startType = srType;
+ conf->startGci = c_start.m_lastGci;
+ conf->masterNodeId = c_start.m_lastGciNodeId;
+ c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes);
+ c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes);
+
+ ndbrequire(c_start.m_lastGciNodeId == getOwnNodeId());
+
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
+ sendSignal(rg, GSN_CNTR_START_CONF, signal, CntrStartConf::SignalLength,JBB);
+
+ c_start.m_waiting.bitANDC(c_start.m_starting);
+
+ return true;
+}
+
+void Ndbcntr::ph2GLab(Signal* signal)
+{
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }//if
+ sendSttorry(signal);
+ return;
+}//Ndbcntr::ph2GLab()
+
+/*
+4.4 START PHASE 3 */
+/*###########################################################################*/
+// SEND SIGNAL NDB_STTOR TO ALL BLOCKS, ACC, DICT, DIH, LQH, TC AND TUP
+// WHEN ALL BLOCKS HAVE RETURNED THEIR NDB_STTORRY, ALL BLOCKS HAVE FINISHED
+// THEIR LOCAL CONNECTIONS SUCCESSFULLY
+// AND THEN WE CAN SEND APPL_STARTREG TO INFORM QMGR THAT WE ARE READY TO
+// SET UP DISTRIBUTED CONNECTIONS.
+/*--------------------------------------------------------------*/
+// THIS IS NDB START PHASE 3.
+/*--------------------------------------------------------------*/
+/*******************************/
+/* STTOR */
+/*******************************/
+void Ndbcntr::startPhase3Lab(Signal* signal)
+{
+ ph3ALab(signal);
+ return;
+}//Ndbcntr::startPhase3Lab()
+
+/*******************************/
+/* NDB_STTORRY */
+/*******************************/
+void Ndbcntr::ph3ALab(Signal* signal)
+{
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }//if
+
+ sendSttorry(signal);
+ return;
+}//Ndbcntr::ph3ALab()
+
+/*
+4.5 START PHASE 4 */
+/*###########################################################################*/
+// WAIT FOR ALL NODES IN CLUSTER TO CHANGE STATE INTO ZSTART.
+// APPL_CHANGEREP IS ALWAYS SENT WHEN SOMEONE HAS
+// CHANGED THEIR STATE. APPL_STARTCONF INDICATES THAT ALL NODES ARE IN START
+// STATE SEND NDB_STARTREQ TO DIH AND THEN WAIT FOR NDB_STARTCONF
+/*---------------------------------------------------------------------------*/
+/*******************************/
+/* STTOR */
+/*******************************/
+void Ndbcntr::startPhase4Lab(Signal* signal)
+{
+ ph4ALab(signal);
+}//Ndbcntr::startPhase4Lab()
+
+
+void Ndbcntr::ph4ALab(Signal* signal)
+{
+ ph4BLab(signal);
+ return;
+}//Ndbcntr::ph4ALab()
+
+/*******************************/
+/* NDB_STTORRY */
+/*******************************/
+void Ndbcntr::ph4BLab(Signal* signal)
+{
+/*--------------------------------------*/
+/* CASE: CSTART_PHASE = ZSTART_PHASE_4 */
+/*--------------------------------------*/
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }//if
+ if ((ctypeOfStart == NodeState::ST_NODE_RESTART) ||
+ (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ sendSttorry(signal);
+ return;
+ }//if
+ waitpoint41Lab(signal);
+ return;
+}//Ndbcntr::ph4BLab()
+
+void Ndbcntr::waitpoint41Lab(Signal* signal)
+{
+ if (getOwnNodeId() == cmasterNodeId) {
+ jam();
+/*--------------------------------------*/
+/* MASTER WAITS UNTIL ALL SLAVES HAVE */
+/* SENT THE REPORTS */
+/*--------------------------------------*/
+ cnoWaitrep++;
+ if (cnoWaitrep == cnoStartNodes) {
+ jam();
+ cnoWaitrep = 0;
+/*---------------------------------------------------------------------------*/
+// NDB_STARTREQ STARTS THE SET-UP OF ALL DISTRIBUTION INFORMATION IN DIH AND
+// DICT. AFTER SETTING UP THIS
+// DATA IT USES THAT DATA TO DECIDE WHICH FRAGMENTS ARE TO START AND
+// WHERE THEY ARE TO START. THEN
+// IT SETS UP THE FRAGMENTS AND RECOVERS THEM BY:
+// 1) READING A LOCAL CHECKPOINT FROM DISK.
+// 2) EXECUTING THE UNDO LOG ON INDEX AND DATA.
+// 3) EXECUTING THE FRAGMENT REDO LOG FROM ONE OR SEVERAL NODES TO
+// RESTORE THE RESTART CONFIGURATION OF DATA IN NDB CLUSTER.
+/*---------------------------------------------------------------------------*/
+ signal->theData[0] = reference();
+ signal->theData[1] = ctypeOfStart;
+ sendSignal(DBDIH_REF, GSN_NDB_STARTREQ, signal, 2, JBB);
+ }//if
+ } else {
+ jam();
+/*--------------------------------------*/
+/* SLAVE NODES WILL PASS HERE ONCE AND */
+/* SEND A WAITPOINT REPORT TO MASTER. */
+/* SLAVES WON'T DO ANYTHING UNTIL THEY */
+/* RECEIVE A WAIT REPORT FROM THE MASTER*/
+/*--------------------------------------*/
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_4_1;
+ sendSignal(calcNdbCntrBlockRef(cmasterNodeId),
+ GSN_CNTR_WAITREP, signal, 2, JBB);
+ }//if
+ return;
+}//Ndbcntr::waitpoint41Lab()
+
+/*******************************/
+/* NDB_STARTCONF */
+/*******************************/
+void Ndbcntr::execNDB_STARTCONF(Signal* signal)
+{
+ jamEntry();
+
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_4_2;
+ sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB);
+ return;
+}//Ndbcntr::execNDB_STARTCONF()
+
+/*
+4.6 START PHASE 5 */
+/*###########################################################################*/
+// SEND APPL_RUN TO THE QMGR IN THIS BLOCK
+// SEND NDB_STTOR TO ALL BLOCKS ACC, DICT, DIH, LQH, TC AND TUP, THEN WAIT FOR
+// THEIR NDB_STTORRY
+/*---------------------------------------------------------------------------*/
+/*******************************/
+/* STTOR */
+/*******************************/
+void Ndbcntr::startPhase5Lab(Signal* signal)
+{
+ ph5ALab(signal);
+ return;
+}//Ndbcntr::startPhase5Lab()
+
+/*******************************/
+/* NDB_STTORRY */
+/*******************************/
+/*---------------------------------------------------------------------------*/
+// THIS IS NDB START PHASE 5.
+/*---------------------------------------------------------------------------*/
+// IN THIS START PHASE TUP INITIALISES DISK FILES FOR DISK STORAGE IF INITIAL
+// START. DIH WILL START UP
+// THE GLOBAL CHECKPOINT PROTOCOL AND WILL CONCLUDE ANY UNFINISHED TAKE OVERS
+// THAT STARTED BEFORE THE SYSTEM CRASH.
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::ph5ALab(Signal* signal)
+{
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }//if
+
+ cstartPhase = cstartPhase + 1;
+ cinternalStartphase = cstartPhase - 1;
+ if (getOwnNodeId() == cmasterNodeId) {
+ switch(ctypeOfStart){
+ case NodeState::ST_INITIAL_START:
+ jam();
+ /*--------------------------------------*/
+ /* MASTER CNTR IS RESPONSIBLE FOR */
+ /* CREATING SYSTEM TABLES */
+ /*--------------------------------------*/
+ createSystableLab(signal, 0);
+ return;
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ waitpoint52Lab(signal);
+ return;
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
+ break;
+ case NodeState::ST_ILLEGAL_TYPE:
+ jam();
+ break;
+ }
+ ndbrequire(false);
+ }
+
+ /**
+ * Not master
+ */
+ NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend();
+ switch(ctypeOfStart){
+ case NodeState::ST_NODE_RESTART:
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ jam();
+ /*----------------------------------------------------------------------*/
+ // SEND NDB START PHASE 5 IN NODE RESTARTS TO COPY DATA TO THE NEWLY
+ // STARTED NODE.
+ /*----------------------------------------------------------------------*/
+ req->senderRef = reference();
+ req->nodeId = getOwnNodeId();
+ req->internalStartPhase = cinternalStartphase;
+ req->typeOfStart = ctypeOfStart;
+ req->masterNodeId = cmasterNodeId;
+
+ //#define TRACE_STTOR
+#ifdef TRACE_STTOR
+ ndbout_c("sending NDB_STTOR(%d) to DIH", cinternalStartphase);
+#endif
+ sendSignal(DBDIH_REF, GSN_NDB_STTOR, signal,
+ NdbSttor::SignalLength, JBB);
+ return;
+ case NodeState::ST_INITIAL_START:
+ case NodeState::ST_SYSTEM_RESTART:
+ jam();
+ /*--------------------------------------*/
+ /* DURING SYSTEM RESTART AND INITIAL START:*/
+ /* SLAVE NODES WILL PASS HERE ONCE AND */
+ /* SEND A WAITPOINT REPORT TO MASTER. */
+ /* SLAVES WON'T DO ANYTHING UNTIL THEY */
+ /* RECEIVE A WAIT REPORT FROM THE MASTER*/
+ /* WHEN THE MASTER HAS FINISHED HIS WORK*/
+ /*--------------------------------------*/
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_5_2;
+ sendSignal(calcNdbCntrBlockRef(cmasterNodeId),
+ GSN_CNTR_WAITREP, signal, 2, JBB);
+ return;
+ default:
+ ndbrequire(false);
+ }
+}//Ndbcntr::ph5ALab()
+
+void Ndbcntr::waitpoint52Lab(Signal* signal)
+{
+ cnoWaitrep = cnoWaitrep + 1;
+/*---------------------------------------------------------------------------*/
+// THIS WAITING POINT IS ONLY USED BY A MASTER NODE. WE WILL EXECUTE NDB START
+// PHASE 5 FOR DIH IN THE
+// MASTER. THIS WILL START UP LOCAL CHECKPOINTS AND WILL ALSO CONCLUDE ANY
+// UNFINISHED LOCAL CHECKPOINTS
+// BEFORE THE SYSTEM CRASH. THIS WILL ENSURE THAT WE ALWAYS RESTART FROM A
+// WELL KNOWN STATE.
+/*---------------------------------------------------------------------------*/
+/*--------------------------------------*/
+/* MASTER WAITS UNTIL IT HAS RECEIVED WAIT */
+/* REPORTS FROM ALL SLAVE CNTR */
+/*--------------------------------------*/
+ if (cnoWaitrep == cnoStartNodes) {
+ jam();
+ cnoWaitrep = 0;
+
+ NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->nodeId = getOwnNodeId();
+ req->internalStartPhase = cinternalStartphase;
+ req->typeOfStart = ctypeOfStart;
+ req->masterNodeId = cmasterNodeId;
+#ifdef TRACE_STTOR
+ ndbout_c("sending NDB_STTOR(%d) to DIH", cinternalStartphase);
+#endif
+ sendSignal(DBDIH_REF, GSN_NDB_STTOR, signal,
+ NdbSttor::SignalLength, JBB);
+ }//if
+ return;
+}//Ndbcntr::waitpoint52Lab()
+
+/*******************************/
+/* NDB_STTORRY */
+/*******************************/
+void Ndbcntr::ph6ALab(Signal* signal)
+{
+ if ((ctypeOfStart == NodeState::ST_NODE_RESTART) ||
+ (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ waitpoint51Lab(signal);
+ return;
+ }//if
+
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
+ rg.m_nodes.clear(getOwnNodeId());
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_5_1;
+ sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB);
+
+ waitpoint51Lab(signal);
+ return;
+}//Ndbcntr::ph6ALab()
+
+void Ndbcntr::waitpoint51Lab(Signal* signal)
+{
+ cstartPhase = cstartPhase + 1;
+/*---------------------------------------------------------------------------*/
+// A FINAL STEP IS NOW TO SEND NDB_STTOR TO TC. THIS MAKES IT POSSIBLE TO
+// CONNECT TO TC FOR APPLICATIONS.
+// THIS IS NDB START PHASE 6 WHICH IS FOR ALL BLOCKS IN ALL NODES.
+/*---------------------------------------------------------------------------*/
+ cinternalStartphase = cstartPhase - 1;
+ cndbBlocksCount = 0;
+ ph6BLab(signal);
+ return;
+}//Ndbcntr::waitpoint51Lab()
+
+void Ndbcntr::ph6BLab(Signal* signal)
+{
+ // c_missra.currentStartPhase - cstartPhase - cinternalStartphase =
+ // 5 - 7 - 6
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }//if
+ if ((ctypeOfStart == NodeState::ST_NODE_RESTART) ||
+ (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ sendSttorry(signal);
+ return;
+ }
+ waitpoint61Lab(signal);
+}
+
+void Ndbcntr::waitpoint61Lab(Signal* signal)
+{
+ if (getOwnNodeId() == cmasterNodeId) {
+ jam();
+ cnoWaitrep6++;
+ if (cnoWaitrep6 == cnoStartNodes) {
+ jam();
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
+ rg.m_nodes.clear(getOwnNodeId());
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_6_2;
+ sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB);
+ sendSttorry(signal);
+ }
+ } else {
+ jam();
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_6_1;
+ sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_WAITREP, signal, 2, JBB);
+ }
+}
+
+// Start phase 8 (internal 7)
+void Ndbcntr::startPhase8Lab(Signal* signal)
+{
+ cinternalStartphase = cstartPhase - 1;
+ cndbBlocksCount = 0;
+ ph7ALab(signal);
+}
+
+void Ndbcntr::ph7ALab(Signal* signal)
+{
+ if (cndbBlocksCount < ZNO_NDB_BLOCKS) {
+ jam();
+ sendNdbSttor(signal);
+ return;
+ }
+ if ((ctypeOfStart == NodeState::ST_NODE_RESTART) ||
+ (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) {
+ jam();
+ sendSttorry(signal);
+ return;
+ }
+ waitpoint71Lab(signal);
+}
+
+void Ndbcntr::waitpoint71Lab(Signal* signal)
+{
+ if (getOwnNodeId() == cmasterNodeId) {
+ jam();
+ cnoWaitrep7++;
+ if (cnoWaitrep7 == cnoStartNodes) {
+ jam();
+ NodeReceiverGroup rg(NDBCNTR, c_start.m_starting);
+ rg.m_nodes.clear(getOwnNodeId());
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_7_2;
+ sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB);
+ sendSttorry(signal);
+ }
+ } else {
+ jam();
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ZWAITPOINT_7_1;
+ sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_WAITREP, signal, 2, JBB);
+ }
+}
+
+// Start phase 9 (internal 8)
+void Ndbcntr::startPhase9Lab(Signal* signal)
+{
+ cinternalStartphase = cstartPhase - 1;
+ cndbBlocksCount = 0;
+ ph8ALab(signal);
+}
+
+void Ndbcntr::ph8ALab(Signal* signal)
+{
+/*---------------------------------------------------------------------------*/
+// NODES WHICH PERFORM A NODE RESTART NEED TO GET THE DYNAMIC IDS
+// OF THE OTHER NODES HERE.
+/*---------------------------------------------------------------------------*/
+ sendSttorry(signal);
+ resetStartVariables(signal);
+ return;
+}//Ndbcntr::ph8ALab()
+
+/*******************************/
+/* CNTR_WAITREP */
+/*******************************/
+void Ndbcntr::execCNTR_WAITREP(Signal* signal)
+{
+ Uint16 twaitPoint;
+
+ jamEntry();
+ twaitPoint = signal->theData[1];
+ switch (twaitPoint) {
+ case ZWAITPOINT_4_1:
+ jam();
+ waitpoint41Lab(signal);
+ break;
+ case ZWAITPOINT_4_2:
+ jam();
+ sendSttorry(signal);
+ break;
+ case ZWAITPOINT_5_1:
+ jam();
+ waitpoint51Lab(signal);
+ break;
+ case ZWAITPOINT_5_2:
+ jam();
+ waitpoint52Lab(signal);
+ break;
+ case ZWAITPOINT_6_1:
+ jam();
+ waitpoint61Lab(signal);
+ break;
+ case ZWAITPOINT_6_2:
+ jam();
+ sendSttorry(signal);
+ break;
+ case ZWAITPOINT_7_1:
+ jam();
+ waitpoint71Lab(signal);
+ break;
+ case ZWAITPOINT_7_2:
+ jam();
+ sendSttorry(signal);
+ break;
+ default:
+ jam();
+ systemErrorLab(signal);
+ break;
+ }//switch
+}//Ndbcntr::execCNTR_WAITREP()
+
+/*******************************/
+/* NODE_FAILREP */
+/*******************************/
+void Ndbcntr::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+
+ const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0];
+ NdbNodeBitmask allFailed;
+ allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes);
+
+ NdbNodeBitmask failedStarted = c_startedNodes;
+ NdbNodeBitmask failedStarting = c_start.m_starting;
+ NdbNodeBitmask failedWaiting = c_start.m_waiting;
+
+ failedStarted.bitAND(allFailed);
+ failedStarting.bitAND(allFailed);
+ failedWaiting.bitAND(allFailed);
+
+ const bool tMasterFailed = allFailed.get(cmasterNodeId);
+ const bool tStarted = !failedStarted.isclear();
+ const bool tStarting = !failedStarting.isclear();
+ const bool tWaiting = !failedWaiting.isclear();
+
+ if(tMasterFailed){
+ jam();
+ /**
+ * If master has failed choose qmgr president as master
+ */
+ cmasterNodeId = nodeFail->masterNodeId;
+ }
+
+ /**
+ * Clear node bitmasks from failed nodes
+ */
+ c_start.m_starting.bitANDC(allFailed);
+ c_start.m_waiting.bitANDC(allFailed);
+ c_start.m_withLog.bitANDC(allFailed);
+ c_start.m_withoutLog.bitANDC(allFailed);
+ c_clusterNodes.bitANDC(allFailed);
+ c_startedNodes.bitANDC(allFailed);
+
+ const NodeState & st = getNodeState();
+ if(st.startLevel == st.SL_STARTING){
+ jam();
+
+ const Uint32 phase = st.starting.startPhase;
+
+ const bool tStartConf = (phase > 2) || (phase == 2 && cndbBlocksCount > 0);
+
+ if(tMasterFailed){
+ progError(__LINE__,
+ ERR_SR_OTHERNODEFAILED,
+ "Unhandled node failure during restart");
+ }
+
+ if(tStartConf && tStarting){
+ // One of other starting nodes has crashed...
+ progError(__LINE__,
+ ERR_SR_OTHERNODEFAILED,
+ "Unhandled node failure of starting node during restart");
+ }
+
+ if(tStartConf && tStarted){
+ // One of other started nodes has crashed...
+ progError(__LINE__,
+ ERR_SR_OTHERNODEFAILED,
+ "Unhandled node failure of started node during restart");
+ }
+
+ Uint32 nodeId = 0;
+ while(!allFailed.isclear()){
+ nodeId = allFailed.find(nodeId + 1);
+ allFailed.clear(nodeId);
+ signal->theData[0] = nodeId;
+ sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB);
+ }//for
+
+ return;
+ }
+
+ ndbrequire(!allFailed.get(getOwnNodeId()));
+
+ NodeFailRep * rep = (NodeFailRep *)&signal->theData[0];
+ rep->masterNodeId = cmasterNodeId;
+
+ sendSignal(DBTC_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(DBLQH_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(DBDIH_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(DBDICT_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(BACKUP_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(SUMA_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ sendSignal(GREP_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ Uint32 nodeId = 0;
+ while(!allFailed.isclear()){
+ nodeId = allFailed.find(nodeId + 1);
+ allFailed.clear(nodeId);
+ signal->theData[0] = NDB_LE_NODE_FAILREP;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }//for
+
+ return;
+}//Ndbcntr::execNODE_FAILREP()
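+
+/**
+ * While this node is itself starting (SL_STARTING) a failure of the master,
+ * or (past the early phases) of a starting or started node, is fatal; other
+ * failures are just acknowledged to QMGR with NDB_FAILCONF. A started node
+ * forwards NODE_FAILREP to TC, LQH, DIH, DICT, BACKUP, SUMA and GREP and
+ * emits one event report per failed node.
+ */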
+
+/*******************************/
+/* READ_NODESREQ */
+/*******************************/
+void Ndbcntr::execREAD_NODESREQ(Signal* signal)
+{
+ jamEntry();
+
+ /*----------------------------------------------------------------------*/
+ // ANY BLOCK MAY SEND A REQUEST ABOUT NDB NODES AND VERSIONS IN THE
+ // SYSTEM. THIS REQUEST CAN ONLY BE HANDLED IN
+ // ABSOLUTE STARTPHASE 3 OR LATER
+ /*----------------------------------------------------------------------*/
+ BlockReference TuserBlockref = signal->theData[0];
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+
+ /**
+ * Prepare inactiveNodes bitmask.
+ * The concept as such is by the way pretty useless.
+ * It makes parallel starts more or less impossible...
+ */
+ NdbNodeBitmask tmp1;
+ tmp1.bitOR(c_startedNodes);
+ if(!getNodeState().getNodeRestartInProgress()){
+ tmp1.bitOR(c_start.m_starting);
+ } else {
+ tmp1.set(getOwnNodeId());
+ }
+
+ NdbNodeBitmask tmp2;
+ tmp2.bitOR(c_allDefinedNodes);
+ tmp2.bitANDC(tmp1);
+ /**
+ * Fill in return signal
+ */
+ tmp2.copyto(NdbNodeBitmask::Size, readNodes->inactiveNodes);
+ c_allDefinedNodes.copyto(NdbNodeBitmask::Size, readNodes->allNodes);
+ c_clusterNodes.copyto(NdbNodeBitmask::Size, readNodes->clusterNodes);
+ c_startedNodes.copyto(NdbNodeBitmask::Size, readNodes->startedNodes);
+ c_start.m_starting.copyto(NdbNodeBitmask::Size, readNodes->startingNodes);
+
+ readNodes->noOfNodes = c_allDefinedNodes.count();
+ readNodes->masterNodeId = cmasterNodeId;
+ readNodes->ndynamicId = cdynamicNodeId;
+ if (cstartPhase > ZSTART_PHASE_2) {
+ jam();
+ sendSignal(TuserBlockref, GSN_READ_NODESCONF, signal,
+ ReadNodesConf::SignalLength, JBB);
+
+ } else {
+ jam();
+ signal->theData[0] = ZNOT_AVAILABLE;
+ sendSignal(TuserBlockref, GSN_READ_NODESREF, signal, 1, JBB);
+ }//if
+}//Ndbcntr::execREAD_NODESREQ()
+
+/*----------------------------------------------------------------------*/
+// SENDS APPL_ERROR TO QMGR AND THEN SETS A POINTER OUT OF BOUNDS
+/*----------------------------------------------------------------------*/
+void Ndbcntr::systemErrorLab(Signal* signal)
+{
+ progError(0, 0); /* BUG INSERTION */
+ return;
+}//Ndbcntr::systemErrorLab()
+
+/*###########################################################################*/
+/* CNTR MASTER CREATES AND INITIALIZES A SYSTEMTABLE AT INITIALSTART */
+/* |-2048| # 1 00000001 | */
+/* | : | : | */
+/* | -1 | # 1 00000001 | */
+/* | 0 | 0 | */
+/* | 1 | 0 | */
+/* | : | : | */
+/* | 2047| 0 | */
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::createSystableLab(Signal* signal, unsigned index)
+{
+ if (index >= g_sysTableCount) {
+ ndbassert(index == g_sysTableCount);
+ startInsertTransactions(signal);
+ return;
+ }
+ const SysTable& table = *g_sysTableList[index];
+ Uint32 propPage[256];
+ LinearWriter w(propPage, 256);
+
+ // XXX remove commented-out lines later
+
+ w.first();
+ w.add(DictTabInfo::TableName, table.name);
+ w.add(DictTabInfo::TableLoggedFlag, table.tableLoggedFlag);
+ //w.add(DictTabInfo::TableKValue, 6);
+ //w.add(DictTabInfo::MinLoadFactor, 70);
+ //w.add(DictTabInfo::MaxLoadFactor, 80);
+ w.add(DictTabInfo::FragmentTypeVal, (Uint32)table.fragmentType);
+ //w.add(DictTabInfo::TableStorageVal, (Uint32)DictTabInfo::MainMemory);
+ //w.add(DictTabInfo::NoOfKeyAttr, 1);
+ w.add(DictTabInfo::NoOfAttributes, (Uint32)table.columnCount);
+ //w.add(DictTabInfo::NoOfNullable, (Uint32)0);
+ //w.add(DictTabInfo::NoOfVariable, (Uint32)0);
+ //w.add(DictTabInfo::KeyLength, 1);
+ w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
+
+ for (unsigned i = 0; i < table.columnCount; i++) {
+ const SysColumn& column = table.columnList[i];
+ ndbassert(column.pos == i);
+ w.add(DictTabInfo::AttributeName, column.name);
+ w.add(DictTabInfo::AttributeId, (Uint32)column.pos);
+ w.add(DictTabInfo::AttributeKeyFlag, (Uint32)column.keyFlag);
+ //w.add(DictTabInfo::AttributeStorage, (Uint32)DictTabInfo::MainMemory);
+ w.add(DictTabInfo::AttributeNullableFlag, (Uint32)column.nullable);
+ w.add(DictTabInfo::AttributeExtType, (Uint32)column.type);
+ w.add(DictTabInfo::AttributeExtLength, (Uint32)column.length);
+ w.add(DictTabInfo::AttributeEnd, (Uint32)true);
+ }
+ w.add(DictTabInfo::TableEnd, (Uint32)true);
+
+ Uint32 length = w.getWordsUsed();
+ LinearSectionPtr ptr[3];
+ ptr[0].p = &propPage[0];
+ ptr[0].sz = length;
+
+ CreateTableReq* const req = (CreateTableReq*)signal->getDataPtrSend();
+ req->senderData = index;
+ req->senderRef = reference();
+ sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ, signal,
+ CreateTableReq::SignalLength, JBB, ptr, 1);
+ return;
+}//Ndbcntr::createSystableLab()
+
+void Ndbcntr::execCREATE_TABLE_REF(Signal* signal)
+{
+ jamEntry();
+ progError(0,0);
+ return;
+}//Ndbcntr::execCREATE_TABLE_REF()
+
+void Ndbcntr::execCREATE_TABLE_CONF(Signal* signal)
+{
+ jamEntry();
+ CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtrSend();
+ //csystabId = conf->tableId;
+ ndbrequire(conf->senderData < g_sysTableCount);
+ const SysTable& table = *g_sysTableList[conf->senderData];
+ table.tableId = conf->tableId;
+ createSystableLab(signal, conf->senderData + 1);
+ //startInsertTransactions(signal);
+ return;
+}//Ndbcntr::execCREATE_TABLE_CONF()
+
+/*******************************/
+/* DICTRELEASECONF */
+/*******************************/
+void Ndbcntr::startInsertTransactions(Signal* signal)
+{
+ jamEntry();
+
+ ckey = 1;
+ ctransidPhase = ZTRUE;
+ signal->theData[0] = 0;
+ signal->theData[1] = reference();
+ sendSignal(DBTC_REF, GSN_TCSEIZEREQ, signal, 2, JBB);
+ return;
+}//Ndbcntr::startInsertTransactions()
+
+/*******************************/
+/* TCSEIZECONF */
+/*******************************/
+void Ndbcntr::execTCSEIZECONF(Signal* signal)
+{
+ jamEntry();
+ ctcConnectionP = signal->theData[1];
+ crSystab7Lab(signal);
+ return;
+}//Ndbcntr::execTCSEIZECONF()
+
+const unsigned int RowsPerCommit = 16;
+void Ndbcntr::crSystab7Lab(Signal* signal)
+{
+ UintR tkey;
+ UintR Tmp;
+
+ TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
+
+ UintR reqInfo_Start = 0;
+ tcKeyReq->setOperationType(reqInfo_Start, ZINSERT); // Insert
+ tcKeyReq->setKeyLength (reqInfo_Start, 1);
+ tcKeyReq->setAIInTcKeyReq (reqInfo_Start, 5);
+ tcKeyReq->setAbortOption (reqInfo_Start, TcKeyReq::AbortOnError);
+
+/* KEY LENGTH = 1, ATTRINFO LENGTH IN TCKEYREQ = 5 */
+ cresponses = 0;
+ const UintR guard0 = ckey + (RowsPerCommit - 1);
+ for (Tmp = ckey; Tmp <= guard0; Tmp++) {
+ UintR reqInfo = reqInfo_Start;
+ if (Tmp == ckey) { // First iteration, Set start flag
+ jam();
+ tcKeyReq->setStartFlag(reqInfo, 1);
+ } //if
+ if (Tmp == guard0) { // Last iteration, Set commit flag
+ jam();
+ tcKeyReq->setCommitFlag(reqInfo, 1);
+ tcKeyReq->setExecuteFlag(reqInfo, 1);
+ } //if
+ if (ctransidPhase == ZTRUE) {
+ jam();
+ tkey = 0;
+ tkey = tkey - Tmp;
+ } else {
+ jam();
+ tkey = Tmp;
+ }//if
+
+ tcKeyReq->apiConnectPtr = ctcConnectionP;
+ tcKeyReq->attrLen = 5;
+ tcKeyReq->tableId = g_sysTable_SYSTAB_0.tableId;
+ tcKeyReq->requestInfo = reqInfo;
+ tcKeyReq->tableSchemaVersion = ZSYSTAB_VERSION;
+ tcKeyReq->transId1 = 0;
+ tcKeyReq->transId2 = ckey;
+
+//-------------------------------------------------------------
+// There is no optional part in this TCKEYREQ. There is one
+// key word and five ATTRINFO words.
+//-------------------------------------------------------------
+ Uint32* tKeyDataPtr = &tcKeyReq->scanInfo;
+ Uint32* tAIDataPtr = &tKeyDataPtr[1];
+
+ tKeyDataPtr[0] = tkey;
+
+ AttributeHeader::init(&tAIDataPtr[0], 0, 1);
+ tAIDataPtr[1] = tkey;
+ AttributeHeader::init(&tAIDataPtr[2], 1, 2);
+ tAIDataPtr[3] = (tkey << 16);
+ tAIDataPtr[4] = 1;
+ sendSignal(DBTC_REF, GSN_TCKEYREQ, signal,
+ TcKeyReq::StaticLength + 6, JBB);
+ }//for
+ ckey = ckey + RowsPerCommit;
+ return;
+}//Ndbcntr::crSystab7Lab()
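+
+/**
+ * crSystab7Lab()/crSystab8Lab() fill SYSTAB_0 in batches of RowsPerCommit
+ * (16) inserts per transaction, in two passes: the first pass
+ * (ctransidPhase == ZTRUE) uses the negated key 0 - Tmp, the second pass the
+ * plain key, until ckey reaches ZSIZE_SYSTAB.
+ */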
+
+/*******************************/
+/* TCKEYCONF09 */
+/*******************************/
+void Ndbcntr::execTCKEYCONF(Signal* signal)
+{
+ const TcKeyConf * const keyConf = (TcKeyConf *)&signal->theData[0];
+
+ jamEntry();
+ cgciSystab = keyConf->gci;
+ UintR confInfo = keyConf->confInfo;
+
+ if (TcKeyConf::getMarkerFlag(confInfo)){
+ Uint32 transId1 = keyConf->transId1;
+ Uint32 transId2 = keyConf->transId2;
+ signal->theData[0] = transId1;
+ signal->theData[1] = transId2;
+ sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB);
+ }//if
+
+ cresponses = cresponses + TcKeyConf::getNoOfOperations(confInfo);
+ if (TcKeyConf::getCommitFlag(confInfo)){
+ jam();
+ ndbrequire(cresponses == RowsPerCommit);
+
+ crSystab8Lab(signal);
+ return;
+ }
+ return;
+}//Ndbcntr::execTCKEYCONF()
+
+void Ndbcntr::crSystab8Lab(Signal* signal)
+{
+ if (ckey < ZSIZE_SYSTAB) {
+ jam();
+ crSystab7Lab(signal);
+ return;
+ } else if (ctransidPhase == ZTRUE) {
+ jam();
+ ckey = 1;
+ ctransidPhase = ZFALSE;
+ crSystab7Lab(signal);
+ return;
+ }//if
+ signal->theData[0] = ctcConnectionP;
+ signal->theData[1] = reference();
+ signal->theData[2] = 0;
+ sendSignal(DBTC_REF, GSN_TCRELEASEREQ, signal, 2, JBB);
+ return;
+}//Ndbcntr::crSystab8Lab()
+
+/*******************************/
+/* TCRELEASECONF */
+/*******************************/
+void Ndbcntr::execTCRELEASECONF(Signal* signal)
+{
+ jamEntry();
+ waitpoint52Lab(signal);
+ return;
+}//Ndbcntr::execTCRELEASECONF()
+
+void Ndbcntr::crSystab9Lab(Signal* signal)
+{
+ signal->theData[1] = reference();
+ sendSignalWithDelay(DBDIH_REF, GSN_GETGCIREQ, signal, 100, 2);
+ return;
+}//Ndbcntr::crSystab9Lab()
+
+/*******************************/
+/* GETGCICONF */
+/*******************************/
+void Ndbcntr::execGETGCICONF(Signal* signal)
+{
+ jamEntry();
+
+#ifndef NO_GCP
+ if (signal->theData[1] < cgciSystab) {
+ jam();
+/*--------------------------------------*/
+/* MAKE SURE THAT THE SYSTABLE IS */
+/* NOW SAFE ON DISK */
+/*--------------------------------------*/
+ crSystab9Lab(signal);
+ return;
+ }//if
+#endif
+ waitpoint52Lab(signal);
+ return;
+}//Ndbcntr::execGETGCICONF()
+
+void Ndbcntr::execTCKEYREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execTCKEYREF()
+
+void Ndbcntr::execTCROLLBACKREP(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execTCROLLBACKREP()
+
+void Ndbcntr::execTCRELEASEREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execTCRELEASEREF()
+
+void Ndbcntr::execTCSEIZEREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Ndbcntr::execTCSEIZEREF()
+
+
+/*---------------------------------------------------------------------------*/
+/*INITIALIZE VARIABLES AND RECORDS */
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::initData(Signal* signal)
+{
+ c_start.reset();
+ cmasterNodeId = 0;
+ cnoStartNodes = 0;
+ cnoWaitrep = 0;
+}//Ndbcntr::initData()
+
+
+/*---------------------------------------------------------------------------*/
+/*RESET VARIABLES USED DURING THE START */
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::resetStartVariables(Signal* signal)
+{
+ cnoStartNodes = 0;
+ cnoWaitrep6 = cnoWaitrep7 = 0;
+}//Ndbcntr::resetStartVariables()
+
+
+/*---------------------------------------------------------------------------*/
+// SEND THE SIGNAL
+// INPUT CNDB_BLOCKS_COUNT
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::sendNdbSttor(Signal* signal)
+{
+ NdbBlocksRecPtr ndbBlocksPtr;
+
+ ndbBlocksPtr.i = cndbBlocksCount;
+ ptrCheckGuard(ndbBlocksPtr, ZSIZE_NDB_BLOCKS_REC, ndbBlocksRec);
+
+ NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->nodeId = getOwnNodeId();
+ req->internalStartPhase = cinternalStartphase;
+ req->typeOfStart = ctypeOfStart;
+ req->masterNodeId = cmasterNodeId;
+
+ for (int i = 0; i < 16; i++) {
+ // Garbage
+ req->config[i] = 0x88776655;
+ //cfgBlockPtr.p->cfgData[i];
+ }
+
+ //#define MAX_STARTPHASE 2
+#ifdef TRACE_STTOR
+ ndbout_c("sending NDB_STTOR(%d) to %s",
+ cinternalStartphase,
+ getBlockName( refToBlock(ndbBlocksPtr.p->blockref)));
+#endif
+ sendSignal(ndbBlocksPtr.p->blockref, GSN_NDB_STTOR, signal, 22, JBB);
+ cndbBlocksCount++;
+}//Ndbcntr::sendNdbSttor()
+
+/*---------------------------------------------------------------------------*/
+// JUST SEND THE SIGNAL
+/*---------------------------------------------------------------------------*/
+void Ndbcntr::sendSttorry(Signal* signal)
+{
+ signal->theData[3] = ZSTART_PHASE_1;
+ signal->theData[4] = ZSTART_PHASE_2;
+ signal->theData[5] = ZSTART_PHASE_3;
+ signal->theData[6] = ZSTART_PHASE_4;
+ signal->theData[7] = ZSTART_PHASE_5;
+ signal->theData[8] = ZSTART_PHASE_6;
+ // skip simulated phase 7
+ signal->theData[9] = ZSTART_PHASE_8;
+ signal->theData[10] = ZSTART_PHASE_9;
+ signal->theData[11] = ZSTART_PHASE_END;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 12, JBB);
+}//Ndbcntr::sendSttorry()
+
+void
+Ndbcntr::execDUMP_STATE_ORD(Signal* signal)
+{
+ DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0];
+ if(signal->theData[0] == 13){
+ infoEvent("Cntr: cstartPhase = %d, cinternalStartphase = %d, block = %d",
+ cstartPhase, cinternalStartphase, cndbBlocksCount);
+ infoEvent("Cntr: cmasterNodeId = %d", cmasterNodeId);
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::NdbcntrTestStopOnError){
+ if (theConfiguration.stopOnError() == true)
+ ((Configuration&)theConfiguration).stopOnError(false);
+
+ const BlockReference tblockref = calcNdbCntrBlockRef(getOwnNodeId());
+
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::TestStopOnError;
+ sysErr->errorRef = reference();
+ sendSignal(tblockref, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }
+
+
+}//Ndbcntr::execDUMP_STATE_ORD()
+
+void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+
+ switch (var) {
+ case TimeToWaitAlive:
+ // Valid only during start so value not set.
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ }// switch
+#endif
+}//Ndbcntr::execSET_VAR_REQ()
+
+void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
+ NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];
+
+ stateRep->nodeState = newState;
+ stateRep->nodeState.masterNodeId = cmasterNodeId;
+ stateRep->nodeState.setNodeGroup(c_nodeGroup);
+
+ for(Uint32 i = 0; i<ALL_BLOCKS_SZ; i++){
+ sendSignal(ALL_BLOCKS[i].Ref, GSN_NODE_STATE_REP, signal,
+ NodeStateRep::SignalLength, JBB);
+ }
+}
+
+void
+Ndbcntr::execRESUME_REQ(Signal* signal){
+ //ResumeReq * const req = (ResumeReq *)&signal->theData[0];
+ //ResumeRef * const ref = (ResumeRef *)&signal->theData[0];
+
+ jamEntry();
+ //Uint32 senderData = req->senderData;
+ //BlockReference senderRef = req->senderRef;
+ NodeState newState(NodeState::SL_STARTED);
+ updateNodeState(signal, newState);
+ c_stopRec.stopReq.senderRef=0;
+}
+
+void
+Ndbcntr::execSTOP_REQ(Signal* signal){
+ StopReq * const req = (StopReq *)&signal->theData[0];
+ StopRef * const ref = (StopRef *)&signal->theData[0];
+ Uint32 singleuser = req->singleuser;
+ jamEntry();
+ Uint32 senderData = req->senderData;
+ BlockReference senderRef = req->senderRef;
+ bool abort = StopReq::getStopAbort(req->requestInfo);
+
+ if(getNodeState().startLevel < NodeState::SL_STARTED ||
+ abort && !singleuser){
+ /**
+ * Node is not started yet
+ *
+ * So stop it quickly
+ */
+ jam();
+ const Uint32 reqInfo = req->requestInfo;
+ if(StopReq::getPerformRestart(reqInfo)){
+ jam();
+ StartOrd * startOrd = (StartOrd *)&signal->theData[0];
+ startOrd->restartInfo = reqInfo;
+ sendSignal(CMVMI_REF, GSN_START_ORD, signal, 1, JBA);
+ } else {
+ jam();
+ sendSignal(CMVMI_REF, GSN_STOP_ORD, signal, 1, JBA);
+ }
+ return;
+ }
+
+ if(c_stopRec.stopReq.senderRef != 0 && !singleuser){
+ jam();
+ /**
+ * Requested a system shutdown
+ */
+ if(StopReq::getSystemStop(req->requestInfo)){
+ jam();
+ sendSignalWithDelay(reference(), GSN_STOP_REQ, signal, 100,
+ StopReq::SignalLength);
+ return;
+ }
+
+ /**
+ * Requested a node shutdown
+ */
+ if(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo))
+ ref->errorCode = StopRef::SystemShutdownInProgress;
+ else
+ ref->errorCode = StopRef::NodeShutdownInProgress;
+ ref->senderData = senderData;
+ sendSignal(senderRef, GSN_STOP_REF, signal, StopRef::SignalLength, JBB);
+ return;
+ }
+
+ c_stopRec.stopReq = * req;
+ c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond();
+
+ if(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo) && !singleuser) {
+ jam();
+ if(StopReq::getPerformRestart(c_stopRec.stopReq.requestInfo)){
+ ((Configuration&)theConfiguration).stopOnError(false);
+ }
+ }
+ if(!singleuser) {
+ if(!c_stopRec.checkNodeFail(signal)){
+ jam();
+ return;
+ }
+ }
+
+ signal->theData[0] = NDB_LE_NDBStopStarted;
+ signal->theData[1] = StopReq::getSystemStop(c_stopRec.stopReq.requestInfo) ? 1 : 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ NodeState newState(NodeState::SL_STOPPING_1,
+ StopReq::getSystemStop(c_stopRec.stopReq.requestInfo));
+
+ if(singleuser) {
+ newState.setSingleUser(true);
+ newState.setSingleUserApi(c_stopRec.stopReq.singleUserApi);
+ }
+ updateNodeState(signal, newState);
+ signal->theData[0] = ZSHUTDOWN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void
+Ndbcntr::StopRecord::checkTimeout(Signal* signal){
+ jamEntry();
+
+ if(!cntr.getNodeState().getSingleUserMode())
+ if(!checkNodeFail(signal)){
+ jam();
+ return;
+ }
+
+ switch(cntr.getNodeState().startLevel){
+ case NodeState::SL_STOPPING_1:
+ checkApiTimeout(signal);
+ break;
+ case NodeState::SL_STOPPING_2:
+ checkTcTimeout(signal);
+ break;
+ case NodeState::SL_STOPPING_3:
+ checkLqhTimeout_1(signal);
+ break;
+ case NodeState::SL_STOPPING_4:
+ checkLqhTimeout_2(signal);
+ break;
+ case NodeState::SL_SINGLEUSER:
+ break;
+ default:
+ ndbrequire(false);
+ }
+}
+
+bool
+Ndbcntr::StopRecord::checkNodeFail(Signal* signal){
+ jam();
+ if(StopReq::getSystemStop(stopReq.requestInfo)){
+ jam();
+ return true;
+ }
+
+ /**
+ * Check whether the cluster can survive this node stopping
+ */
+ NodeBitmask ndbMask;
+ ndbMask.assign(cntr.c_startedNodes);
+ ndbMask.clear(cntr.getOwnNodeId());
+
+ CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
+ sd->blockRef = cntr.reference();
+ sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck;
+ sd->mask = ndbMask;
+ cntr.EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+ switch (sd->output) {
+ case CheckNodeGroups::Win:
+ case CheckNodeGroups::Partitioning:
+ return true;
+ break;
+ }
+
+ StopRef * const ref = (StopRef *)&signal->theData[0];
+
+ ref->senderData = stopReq.senderData;
+ ref->errorCode = StopRef::NodeShutdownWouldCauseSystemCrash;
+
+ const BlockReference bref = stopReq.senderRef;
+ cntr.sendSignal(bref, GSN_STOP_REF, signal, StopRef::SignalLength, JBB);
+
+ stopReq.senderRef = 0;
+
+ NodeState newState(NodeState::SL_STARTED);
+
+ cntr.updateNodeState(signal, newState);
+
+ signal->theData[0] = NDB_LE_NDBStopAborted;
+ cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
+
+ return false;
+}
+
+void
+Ndbcntr::StopRecord::checkApiTimeout(Signal* signal){
+ const Int32 timeout = stopReq.apiTimeout;
+ const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout;
+ const NDB_TICKS now = NdbTick_CurrentMillisecond();
+ if((timeout >= 0 && now >= alarm)){
+ // || checkWithApiInSomeMagicWay)
+ jam();
+ NodeState newState(NodeState::SL_STOPPING_2,
+ StopReq::getSystemStop(stopReq.requestInfo));
+ if(stopReq.singleuser) {
+ newState.setSingleUser(true);
+ newState.setSingleUserApi(stopReq.singleUserApi);
+ }
+ cntr.updateNodeState(signal, newState);
+
+ stopInitiatedTime = now;
+ }
+
+ signal->theData[0] = ZSHUTDOWN;
+ cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void
+Ndbcntr::StopRecord::checkTcTimeout(Signal* signal){
+ const Int32 timeout = stopReq.transactionTimeout;
+ const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout;
+ const NDB_TICKS now = NdbTick_CurrentMillisecond();
+ if((timeout >= 0 && now >= alarm)){
+ // || checkWithTcInSomeMagicWay)
+ jam();
+ if(stopReq.getSystemStop(stopReq.requestInfo) || stopReq.singleuser){
+ jam();
+ if(stopReq.singleuser)
+ {
+ jam();
+ AbortAllReq * req = (AbortAllReq*)&signal->theData[0];
+ req->senderRef = cntr.reference();
+ req->senderData = 12;
+ cntr.sendSignal(DBTC_REF, GSN_ABORT_ALL_REQ, signal,
+ AbortAllReq::SignalLength, JBB);
+ }
+ else
+ {
+ WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0];
+ req->senderRef = cntr.reference();
+ req->senderData = 12;
+ req->requestType = WaitGCPReq::CompleteForceStart;
+ cntr.sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength, JBB);
+ }
+ } else {
+ jam();
+ StopPermReq * req = (StopPermReq*)&signal->theData[0];
+ req->senderRef = cntr.reference();
+ req->senderData = 12;
+ cntr.sendSignal(DBDIH_REF, GSN_STOP_PERM_REQ, signal,
+ StopPermReq::SignalLength, JBB);
+ }
+ return;
+ }
+ signal->theData[0] = ZSHUTDOWN;
+ cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void Ndbcntr::execSTOP_PERM_REF(Signal* signal){
+ //StopPermRef* const ref = (StopPermRef*)&signal->theData[0];
+
+ jamEntry();
+
+ signal->theData[0] = ZSHUTDOWN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void Ndbcntr::execSTOP_PERM_CONF(Signal* signal){
+ jamEntry();
+
+ AbortAllReq * req = (AbortAllReq*)&signal->theData[0];
+ req->senderRef = reference();
+ req->senderData = 12;
+ sendSignal(DBTC_REF, GSN_ABORT_ALL_REQ, signal,
+ AbortAllReq::SignalLength, JBB);
+}
+
+void Ndbcntr::execABORT_ALL_CONF(Signal* signal){
+ jamEntry();
+ if(c_stopRec.stopReq.singleuser) {
+ jam();
+ NodeState newState(NodeState::SL_SINGLEUSER);
+ newState.setSingleUser(true);
+ newState.setSingleUserApi(c_stopRec.stopReq.singleUserApi);
+ updateNodeState(signal, newState);
+ c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond();
+
+ }
+ else
+ {
+ jam();
+ NodeState newState(NodeState::SL_STOPPING_3,
+ StopReq::getSystemStop(c_stopRec.stopReq.requestInfo));
+ updateNodeState(signal, newState);
+
+ c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond();
+
+ signal->theData[0] = ZSHUTDOWN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+ }
+}
+
+void Ndbcntr::execABORT_ALL_REF(Signal* signal){
+ jamEntry();
+ ndbrequire(false);
+}
+
+void
+Ndbcntr::StopRecord::checkLqhTimeout_1(Signal* signal){
+ const Int32 timeout = stopReq.readOperationTimeout;
+ const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout;
+ const NDB_TICKS now = NdbTick_CurrentMillisecond();
+
+ if((timeout >= 0 && now >= alarm)){
+ // || checkWithLqhInSomeMagicWay)
+ jam();
+
+ ChangeNodeStateReq * req = (ChangeNodeStateReq*)&signal->theData[0];
+
+ NodeState newState(NodeState::SL_STOPPING_4,
+ StopReq::getSystemStop(stopReq.requestInfo));
+ req->nodeState = newState;
+ req->senderRef = cntr.reference();
+ req->senderData = 12;
+ cntr.sendSignal(DBLQH_REF, GSN_CHANGE_NODE_STATE_REQ, signal, 2, JBB);
+ return;
+ }
+ signal->theData[0] = ZSHUTDOWN;
+ cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void Ndbcntr::execCHANGE_NODE_STATE_CONF(Signal* signal){
+ jamEntry();
+ signal->theData[0] = reference();
+ signal->theData[1] = 12;
+ sendSignal(DBDIH_REF, GSN_STOP_ME_REQ, signal, 2, JBB);
+}
+
+void Ndbcntr::execSTOP_ME_REF(Signal* signal){
+ jamEntry();
+ ndbrequire(false);
+}
+
+
+void Ndbcntr::execSTOP_ME_CONF(Signal* signal){
+ jamEntry();
+
+ NodeState newState(NodeState::SL_STOPPING_4,
+ StopReq::getSystemStop(c_stopRec.stopReq.requestInfo));
+ updateNodeState(signal, newState);
+
+ c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond();
+ signal->theData[0] = ZSHUTDOWN;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void
+Ndbcntr::StopRecord::checkLqhTimeout_2(Signal* signal){
+ const Int32 timeout = stopReq.operationTimeout;
+ const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout;
+ const NDB_TICKS now = NdbTick_CurrentMillisecond();
+
+ if((timeout >= 0 && now >= alarm)){
+ // || checkWithLqhInSomeMagicWay)
+ jam();
+ if(StopReq::getPerformRestart(stopReq.requestInfo)){
+ jam();
+ StartOrd * startOrd = (StartOrd *)&signal->theData[0];
+ startOrd->restartInfo = stopReq.requestInfo;
+ cntr.sendSignal(CMVMI_REF, GSN_START_ORD, signal, 2, JBA);
+ } else {
+ jam();
+ cntr.sendSignal(CMVMI_REF, GSN_STOP_ORD, signal, 1, JBA);
+ }
+ return;
+ }
+ signal->theData[0] = ZSHUTDOWN;
+ cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1);
+}
+
+void Ndbcntr::execWAIT_GCP_REF(Signal* signal){
+ jamEntry();
+
+ //WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0];
+
+ WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0];
+ req->senderRef = reference();
+ req->senderData = 12;
+ req->requestType = WaitGCPReq::CompleteForceStart;
+ sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
+ WaitGCPReq::SignalLength, JBB);
+}
+
+void Ndbcntr::execWAIT_GCP_CONF(Signal* signal){
+ jamEntry();
+
+ ndbrequire(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo));
+ NodeState newState(NodeState::SL_STOPPING_3, true);
+
+ /**
+ * Inform QMGR so that arbitrator won't kill us
+ */
+ NodeStateRep * rep = (NodeStateRep *)&signal->theData[0];
+ rep->nodeState = newState;
+ rep->nodeState.masterNodeId = cmasterNodeId;
+ rep->nodeState.setNodeGroup(c_nodeGroup);
+ EXECUTE_DIRECT(QMGR, GSN_NODE_STATE_REP, signal, NodeStateRep::SignalLength);
+
+ if(StopReq::getPerformRestart(c_stopRec.stopReq.requestInfo)){
+ jam();
+ StartOrd * startOrd = (StartOrd *)&signal->theData[0];
+ startOrd->restartInfo = c_stopRec.stopReq.requestInfo;
+ sendSignalWithDelay(CMVMI_REF, GSN_START_ORD, signal, 500,
+ StartOrd::SignalLength);
+ } else {
+ jam();
+ sendSignalWithDelay(CMVMI_REF, GSN_STOP_ORD, signal, 500, 1);
+ }
+ return;
+}
+
+void Ndbcntr::execSTTORRY(Signal* signal){
+ jamEntry();
+ c_missra.execSTTORRY(signal);
+}
+
+void Ndbcntr::execREAD_CONFIG_CONF(Signal* signal){
+ jamEntry();
+ c_missra.execREAD_CONFIG_CONF(signal);
+}
+
+void Ndbcntr::execSTART_ORD(Signal* signal){
+ jamEntry();
+ ndbrequire(NO_OF_BLOCKS == ALL_BLOCKS_SZ);
+ c_missra.execSTART_ORD(signal);
+}
+
+void
+Ndbcntr::clearFilesystem(Signal* signal){
+ FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend();
+ req->userReference = reference();
+ req->userPointer = 0;
+ req->directory = 1;
+ req->ownDirectory = 1;
+ FsOpenReq::setVersion(req->fileNumber, 3);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can be any...
+ FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
+ FsRemoveReq::SignalLength, JBA);
+ c_fsRemoveCount++;
+}
+
+void
+Ndbcntr::execFSREMOVEREF(Signal* signal){
+ jamEntry();
+ ndbrequire(0);
+}
+
+void
+Ndbcntr::execFSREMOVECONF(Signal* signal){
+ jamEntry();
+ if(c_fsRemoveCount == 13){
+ jam();
+ sendSttorry(signal);
+ } else {
+ jam();
+ ndbrequire(c_fsRemoveCount < 13);
+ clearFilesystem(signal);
+ }//if
+}
+
+void Ndbcntr::Missra::execSTART_ORD(Signal* signal){
+ signal->theData[0] = NDB_LE_NDBStartStarted;
+ signal->theData[1] = NDB_VERSION;
+ cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ currentBlockIndex = 0;
+ sendNextREAD_CONFIG_REQ(signal);
+}
+
+void Ndbcntr::Missra::sendNextREAD_CONFIG_REQ(Signal* signal){
+
+ if(currentBlockIndex < ALL_BLOCKS_SZ){
+ jam();
+
+ ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtrSend();
+ req->senderData = 0;
+ req->senderRef = cntr.reference();
+ req->noOfParameters = 0;
+
+ const BlockReference ref = ALL_BLOCKS[currentBlockIndex].Ref;
+
+#if 0
+ ndbout_c("sending READ_CONFIG_REQ to %s(ref=%x index=%d)",
+ getBlockName( refToBlock(ref)),
+ ref,
+ currentBlockIndex);
+#endif
+
+ cntr.sendSignal(ref, GSN_READ_CONFIG_REQ, signal,
+ ReadConfigReq::SignalLength, JBB);
+ return;
+ }
+
+ /**
+ * Finished...
+ */
+ currentStartPhase = 0;
+ for(Uint32 i = 0; i<NO_OF_BLOCKS; i++){
+ if(ALL_BLOCKS[i].NextSP < currentStartPhase)
+ currentStartPhase = ALL_BLOCKS[i].NextSP;
+ }
+
+ currentBlockIndex = 0;
+ sendNextSTTOR(signal);
+}
+
+void Ndbcntr::Missra::execREAD_CONFIG_CONF(Signal* signal){
+ const ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtr();
+
+ const Uint32 ref = conf->senderRef;
+ ndbrequire(refToBlock(ALL_BLOCKS[currentBlockIndex].Ref) == refToBlock(ref));
+
+ currentBlockIndex++;
+ sendNextREAD_CONFIG_REQ(signal);
+}
+
+void Ndbcntr::Missra::execSTTORRY(Signal* signal){
+ const BlockReference ref = signal->senderBlockRef();
+ ndbrequire(refToBlock(ref) == refToBlock(ALL_BLOCKS[currentBlockIndex].Ref));
+
+ /**
+ * Update next start phase
+ */
+ for (Uint32 i = 3; i < 25; i++){
+ jam();
+ if (signal->theData[i] > currentStartPhase){
+ jam();
+ ALL_BLOCKS[currentBlockIndex].NextSP = signal->theData[i];
+ break;
+ }
+ }
+
+ currentBlockIndex++;
+ sendNextSTTOR(signal);
+}
+
+void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){
+
+ for(; currentStartPhase < 255 ; currentStartPhase++){
+ jam();
+
+ const Uint32 start = currentBlockIndex;
+
+ for(; currentBlockIndex < ALL_BLOCKS_SZ; currentBlockIndex++){
+ jam();
+ if(ALL_BLOCKS[currentBlockIndex].NextSP == currentStartPhase){
+ jam();
+ signal->theData[0] = 0;
+ signal->theData[1] = currentStartPhase;
+ signal->theData[2] = 0;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ signal->theData[5] = 0;
+ signal->theData[6] = 0;
+ signal->theData[7] = cntr.ctypeOfStart;
+
+ const BlockReference ref = ALL_BLOCKS[currentBlockIndex].Ref;
+
+#ifdef MAX_STARTPHASE
+ ndbrequire(currentStartPhase <= MAX_STARTPHASE);
+#endif
+
+#ifdef TRACE_STTOR
+ ndbout_c("sending STTOR(%d) to %s(ref=%x index=%d)",
+ currentStartPhase,
+ getBlockName( refToBlock(ref)),
+ ref,
+ currentBlockIndex);
+#endif
+
+ cntr.sendSignal(ref, GSN_STTOR, signal, 8, JBB);
+
+ return;
+ }
+ }
+
+ currentBlockIndex = 0;
+
+ if(start != 0){
+ /**
+ * At least one wanted this start phase, report it
+ */
+ jam();
+ signal->theData[0] = NDB_LE_StartPhaseCompleted;
+ signal->theData[1] = currentStartPhase;
+ signal->theData[2] = cntr.ctypeOfStart;
+ cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
+ }
+
+ signal->theData[0] = NDB_LE_NDBStartCompleted;
+ signal->theData[1] = NDB_VERSION;
+ cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ NodeState newState(NodeState::SL_STARTED);
+ cntr.updateNodeState(signal, newState);
+
+ /**
+ * Backward
+ */
+ UpgradeStartup::sendCmAppChg(cntr, signal, 3); //RUN
+
+ NdbNodeBitmask nodes = cntr.c_clusterNodes;
+ Uint32 node = 0;
+ while((node = nodes.find(node+1)) != NdbNodeBitmask::NotFound){
+ if(cntr.getNodeInfo(node).m_version < MAKE_VERSION(3,5,0)){
+ nodes.clear(node);
+ }
+ }
+
+ NodeReceiverGroup rg(NDBCNTR, nodes);
+ signal->theData[0] = cntr.getOwnNodeId();
+ cntr.sendSignal(rg, GSN_CNTR_START_REP, signal, 1, JBB);
+}
+
+/**
+ * Backward compatible code
+ */
+void
+UpgradeStartup::sendCmAppChg(Ndbcntr& cntr, Signal* signal, Uint32 startLevel){
+
+ if(cntr.getNodeInfo(cntr.cmasterNodeId).m_version >= MAKE_VERSION(3,5,0)){
+ jam();
+ return;
+ }
+
+ /**
+ * Old NDB running
+ */
+
+ signal->theData[0] = startLevel;
+ signal->theData[1] = cntr.getOwnNodeId();
+ signal->theData[2] = 3 | ('N' << 8);
+ signal->theData[3] = 'D' | ('B' << 8);
+ signal->theData[4] = 0;
+ signal->theData[5] = 0;
+ signal->theData[6] = 0;
+ signal->theData[7] = 0;
+ signal->theData[8] = 0;
+ signal->theData[9] = 0;
+ signal->theData[10] = 0;
+ signal->theData[11] = 0;
+
+ NdbNodeBitmask nodes = cntr.c_clusterNodes;
+ nodes.clear(cntr.getOwnNodeId());
+ Uint32 node = 0;
+ while((node = nodes.find(node+1)) != NdbNodeBitmask::NotFound){
+ if(cntr.getNodeInfo(node).m_version < MAKE_VERSION(3,5,0)){
+ cntr.sendSignal(cntr.calcQmgrBlockRef(node),
+ GSN_CM_APPCHG, signal, 12, JBB);
+ } else {
+ cntr.c_startedNodes.set(node); // Fake started
+ }
+ }
+}
+
+void
+UpgradeStartup::execCM_APPCHG(SimulatedBlock & block, Signal* signal){
+ Uint32 state = signal->theData[0];
+ Uint32 nodeId = signal->theData[1];
+ if(block.number() == QMGR){
+ Ndbcntr& cntr = * (Ndbcntr*)globalData.getBlock(CNTR);
+ switch(state){
+ case 0: // ZADD
+ break;
+ case 2: // ZSTART
+ break;
+ case 3: // ZRUN{
+ cntr.c_startedNodes.set(nodeId);
+
+ Uint32 recv = cntr.c_startedNodes.count();
+ Uint32 cnt = cntr.c_clusterNodes.count();
+ if(recv + 1 == cnt){ //+1 == own node
+ /**
+ * Check master
+ */
+ sendCntrMasterReq(cntr, signal, 0);
+ }
+ return;
+ }
+ }
+ block.progError(0,0);
+}
+
+void
+UpgradeStartup::sendCntrMasterReq(Ndbcntr& cntr, Signal* signal, Uint32 n){
+ Uint32 node = cntr.c_startedNodes.find(n);
+ if(node != NdbNodeBitmask::NotFound &&
+ (node == cntr.getOwnNodeId() ||
+ cntr.getNodeInfo(node).m_version >= MAKE_VERSION(3,5,0))){
+ node = cntr.c_startedNodes.find(node+1);
+ }
+
+ if(node == NdbNodeBitmask::NotFound){
+ cntr.progError(0,0);
+ }
+
+ CntrMasterReq * const cntrMasterReq = (CntrMasterReq*)&signal->theData[0];
+ cntr.c_clusterNodes.copyto(NdbNodeBitmask::Size, cntrMasterReq->theNodes);
+ NdbNodeBitmask::clear(cntrMasterReq->theNodes, cntr.getOwnNodeId());
+ cntrMasterReq->userBlockRef = 0;
+ cntrMasterReq->userNodeId = cntr.getOwnNodeId();
+ cntrMasterReq->typeOfStart = NodeState::ST_INITIAL_NODE_RESTART;
+ cntrMasterReq->noRestartNodes = cntr.c_clusterNodes.count() - 1;
+ cntr.sendSignal(cntr.calcNdbCntrBlockRef(node), GSN_CNTR_MASTERREQ,
+ signal, CntrMasterReq::SignalLength, JBB);
+}
+
+void
+UpgradeStartup::execCNTR_MASTER_REPLY(SimulatedBlock & block, Signal* signal){
+ Uint32 gsn = signal->header.theVerId_signalNumber;
+ Uint32 node = refToNode(signal->getSendersBlockRef());
+ if(block.number() == CNTR){
+ Ndbcntr& cntr = (Ndbcntr&)block;
+ switch(gsn){
+ case GSN_CNTR_MASTERREF:
+ sendCntrMasterReq(cntr, signal, node + 1);
+ return;
+ break;
+ case GSN_CNTR_MASTERCONF:{
+ CntrStartConf* conf = (CntrStartConf*)signal->getDataPtrSend();
+ conf->startGci = 0;
+ conf->masterNodeId = node;
+ conf->noStartNodes = 1;
+ conf->startType = NodeState::ST_INITIAL_NODE_RESTART;
+ NodeBitmask mask;
+ mask.clear();
+ mask.copyto(NdbNodeBitmask::Size, conf->startedNodes);
+ mask.clear();
+ mask.set(cntr.getOwnNodeId());
+ mask.copyto(NdbNodeBitmask::Size, conf->startingNodes);
+ cntr.execCNTR_START_CONF(signal);
+ return;
+ }
+ }
+ }
+ block.progError(0,0);
+}
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
new file mode 100644
index 00000000000..2a65271a32a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
@@ -0,0 +1,94 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Ndbcntr.hpp"
+
+#define arrayLength(x) sizeof(x)/sizeof(x[0])
+
+// SYSTAB_0
+
+static const Ndbcntr::SysColumn
+column_SYSTAB_0[] = {
+ { 0, "SYSKEY_0",
+ DictTabInfo::ExtUnsigned, 1,
+ true, false
+ },
+ { 1, "NEXTID",
+ DictTabInfo::ExtBigunsigned, 1,
+ false, false
+ }
+};
+
+const Ndbcntr::SysTable
+Ndbcntr::g_sysTable_SYSTAB_0 = {
+ "sys/def/SYSTAB_0",
+ arrayLength(column_SYSTAB_0), column_SYSTAB_0,
+ DictTabInfo::SystemTable,
+ DictTabInfo::AllNodesSmallTable,
+ true, ~0
+};
+
+// NDB$EVENTS_0
+
+static const Ndbcntr::SysColumn
+column_NDBEVENTS_0[] = {
+ { 0, "NAME",
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
+ true, false
+ },
+ { 1, "EVENT_TYPE",
+ DictTabInfo::ExtUnsigned, 1,
+ false, false
+ },
+ { 2, "TABLE_NAME",
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
+ false, false
+ },
+ { 3, "ATTRIBUTE_MASK",
+ DictTabInfo::ExtUnsigned, MAXNROFATTRIBUTESINWORDS,
+ false, false
+ },
+ { 4, "SUBID",
+ DictTabInfo::ExtUnsigned, 1,
+ false, false
+ },
+ { 5, "SUBKEY",
+ DictTabInfo::ExtUnsigned, 1,
+ false, false
+ }
+};
+
+const Ndbcntr::SysTable
+Ndbcntr::g_sysTable_NDBEVENTS_0 = {
+ "sys/def/NDB$EVENTS_0",
+ arrayLength(column_NDBEVENTS_0), column_NDBEVENTS_0,
+ DictTabInfo::SystemTable,
+ DictTabInfo::AllNodesSmallTable,
+ true, ~0
+};
+
+// all
+
+const Ndbcntr::SysTable*
+Ndbcntr::g_sysTableList[] = {
+ &g_sysTable_SYSTAB_0,
+ &g_sysTable_NDBEVENTS_0
+};
+
+//TODO Backup needs this info to allocate appropriate number of records
+//BackupInit.cpp
+const unsigned
+Ndbcntr::g_sysTableCount = arrayLength(Ndbcntr::g_sysTableList);
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
new file mode 100644
index 00000000000..f76440a462a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -0,0 +1,1033 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_sys.h>
+#include <my_pthread.h>
+
+#include <Error.hpp>
+#include "AsyncFile.hpp"
+
+#include <ErrorHandlingMacros.hpp>
+#include <kernel_types.h>
+#include <NdbMem.h>
+#include <NdbThread.h>
+#include <signaldata/FsOpenReq.hpp>
+
+// use this to test broken pread code
+//#define HAVE_BROKEN_PREAD
+
+#ifdef HAVE_BROKEN_PREAD
+#undef HAVE_PWRITE
+#undef HAVE_PREAD
+#endif
+
+#if defined NDB_WIN32 || defined NDB_OSE || defined NDB_SOFTOSE
+#else
+// For readv and writev
+#include <sys/uio.h>
+#endif
+
+#ifndef NDB_WIN32
+#include <dirent.h>
+#endif
+
+// Use this define if you want printouts from AsyncFile class
+//#define DEBUG_ASYNCFILE
+
+#ifdef DEBUG_ASYNCFILE
+#include <NdbOut.hpp>
+#define DEBUG(x) x
+#define PRINT_ERRORANDFLAGS(f) printErrorAndFlags(f)
+void printErrorAndFlags(Uint32 used_flags);
+#else
+#define DEBUG(x)
+#define PRINT_ERRORANDFLAGS(f)
+#endif
+
+// Define the size of the write buffer (for each thread)
+#if defined NDB_SOFTOSE || defined NDB_OSE
+#define WRITEBUFFERSIZE 65536
+#else
+#define WRITEBUFFERSIZE 262144
+#endif
+
+const char *actionName[] = {
+ "open",
+ "close",
+ "closeRemove",
+ "read",
+ "readv",
+ "write",
+ "writev",
+ "writeSync",
+ "writevSync",
+ "sync",
+ "end" };
+
+static int numAsyncFiles = 0;
+
+extern "C" void * runAsyncFile(void* arg)
+{
+ ((AsyncFile*)arg)->run();
+ return (NULL);
+}
+
+AsyncFile::AsyncFile() :
+ theFileName(),
+#ifdef NDB_WIN32
+ hFile(INVALID_HANDLE_VALUE),
+#else
+ theFd(-1),
+#endif
+ theReportTo(0),
+ theMemoryChannelPtr(NULL)
+{
+}
+
+void
+AsyncFile::doStart(Uint32 nodeId,
+ const char * filesystemPath,
+ const char * backup_path) {
+ theFileName.init(nodeId, filesystemPath, backup_path);
+
+ // Stacksize for filesystem threads
+ // An 8k stack should be enough
+ const NDB_THREAD_STACKSIZE stackSize = 8192;
+
+ char buf[16];
+ numAsyncFiles++;
+ BaseString::snprintf(buf, sizeof(buf), "AsyncFile%d", numAsyncFiles);
+
+ theStartMutexPtr = NdbMutex_Create();
+ theStartConditionPtr = NdbCondition_Create();
+ NdbMutex_Lock(theStartMutexPtr);
+ theStartFlag = false;
+ theThreadPtr = NdbThread_Create(runAsyncFile,
+ (void**)this,
+ stackSize,
+ (char*)&buf,
+ NDB_THREAD_PRIO_MEAN);
+
+ NdbCondition_Wait(theStartConditionPtr,
+ theStartMutexPtr);
+ NdbMutex_Unlock(theStartMutexPtr);
+ NdbMutex_Destroy(theStartMutexPtr);
+ NdbCondition_Destroy(theStartConditionPtr);
+}
+
+AsyncFile::~AsyncFile()
+{
+ void *status;
+ Request request;
+ request.action = Request::end;
+ theMemoryChannelPtr->writeChannel( &request );
+ NdbThread_WaitFor(theThreadPtr, &status);
+ NdbThread_Destroy(&theThreadPtr);
+ delete theMemoryChannelPtr;
+}
+
+void
+AsyncFile::reportTo( MemoryChannel<Request> *reportTo )
+{
+ theReportTo = reportTo;
+}
+
+void AsyncFile::execute(Request* request)
+{
+ theMemoryChannelPtr->writeChannel( request );
+}
+
+void
+AsyncFile::run()
+{
+ Request *request;
+ // Create theMemoryChannel in the thread that will wait for it
+ NdbMutex_Lock(theStartMutexPtr);
+ theMemoryChannelPtr = new MemoryChannel<Request>();
+ theStartFlag = true;
+ // Create write buffer for bigger writes
+ theWriteBufferSize = WRITEBUFFERSIZE;
+ theWriteBuffer = (char *) NdbMem_Allocate(theWriteBufferSize);
+ NdbMutex_Unlock(theStartMutexPtr);
+ NdbCondition_Signal(theStartConditionPtr);
+
+ if (!theWriteBuffer) {
+ DEBUG(ndbout_c("AsyncFile::writeReq, Failed allocating write buffer"));
+ return;
+ }//if
+
+ while (1) {
+ request = theMemoryChannelPtr->readChannel();
+ if (!request) {
+ DEBUG(ndbout_c("Nothing read from Memory Channel in AsyncFile"));
+ endReq();
+ return;
+ }//if
+ switch (request->action) {
+ case Request:: open:
+ openReq(request);
+ break;
+ case Request:: close:
+ closeReq(request);
+ break;
+ case Request:: closeRemove:
+ closeReq(request);
+ removeReq(request);
+ break;
+ case Request:: read:
+ readReq(request);
+ break;
+ case Request:: readv:
+ readvReq(request);
+ break;
+ case Request:: write:
+ writeReq(request);
+ break;
+ case Request:: writev:
+ writevReq(request);
+ break;
+ case Request:: writeSync:
+ writeReq(request);
+ syncReq(request);
+ break;
+ case Request:: writevSync:
+ writevReq(request);
+ syncReq(request);
+ break;
+ case Request:: sync:
+ syncReq(request);
+ break;
+ case Request:: append:
+ appendReq(request);
+ break;
+ case Request::rmrf:
+ rmrfReq(request, (char*)theFileName.c_str(), request->par.rmrf.own_directory);
+ break;
+ case Request:: end:
+ if (theFd > 0)
+ closeReq(request);
+ endReq();
+ return;
+ default:
+ abort();
+ break;
+ }//switch
+ theReportTo->writeChannel(request);
+ }//while
+}//AsyncFile::run()
+
+extern bool Global_useO_SYNC;
+extern bool Global_useO_DIRECT;
+extern bool Global_unlinkO_CREAT;
+extern Uint32 Global_syncFreq;
+
+void AsyncFile::openReq(Request* request)
+{
+ m_openedWithSync = false;
+ m_syncFrequency = 0;
+ m_syncCount= 0;
+
+ // for open.flags, see signal FSOPENREQ
+#ifdef NDB_WIN32
+ DWORD dwCreationDisposition;
+ DWORD dwDesiredAccess = 0;
+ DWORD dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS | FILE_FLAG_NO_BUFFERING;
+ const Uint32 flags = request->par.open.flags;
+
+ // Convert file open flags from Solaris to Windows
+ if ((flags & FsOpenReq::OM_CREATE) && (flags & FsOpenReq::OM_TRUNCATE)){
+ dwCreationDisposition = CREATE_ALWAYS;
+ } else if (flags & FsOpenReq::OM_TRUNCATE){
+ dwCreationDisposition = TRUNCATE_EXISTING;
+ } else if (flags & FsOpenReq::OM_CREATE){
+ dwCreationDisposition = CREATE_NEW;
+ } else {
+ dwCreationDisposition = OPEN_EXISTING;
+ }
+
+ switch(flags & 3){
+ case FsOpenReq::OM_READONLY:
+ dwDesiredAccess = GENERIC_READ;
+ break;
+ case FsOpenReq::OM_WRITEONLY:
+ dwDesiredAccess = GENERIC_WRITE;
+ break;
+ case FsOpenReq::OM_READWRITE:
+ dwDesiredAccess = GENERIC_READ | GENERIC_WRITE;
+ break;
+ default:
+ // unsupported open mode: record the error and return before CreateFile
+ request->error = 1000;
+ return;
+ }
+
+ hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode,
+ 0, dwCreationDisposition, dwFlagsAndAttributes, 0);
+
+ if(INVALID_HANDLE_VALUE == hFile) {
+ request->error = GetLastError();
+ if(((ERROR_PATH_NOT_FOUND == request->error) || (ERROR_INVALID_NAME == request->error))
+ && (flags & FsOpenReq::OM_CREATE)) {
+ createDirectories();
+ hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode,
+ 0, dwCreationDisposition, dwFlagsAndAttributes, 0);
+
+ if(INVALID_HANDLE_VALUE == hFile)
+ request->error = GetLastError();
+ else
+ request->error = 0;
+
+ return;
+ }
+ }
+ else {
+ request->error = 0;
+ return;
+ }
+#else
+ const Uint32 flags = request->par.open.flags;
+ Uint32 new_flags = 0;
+
+ // Convert file open flags from Solaris to Linux
+ if(flags & FsOpenReq::OM_CREATE){
+ new_flags |= O_CREAT;
+ }
+
+ if(flags & FsOpenReq::OM_TRUNCATE){
+#if 0
+ if(Global_unlinkO_CREAT){
+ unlink(theFileName.c_str());
+ } else
+#endif
+ new_flags |= O_TRUNC;
+ }
+
+ if(flags & FsOpenReq::OM_APPEND){
+ new_flags |= O_APPEND;
+ }
+
+ if(flags & FsOpenReq::OM_SYNC){
+#if 0
+ if(Global_useO_SYNC){
+ new_flags |= O_SYNC;
+ m_openedWithSync = true;
+ m_syncFrequency = 0;
+ } else {
+#endif
+ m_openedWithSync = false;
+ m_syncFrequency = Global_syncFreq;
+#if 0
+ }
+#endif
+ } else {
+ m_openedWithSync = false;
+ m_syncFrequency = 0;
+ }
+
+#if 0
+ //#if NDB_LINUX
+ if(Global_useO_DIRECT){
+ new_flags |= O_DIRECT;
+ }
+#endif
+
+ switch(flags & 0x3){
+ case FsOpenReq::OM_READONLY:
+ new_flags |= O_RDONLY;
+ break;
+ case FsOpenReq::OM_WRITEONLY:
+ new_flags |= O_WRONLY;
+ break;
+ case FsOpenReq::OM_READWRITE:
+ new_flags |= O_RDWR;
+ break;
+ default:
+ // unsupported open mode: record the error and return before ::open
+ request->error = 1000;
+ return;
+ }
+ const int mode = S_IRUSR | S_IWUSR | S_IRGRP;
+
+ if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) {
+ PRINT_ERRORANDFLAGS(new_flags);
+ if( (errno == ENOENT ) && (new_flags & O_CREAT ) ) {
+ createDirectories();
+ if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) {
+ PRINT_ERRORANDFLAGS(new_flags);
+ request->error = errno;
+ }
+ } else {
+ request->error = errno;
+ }
+ }
+#endif
+}
+
+int
+AsyncFile::readBuffer(char * buf, size_t size, off_t offset){
+ int return_value;
+
+#ifdef NDB_WIN32
+ DWORD dwSFP = SetFilePointer(hFile, offset, 0, FILE_BEGIN);
+ if(dwSFP != offset) {
+ return GetLastError();
+ }
+#elif ! defined(HAVE_PREAD)
+ off_t seek_val;
+ while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1
+ && errno == EINTR);
+ if(seek_val == (off_t)-1)
+ {
+ return errno;
+ }
+#endif
+
+ while (size > 0) {
+ size_t bytes_read = 0;
+
+#ifdef NDB_WIN32
+ DWORD dwBytesRead;
+ BOOL bRead = ReadFile(hFile,
+ buf,
+ size,
+ &dwBytesRead,
+ 0);
+ if(!bRead){
+ return GetLastError();
+ }
+ bytes_read = dwBytesRead;
+#elif ! defined(HAVE_PREAD)
+ return_value = ::read(theFd, buf, size);
+#else // UNIX
+ return_value = ::pread(theFd, buf, size, offset);
+#endif
+#ifndef NDB_WIN32
+ if (return_value == -1 && errno == EINTR) {
+ DEBUG(ndbout_c("EINTR in read"));
+ continue;
+ } else if (return_value == -1){
+ return errno;
+ } else {
+ bytes_read = return_value;
+ }
+#endif
+
+ if(bytes_read == 0){
+ DEBUG(ndbout_c("Read underflow %d %d\n %x\n%d %d",
+ size, offset, buf, bytes_read, return_value));
+ return ERR_ReadUnderflow;
+ }
+
+ if(bytes_read != size){
+ DEBUG(ndbout_c("Warning partial read %d != %d",
+ bytes_read, size));
+ }
+
+ buf += bytes_read;
+ size -= bytes_read;
+ offset += bytes_read;
+ }
+ return 0;
+}
+
+void
+AsyncFile::readReq( Request * request)
+{
+ for(int i = 0; i < request->par.readWrite.numberOfPages ; i++) {
+ off_t offset = request->par.readWrite.pages[i].offset;
+ size_t size = request->par.readWrite.pages[i].size;
+ char * buf = request->par.readWrite.pages[i].buf;
+
+ int err = readBuffer(buf, size, offset);
+ if(err != 0){
+ request->error = err;
+ return;
+ }
+ }
+}
+
+void
+AsyncFile::readvReq( Request * request)
+{
+#if ! defined(HAVE_PREAD)
+ readReq(request);
+ return;
+#elif defined NDB_WIN32
+ // ReadFileScatter?
+ readReq(request);
+ return;
+#else
+ int return_value;
+ int length = 0;
+ struct iovec iov[20]; // the parameter in the signal restricts this to 20 deep
+ for(int i=0; i < request->par.readWrite.numberOfPages ; i++) {
+ iov[i].iov_base= request->par.readWrite.pages[i].buf;
+ iov[i].iov_len= request->par.readWrite.pages[i].size;
+ length = length + iov[i].iov_len;
+ }
+ lseek( theFd, request->par.readWrite.pages[0].offset, SEEK_SET );
+ return_value = ::readv(theFd, iov, request->par.readWrite.numberOfPages);
+ if (return_value == -1) {
+ request->error = errno;
+ return;
+ } else if (return_value != length) {
+ request->error = 1011;
+ return;
+ }
+#endif
+}
+
+int
+AsyncFile::extendfile(Request* request) {
+#if ! defined(HAVE_PWRITE)
+ // Find max size of this file in this request
+ int maxOffset = 0;
+ int maxSize = 0;
+ for(int i=0; i < request->par.readWrite.numberOfPages ; i++) {
+ if (request->par.readWrite.pages[i].offset > maxOffset) {
+ maxOffset = request->par.readWrite.pages[i].offset;
+ maxSize = request->par.readWrite.pages[i].size;
+ }
+ }
+ DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize));
+
+ // Allocate a buffer and fill it with zeros
+ void* pbuf = NdbMem_Allocate(maxSize);
+ memset(pbuf, 0, maxSize);
+ for (int p = 0; p <= maxOffset; p = p + maxSize) {
+ int return_value;
+ return_value = lseek(theFd,
+ p,
+ SEEK_SET);
+ if((return_value == -1 ) || (return_value != p)) {
+ return -1;
+ }
+ return_value = ::write(theFd,
+ pbuf,
+ maxSize);
+ if ((return_value == -1) || (return_value != maxSize)) {
+ return -1;
+ }
+ }
+ free(pbuf);
+
+ DEBUG(ndbout_c("extendfile: \"%s\" OK!", theFileName.c_str()));
+ return 0;
+#else
+ request = request;
+ abort();
+ return -1;
+#endif
+}
+
+void
+AsyncFile::writeReq( Request * request)
+{
+ int page_num = 0;
+ bool write_not_complete = true;
+
+ while(write_not_complete) {
+ int totsize = 0;
+ off_t offset = request->par.readWrite.pages[page_num].offset;
+ char* bufptr = theWriteBuffer;
+
+ write_not_complete = false;
+ if (request->par.readWrite.numberOfPages > 1) {
+ off_t page_offset = offset;
+
+ // Multiple page write, copy to buffer for one write
+ for(int i=page_num; i < request->par.readWrite.numberOfPages; i++) {
+ memcpy(bufptr,
+ request->par.readWrite.pages[i].buf,
+ request->par.readWrite.pages[i].size);
+ bufptr += request->par.readWrite.pages[i].size;
+ totsize += request->par.readWrite.pages[i].size;
+ if (((i + 1) < request->par.readWrite.numberOfPages)) {
+ // There are more pages to write
+ // Check that offsets are consecutive
+ off_t tmp = page_offset + request->par.readWrite.pages[i].size;
+ if (tmp != request->par.readWrite.pages[i+1].offset) {
+ // Next page is not aligned with previous, not allowed
+ DEBUG(ndbout_c("Page offsets are not aligned"));
+ request->error = EINVAL;
+ return;
+ }
+ if ((unsigned)(totsize + request->par.readWrite.pages[i+1].size) > (unsigned)theWriteBufferSize) {
+ // We are not finished and the buffer is full
+ write_not_complete = true;
+ // Start again with next page
+ page_num = i + 1;
+ break;
+ }
+ }
+ page_offset += request->par.readWrite.pages[i].size;
+ }
+ bufptr = theWriteBuffer;
+ } else {
+ // One page write, write page directly
+ bufptr = request->par.readWrite.pages[0].buf;
+ totsize = request->par.readWrite.pages[0].size;
+ }
+ int err = writeBuffer(bufptr, totsize, offset);
+ if(err != 0){
+ request->error = err;
+ return;
+ }
+ } // while(write_not_complete)
+}
+
+int
+AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset,
+ size_t chunk_size)
+{
+ size_t bytes_to_write = chunk_size;
+ int return_value;
+
+#ifdef NDB_WIN32
+ DWORD dwSFP = SetFilePointer(hFile, offset, 0, FILE_BEGIN);
+ if(dwSFP != offset) {
+ return GetLastError();
+ }
+#elif ! defined(HAVE_PWRITE)
+ off_t seek_val;
+ while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1
+ && errno == EINTR);
+ if(seek_val == (off_t)-1)
+ {
+ return errno;
+ }
+#endif
+
+ while (size > 0) {
+ if (size < bytes_to_write){
+ // We are at the last chunk
+ bytes_to_write = size;
+ }
+ size_t bytes_written = 0;
+
+#ifdef NDB_WIN32
+ DWORD dwWritten;
+ BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, 0);
+ if(!bWrite) {
+ return GetLastError();
+ }
+ bytes_written = dwWritten;
+ if (bytes_written != bytes_to_write) {
+ DEBUG(ndbout_c("Warning partial write %d != %d", bytes_written, bytes_to_write));
+ }
+
+#elif ! defined(HAVE_PWRITE)
+ return_value = ::write(theFd, buf, bytes_to_write);
+#else // UNIX
+ return_value = ::pwrite(theFd, buf, bytes_to_write, offset);
+#endif
+#ifndef NDB_WIN32
+ if (return_value == -1 && errno == EINTR) {
+ bytes_written = 0;
+ DEBUG(ndbout_c("EINTR in write"));
+ } else if (return_value == -1){
+ return errno;
+ } else {
+ bytes_written = return_value;
+
+ if(bytes_written == 0){
+ abort();
+ }
+
+ if(bytes_written != bytes_to_write){
+ DEBUG(ndbout_c("Warning partial write %d != %d",
+ bytes_written, bytes_to_write));
+ }
+ }
+#endif
+
+ m_syncCount+= bytes_written;
+ buf += bytes_written;
+ size -= bytes_written;
+ offset += bytes_written;
+ }
+ return 0;
+}
+
+void
+AsyncFile::writevReq( Request * request)
+{
+ // WriteFileGather on WIN32?
+ writeReq(request);
+}
+
+
+void
+AsyncFile::closeReq(Request * request)
+{
+ syncReq(request);
+#ifdef NDB_WIN32
+ if(!CloseHandle(hFile)) {
+ request->error = GetLastError();
+ }
+ hFile = INVALID_HANDLE_VALUE;
+#else
+ if (-1 == ::close(theFd)) {
+#ifndef DBUG_OFF
+ if (theFd == -1)
+ abort();
+#endif
+ request->error = errno;
+ }
+ theFd = -1;
+#endif
+}
+
+bool AsyncFile::isOpen(){
+#ifdef NDB_WIN32
+ return (hFile != INVALID_HANDLE_VALUE);
+#else
+ return (theFd != -1);
+#endif
+}
+
+
+void
+AsyncFile::syncReq(Request * request)
+{
+ if(m_openedWithSync ||
+ m_syncCount == 0){
+ return;
+ }
+#ifdef NDB_WIN32
+ if(!FlushFileBuffers(hFile)) {
+ request->error = GetLastError();
+ return;
+ }
+#else
+ if (-1 == ::fsync(theFd)){
+ request->error = errno;
+ return;
+ }
+#endif
+ m_syncCount = 0;
+}
+
+void
+AsyncFile::appendReq(Request * request){
+
+ const char * buf = request->par.append.buf;
+ Uint32 size = request->par.append.size;
+
+ m_syncCount += size;
+
+#ifdef NDB_WIN32
+ DWORD dwWritten = 0;
+ while(size > 0){
+ if(!WriteFile(hFile, buf, size, &dwWritten, 0)){
+ request->error = GetLastError();
+ return ;
+ }
+
+ buf += dwWritten;
+ size -= dwWritten;
+ }
+#else
+ while(size > 0){
+ const int n = write(theFd, buf, size);
+ if(n == -1 && errno == EINTR){
+ continue;
+ }
+ if(n == -1){
+ request->error = errno;
+ return;
+ }
+ if(n == 0){
+ abort();
+ }
+ size -= n;
+ buf += n;
+ }
+#endif
+
+ if(m_syncFrequency != 0 && m_syncCount > m_syncFrequency){
+ syncReq(request);
+ }
+}
+
+void
+AsyncFile::removeReq(Request * request)
+{
+#ifdef NDB_WIN32
+ if(!DeleteFile(theFileName.c_str())) {
+ request->error = GetLastError();
+ }
+#else
+ if (-1 == ::remove(theFileName.c_str())) {
+ request->error = errno;
+
+ }
+#endif
+}
+
+void
+AsyncFile::rmrfReq(Request * request, char * path, bool removePath){
+ Uint32 path_len = strlen(path);
+ Uint32 path_max_copy = PATH_MAX - path_len;
+ char* path_add = &path[path_len];
+#ifndef NDB_WIN32
+ if(!request->par.rmrf.directory){
+ // Remove file
+ if(unlink((const char *)path) != 0 && errno != ENOENT)
+ request->error = errno;
+ return;
+ }
+ // Remove directory
+ DIR* dirp = opendir((const char *)path);
+ if(dirp == 0){
+ if(errno != ENOENT)
+ request->error = errno;
+ return;
+ }
+ struct dirent * dp;
+ while ((dp = readdir(dirp)) != NULL){
+ if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) {
+ BaseString::snprintf(path_add, (size_t)path_max_copy, "%s%s",
+ DIR_SEPARATOR, dp->d_name);
+ if(remove((const char*)path) == 0){
+ path[path_len] = 0;
+ continue;
+ }
+
+ rmrfReq(request, path, true);
+ path[path_len] = 0;
+ if(request->error != 0){
+ closedir(dirp);
+ return;
+ }
+ }
+ }
+ closedir(dirp);
+ if(removePath && rmdir((const char *)path) != 0){
+ request->error = errno;
+ }
+ return;
+#else
+
+ if(!request->par.rmrf.directory){
+ // Remove file
+ if(!DeleteFile(path)){
+ DWORD dwError = GetLastError();
+ if(dwError!=ERROR_FILE_NOT_FOUND)
+ request->error = dwError;
+ }
+ return;
+ }
+
+ strcat(path, "\\*");
+ WIN32_FIND_DATA ffd;
+ HANDLE hFindFile = FindFirstFile(path, &ffd);
+ path[path_len] = 0;
+ if(INVALID_HANDLE_VALUE==hFindFile){
+ DWORD dwError = GetLastError();
+ if(dwError!=ERROR_PATH_NOT_FOUND)
+ request->error = dwError;
+ return;
+ }
+
+ do {
+ if(0!=strcmp(".", ffd.cFileName) && 0!=strcmp("..", ffd.cFileName)){
+ strcat(path, "\\");
+ strcat(path, ffd.cFileName);
+ if(DeleteFile(path)) {
+ path[path_len] = 0;
+ continue;
+ }//if
+
+ rmrfReq(request, path, true);
+ path[path_len] = 0;
+ if(request->error != 0){
+ FindClose(hFindFile);
+ return;
+ }
+ }
+ } while(FindNextFile(hFindFile, &ffd));
+
+ FindClose(hFindFile);
+
+ if(removePath && !RemoveDirectory(path))
+ request->error = GetLastError();
+
+#endif
+}
+
+void AsyncFile::endReq()
+{
+ // Thread is ended with return
+ if (theWriteBuffer) NdbMem_Free(theWriteBuffer);
+}
+
+
+void AsyncFile::createDirectories()
+{
+ for (int i = 0; i < theFileName.levels(); i++) {
+#ifdef NDB_WIN32
+ CreateDirectory(theFileName.directory(i), 0);
+#else
+ //printf("AsyncFile::createDirectories : \"%s\"\n", theFileName.directory(i));
+ mkdir(theFileName.directory(i), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+#endif
+ }
+}
+
+#ifdef DEBUG_ASYNCFILE
+void printErrorAndFlags(Uint32 used_flags) {
+ char buf[255];
+ sprintf(buf, "PEAF: errno=%d \"", errno);
+
+ switch(errno) {
+ case EACCES:
+ strcat(buf, "EACCES");
+ break;
+ case EDQUOT:
+ strcat(buf, "EDQUOT");
+ break;
+ case EEXIST :
+ strcat(buf, "EEXIST");
+ break;
+ case EINTR :
+ strcat(buf, "EINTR");
+ break;
+ case EFAULT :
+ strcat(buf, "EFAULT");
+ break;
+ case EIO :
+ strcat(buf, "EIO");
+ break;
+ case EISDIR :
+ strcat(buf, "EISDIR");
+ break;
+ case ELOOP :
+ strcat(buf, "ELOOP");
+ break;
+ case EMFILE :
+ strcat(buf, "EMFILE");
+ break;
+ case ENFILE :
+ strcat(buf, "ENFILE");
+ break;
+ case ENOENT :
+ strcat(buf, "ENOENT ");
+ break;
+ case ENOSPC :
+ strcat(buf, "ENOSPC");
+ break;
+ case ENOTDIR :
+ strcat(buf, "ENOTDIR");
+ break;
+ case ENXIO :
+ strcat(buf, "ENXIO");
+ break;
+ case EOPNOTSUPP:
+ strcat(buf, "EOPNOTSUPP");
+ break;
+#if !defined NDB_OSE && !defined NDB_SOFTOSE
+ case EMULTIHOP :
+ strcat(buf, "EMULTIHOP");
+ break;
+ case ENOLINK :
+ strcat(buf, "ENOLINK");
+ break;
+ case ENOSR :
+ strcat(buf, "ENOSR");
+ break;
+ case EOVERFLOW :
+ strcat(buf, "EOVERFLOW");
+ break;
+#endif
+ case EROFS :
+ strcat(buf, "EROFS");
+ break;
+ case EAGAIN :
+ strcat(buf, "EAGAIN");
+ break;
+ case EINVAL :
+ strcat(buf, "EINVAL");
+ break;
+ case ENOMEM :
+ strcat(buf, "ENOMEM");
+ break;
+ case ETXTBSY :
+ strcat(buf, "ETXTBSY");
+ break;
+ case ENAMETOOLONG:
+ strcat(buf, "ENAMETOOLONG");
+ break;
+ case EBADF:
+ strcat(buf, "EBADF");
+ break;
+ case ESPIPE:
+ strcat(buf, "ESPIPE");
+ break;
+ case ESTALE:
+ strcat(buf, "ESTALE");
+ break;
+ default:
+ strcat(buf, "EOTHER");
+ break;
+ }
+ strcat(buf, "\" ");
+#if defined NDB_OSE
+ strcat(buf, strerror(errno) << " ");
+#endif
+ strcat(buf, " flags: ");
+ switch(used_flags & 3){
+ case O_RDONLY:
+ strcat(buf, "O_RDONLY, ");
+ break;
+ case O_WRONLY:
+ strcat(buf, "O_WRONLY, ");
+ break;
+ case O_RDWR:
+ strcat(buf, "O_RDWR, ");
+ break;
+ default:
+ strcat(buf, "Unknown!!, ");
+ }
+
+ if((used_flags & O_APPEND)==O_APPEND)
+ strcat(buf, "O_APPEND, ");
+ if((used_flags & O_CREAT)==O_CREAT)
+ strcat(buf, "O_CREAT, ");
+ if((used_flags & O_EXCL)==O_EXCL)
+ strcat(buf, "O_EXCL, ");
+ if((used_flags & O_NOCTTY) == O_NOCTTY)
+ strcat(buf, "O_NOCTTY, ");
+ if((used_flags & O_NONBLOCK)==O_NONBLOCK)
+ strcat(buf, "O_NONBLOCK, ");
+ if((used_flags & O_TRUNC)==O_TRUNC)
+ strcat(buf, "O_TRUNC, ");
+#if !defined NDB_OSE && !defined NDB_SOFTOSE
+ if((used_flags & O_DSYNC)==O_DSYNC)
+ strcat(buf, "O_DSYNC, ");
+ if((used_flags & O_NDELAY)==O_NDELAY)
+ strcat(buf, "O_NDELAY, ");
+ if((used_flags & O_RSYNC)==O_RSYNC)
+ strcat(buf, "O_RSYNC, ");
+ if((used_flags & O_SYNC)==O_SYNC)
+ strcat(buf, "O_SYNC, ");
+ DEBUG(ndbout_c(buf));
+#endif
+
+}
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
new file mode 100644
index 00000000000..2176c93c5d5
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -0,0 +1,234 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef AsyncFile_H
+#define AsyncFile_H
+
+//===========================================================================
+//
+// .DESCRIPTION
+// Asynchronous file. All actions are executed concurrently with other
+// activity of the process.
+// Because all actions are performed in a separate thread, the result of
+// an action is sent back through a memory channel.
+// For asynchronous notification of a finished request, all calls take a
+// request as parameter; the user can use the userData pointer to attach
+// any information needed when the request is sent back.
+//
+//
+// .TYPICAL USE:
+// Writing or reading data to/from disk concurrently to other activities.
+//
+//===========================================================================
+//=============================================================================
+//
+// .PUBLIC
+//
+//=============================================================================
+///////////////////////////////////////////////////////////////////////////////
+//
+// AsyncFile( );
+// Description:
+// Initialisation of the class.
+// Parameters:
+// -
+///////////////////////////////////////////////////////////////////////////////
+//
+// ~AsyncFile( );
+// Description:
+// Tell the thread to stop and wait for it to return
+// Parameters:
+// -
+///////////////////////////////////////////////////////////////////////////////
+//
+// doStart( );
+// Description:
+// Spawns the new thread.
+// Parameters:
+// node id, base path of the filesystem and the backup path
+//
+///////////////////////////////////////////////////////////////////////////////
+//
+// void execute(Request *request);
+// Description:
+// performs the required action.
+// Parameters:
+// request: the request to perform; it is reported back when the action is finished.
+// action= open|close|read|write|sync
+// if action is open then:
+// par.open.flags= UNIX open flags, see man open
+// par.open.name= name of the file to open
+// if action is read or write then:
+// par.readWrite.buf= user provided buffer to read/write
+// the data from/to
+// par.readWrite.size= how many bytes must be read/written
+// par.readWrite.offset= absolute offset in file in bytes
+// return:
+// return values are stored in the request error field:
+// error= return state of the action, UNIX error see man open/errno
+// userData= is left untouched and can be used by the user.
+//
+///////////////////////////////////////////////////////////////////////////////
+//
+// void reportTo( MemoryChannel<Request> *reportTo );
+// Description:
+// sets the channel on which the file reports the result of its
+// actions back to the caller.
+// Parameters:
+// reportTo: the memory channel to use; use MemoryChannelMultipleWriter
+// if more than one file uses this channel to report back.
+//
+///////////////////////////////////////////////////////////////////////////////
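+//
+// .USAGE SKETCH
+//   A minimal sketch of how the calls documented above fit together
+//   (illustrative only; it assumes a caller that owns a MemoryChannel<Request>,
+//   and ownNodeId plus the two paths are placeholders, not part of this
+//   interface; the OM_* flags come from signaldata/FsOpenReq.hpp):
+//
+//     MemoryChannel<Request> reportChannel;
+//     AsyncFile file;
+//     file.doStart(ownNodeId, "/ndb/fs", "/ndb/backup");
+//     file.reportTo(&reportChannel);
+//     // the concrete file path is set up through theFileName (see Filename.hpp)
+//
+//     Request req;
+//     req.action = Request::open;
+//     req.error  = 0;
+//     req.par.open.flags = FsOpenReq::OM_READWRITE | FsOpenReq::OM_CREATE;
+//     file.execute(&req);                          // queued to the file thread
+//     Request* done = reportChannel.readChannel(); // blocks until the thread reports back
+//     if (done->error != 0) { /* UNIX errno or GetLastError() value */ }
+//
+///////////////////////////////////////////////////////////////////////////////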
+
+#include <kernel_types.h>
+#include "MemoryChannel.hpp"
+#include "Filename.hpp"
+
+const int ERR_ReadUnderflow = 1000;
+
+const int WRITECHUNK = 262144;
+
+class AsyncFile;
+
+class Request
+{
+public:
+ enum Action {
+ open,
+ close,
+ closeRemove,
+ read, // Always leave readv directly after
+ // read because SimblockAsyncFileSystem depends on it
+ readv,
+ write,// Always leave writev directly after
+ // write because SimblockAsyncFileSystem depends on it
+ writev,
+ writeSync,// Always leave writevSync directly after
+ // writeSync because SimblockAsyncFileSystem depends on it
+ writevSync,
+ sync,
+ end,
+ append,
+ rmrf
+ };
+ Action action;
+ union {
+ struct {
+ Uint32 flags;
+ } open;
+ struct {
+ int numberOfPages;
+ struct{
+ char *buf;
+ size_t size;
+ off_t offset;
+ } pages[16];
+ } readWrite;
+ struct {
+ const char * buf;
+ size_t size;
+ } append;
+ struct {
+ bool directory;
+ bool own_directory;
+ } rmrf;
+ } par;
+ int error;
+
+ void set(BlockReference userReference,
+ Uint32 userPointer,
+ Uint16 filePointer);
+ BlockReference theUserReference;
+ Uint32 theUserPointer;
+ Uint16 theFilePointer;
+ // Information for open, needed if the first open action fails.
+ AsyncFile* file;
+ Uint32 theTrace;
+};
+
+
+inline
+void
+Request::set(BlockReference userReference,
+ Uint32 userPointer, Uint16 filePointer)
+{
+ theUserReference= userReference;
+ theUserPointer= userPointer;
+ theFilePointer= filePointer;
+}
+
+class AsyncFile
+{
+public:
+ AsyncFile();
+ ~AsyncFile();
+
+ void reportTo( MemoryChannel<Request> *reportTo );
+
+ void execute( Request* request );
+
+ void doStart(Uint32 nodeId, const char * fspath, const char * backup_path);
+ // it's a thread, so it's always running
+ void run();
+
+ bool isOpen();
+
+ Filename theFileName;
+private:
+
+ void openReq(Request *request);
+ void readReq(Request *request);
+ void readvReq(Request *request);
+ void writeReq(Request *request);
+ void writevReq(Request *request);
+
+ void closeReq(Request *request);
+ void syncReq(Request *request);
+ void removeReq(Request *request);
+ void appendReq(Request *request);
+ void rmrfReq(Request *request, char * path, bool removePath);
+ void endReq();
+
+ int readBuffer(char * buf, size_t size, off_t offset);
+ int writeBuffer(const char * buf, size_t size, off_t offset,
+ size_t chunk_size = WRITECHUNK);
+
+ int extendfile(Request* request);
+ void createDirectories();
+
+#ifdef NDB_WIN32
+ HANDLE hFile;
+#else
+ int theFd;
+#endif
+
+ MemoryChannel<Request> *theReportTo;
+ MemoryChannel<Request>* theMemoryChannelPtr;
+
+ struct NdbThread* theThreadPtr;
+ NdbMutex* theStartMutexPtr;
+ NdbCondition* theStartConditionPtr;
+ bool theStartFlag;
+ int theWriteBufferSize;
+ char* theWriteBuffer;
+
+ bool m_openedWithSync;
+ Uint32 m_syncCount;
+ Uint32 m_syncFrequency;
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp
new file mode 100644
index 00000000000..004752c9543
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp
@@ -0,0 +1,695 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+//#define TESTDEBUG 1
+
+#include <ndb_global.h>
+
+#include <kernel_types.h>
+#include <Pool.hpp>
+#include "AsyncFile.hpp"
+#include "NdbOut.hpp"
+#include "NdbTick.h"
+#include "NdbThread.h"
+#include "NdbMain.h"
+
+// Test and benchmark functionality of AsyncFile
+// -n Number of files
+// -r Number of simultaneous requests
+// -s Filesize, number of pages
+// -l Number of iterations
+// -remove, remove files after close
+// -reverse, write files in reverse order, start with the last page
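+//
+// A hypothetical invocation (the values below are arbitrary examples, not
+// defaults of this program):
+//   aftest -n 4 -r 8 -s 1024 -l 2 -remove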
+
+#define MAXFILES 255
+#define DEFAULT_NUM_FILES 1
+#define MAXREQUESTS 256
+#define DEFAULT_NUM_REQUESTS 1
+#define MAXFILESIZE 4096
+#define DEFAULT_FILESIZE 2048
+#define FVERSION 0x01000000
+#define PAGESIZE 8192
+
+#define TIMER_START { Uint64 starttick = NdbTick_CurrentMillisecond()
+#define TIMER_PRINT(str, ops) Uint64 stoptick = NdbTick_CurrentMillisecond();\
+ Uint64 totaltime = (stoptick-starttick); \
+ ndbout << ops << " " << str << \
+ " total time " << (int)totaltime << "ms" << endl;\
+ char buf[255];\
+ sprintf(buf, "%d %s/sec\n",(int)((ops*1000)/totaltime), str);\
+ ndbout <<buf << endl;}
+
+static int numberOfFiles = DEFAULT_NUM_FILES;
+static int numberOfRequests = DEFAULT_NUM_REQUESTS;
+static int fileSize = DEFAULT_FILESIZE;
+static int removeFiles = 0;
+static int writeFilesReverse = 0;
+static int numberOfIterations = 1;
+Uint32 FileNameArray[4];
+
+Pool<AsyncFile>* files;
+AsyncFile* openFiles[MAXFILES];
+Pool<Request>* theRequestPool;
+MemoryChannelMultipleWriter<Request>* theReportChannel;
+
+char WritePages[MAXFILES][PAGESIZE];
+char ReadPages[MAXFILES][PAGESIZE];
+
+int readArguments(int argc, const char** argv);
+int openFile(int fileNum);
+int openFileWait();
+int closeFile(int fileNum);
+int closeFileWait();
+int writeFile( int fileNum, int pagenum);
+int writeFileWait();
+int writeSyncFile( int fileNum, int pagenum);
+int writeSyncFileWait();
+int readFile( int fileNum, int pagenum);
+int readFileWait();
+
+
+NDB_COMMAND(aftest, "aftest", "aftest [-n <Number of files>] [-r <Number of simultaneous requests>] [-s <Filesize, number of pages>] [-l <Number of iterations>] [-remove, remove files after close] [-reverse, write files in reverse order, start with the last page]", "Test the AsyncFile class of Ndb", 8192)
+{
+ int s, numReq, numOps;
+
+ readArguments(argc, argv);
+
+ files = new Pool<AsyncFile>(numberOfFiles, 2);
+ theRequestPool = new Pool<Request>;
+ theReportChannel = new MemoryChannelMultipleWriter<Request>;
+
+ ndbout << "AsyncFileTest starting" << endl;
+ ndbout << " " << numberOfFiles << " files" << endl;
+ ndbout << " " << numberOfRequests << " requests" << endl;
+ ndbout << " " << fileSize << " * 8k files" << endl << endl;
+ ndbout << " " << numberOfIterations << " iterations" << endl << endl;
+
+ NdbThread_SetConcurrencyLevel(numberOfFiles+2);
+
+ // initialize data to write to files
+ for (int i = 0; i < MAXFILES; i++) {
+ for (int j = 0; j < PAGESIZE; j++){
+ WritePages[i][j] = (64+i+j)%256;
+ }
+ // memset(&WritePages[i][0], i+64, PAGESIZE);
+ }
+
+ // Set file directory and name
+ // /T27/F27/NDBFS/S27Pnn.data
+ FileNameArray[0] = 27; // T27
+ FileNameArray[1] = 27; // F27
+ FileNameArray[2] = 27; // S27
+ FileNameArray[3] = FVERSION; // Version
+
+ for (int l = 0; l < numberOfIterations; l++)
+ {
+
+ ndbout << "Opening files" << endl;
+ // Open files
+ for (int f = 0; f < numberOfFiles; f++)
+ {
+ openFile(f);
+
+ }
+
+ // Wait for answer
+ openFileWait();
+
+ ndbout << "Files opened!" << endl<< endl;
+
+ // Write to files
+ ndbout << "Started writing" << endl;
+ TIMER_START;
+ s = 0;
+ numReq = 0;
+ numOps = 0;
+ while ( s < fileSize)
+ {
+ for (int r = 0; r < numberOfRequests; r++)
+ {
+ for (int f = 0; f < numberOfFiles; f++)
+ {
+ writeFile(f, s);
+ numReq++;
+ numOps++;
+ }
+
+ s++;
+ }
+
+ while (numReq > 0)
+ {
+ writeFileWait();
+ numReq--;
+ }
+
+ }
+
+ TIMER_PRINT("writes", numOps);
+
+
+ ndbout << "Started reading" << endl;
+ TIMER_START;
+
+ // Read from files
+ s = 0;
+ numReq = 0;
+ numOps = 0;
+ while ( s < fileSize)
+ {
+ for (int r = 0; r < numberOfRequests; r++)
+ {
+ for (int f = 0; f < numberOfFiles; f++)
+ {
+ readFile(f, s);
+ numReq++;
+ numOps++;
+ }
+
+ s++;
+
+ }
+
+ while (numReq > 0)
+ {
+ readFileWait();
+ numReq--;
+ }
+
+ }
+ TIMER_PRINT("reads", numOps);
+
+ ndbout << "Started writing with sync" << endl;
+ TIMER_START;
+
+ // Write to files
+ s = 0;
+ numReq = 0;
+ numOps = 0;
+ while ( s < fileSize)
+ {
+ for (int r = 0; r < numberOfRequests; r++)
+ {
+ for (int f = 0; f < numberOfFiles; f++)
+ {
+ writeSyncFile(f, s);
+ numReq++;
+ numOps++;
+ }
+
+ s++;
+ }
+
+ while (numReq > 0)
+ {
+ writeSyncFileWait();
+ numReq--;
+ }
+
+ }
+
+ TIMER_PRINT("writeSync", numOps);
+
+ // Close files
+ ndbout << "Closing files" << endl;
+ for (int f = 0; f < numberOfFiles; f++)
+ {
+ closeFile(f);
+
+ }
+
+ // Wait for answer
+ closeFileWait();
+
+ ndbout << "Files closed!" << endl<< endl;
+ }
+
+ // Deallocate memory
+ delete files;
+ delete theReportChannel;
+ delete theRequestPool;
+
+ return 0;
+
+}
+
+
+
+int forward( AsyncFile * file, Request* request )
+{
+ file->execute(request);
+ ERROR_CHECK 0;
+ return 1;
+}
+
+int openFile( int fileNum)
+{
+ AsyncFile* file = (AsyncFile *)files->get();
+
+ FileNameArray[3] = fileNum | FVERSION;
+ file->fileName().set( NDBFS_REF, &FileNameArray[0] );
+ ndbout << "openFile: " << file->fileName().c_str() << endl;
+
+ if( ERROR_STATE ) {
+ ERROR_RESET;
+ files->put( file );
+ ndbout << "Failed to set filename" << endl;
+ return 1;
+ }
+ file->reportTo(theReportChannel);
+
+ Request* request = theRequestPool->get();
+ request->action= Request::open;
+ request->error= 0;
+ request->par.open.flags = 0x302; //O_RDWR | O_CREAT | O_TRUNC ; // 770
+ request->set(NDBFS_REF, 0x23456789, fileNum );
+ request->file = file;
+
+ if (!forward(file,request)) {
+ // Something went wrong
+ ndbout << "Could not forward open request" << endl;
+ theRequestPool->put(request);
+ return 1;
+ }
+ return 0;
+}
+
+int closeFile( int fileNum)
+{
+
+ AsyncFile* file = openFiles[fileNum];
+
+ Request* request = theRequestPool->get();
+ if (removeFiles == 1)
+ request->action = Request::closeRemove;
+ else
+ request->action= Request::close;
+
+ request->error= 0;
+ request->set(NDBFS_REF, 0x23456789, fileNum );
+ request->file = file;
+
+ if (!forward(file,request)) {
+ // Something went wrong
+ ndbout << "Could not forward close request" << endl;
+ theRequestPool->put(request);
+ return 1;
+ }
+ return 0;
+}
+
+int writeFile( int fileNum, int pagenum)
+{
+ AsyncFile* file = openFiles[fileNum];
+#ifdef TESTDEBUG
+ ndbout << "writeFile" << fileNum <<": "<<pagenum<<", " << file->fileName().c_str()<< endl;
+#endif
+ Request *request = theRequestPool->get();
+ request->action = Request::write;
+ request->error = 0;
+ request->set(NDBFS_REF, pagenum, fileNum);
+ request->file = openFiles[fileNum];
+
+ // Write only one page, choose the correct page for each file using fileNum
+ request->par.readWrite.pages[0].buf = &WritePages[fileNum][0];
+ request->par.readWrite.pages[0].size = PAGESIZE;
+ if (writeFilesReverse == 1)
+ {
+ // write the last page in the files first
+ // This is a normal way for the Blocks in Ndb to write to a file
+ request->par.readWrite.pages[0].offset = (fileSize - pagenum - 1) * PAGESIZE;
+ }
+ else
+ {
+ request->par.readWrite.pages[0].offset = pagenum * PAGESIZE;
+ }
+ request->par.readWrite.numberOfPages = 1;
+
+ if (!forward(file,request)) {
+ // Something went wrong
+ ndbout << "Could not forward write request" << endl;
+ theRequestPool->put(request);
+ return 1;
+ }
+ return 0;
+
+}
+
+int writeSyncFile( int fileNum, int pagenum)
+{
+ AsyncFile* file = openFiles[fileNum];
+#ifdef TESTDEBUG
+ ndbout << "writeFile" << fileNum <<": "<<pagenum<<", " << file->fileName().c_str() << endl;
+#endif
+ Request *request = theRequestPool->get();
+ request->action = Request::writeSync;
+ request->error = 0;
+ request->set(NDBFS_REF, pagenum, fileNum);
+ request->file = openFiles[fileNum];
+
+ // Write only one page, choose the correct page for each file using fileNum
+ request->par.readWrite.pages[0].buf = &WritePages[fileNum][0];
+ request->par.readWrite.pages[0].size = PAGESIZE;
+ request->par.readWrite.pages[0].offset = pagenum * PAGESIZE;
+ request->par.readWrite.numberOfPages = 1;
+
+ if (!forward(file,request)) {
+ // Something went wrong
+ ndbout << "Could not forward write request" << endl;
+ theRequestPool->put(request);
+ return 1;
+ }
+ return 0;
+
+}
+
+int readFile( int fileNum, int pagenum)
+{
+ AsyncFile* file = openFiles[fileNum];
+#ifdef TESTDEBUG
+ ndbout << "readFile" << fileNum <<": "<<pagenum<<", " << file->fileName().c_str() << endl;
+#endif
+ Request *request = theRequestPool->get();
+ request->action = Request::read;
+ request->error = 0;
+ request->set(NDBFS_REF, pagenum, fileNum);
+ request->file = openFiles[fileNum];
+
+ // Read only one page, choose the correct page for each file using fileNum
+ request->par.readWrite.pages[0].buf = &ReadPages[fileNum][0];
+ request->par.readWrite.pages[0].size = PAGESIZE;
+ request->par.readWrite.pages[0].offset = pagenum * PAGESIZE;
+ request->par.readWrite.numberOfPages = 1;
+
+ if (!forward(file,request)) {
+ // Something went wrong
+ ndbout << "Could not forward read request" << endl;
+ theRequestPool->put(request);
+ return 1;
+ }
+ return 0;
+
+}
+
+int openFileWait()
+{
+ int openedFiles = 0;
+ while (openedFiles < numberOfFiles)
+ {
+ Request* request = theReportChannel->readChannel();
+ if (request)
+ {
+ if (request->action == Request::open)
+ {
+ if (request->error ==0)
+ {
+#ifdef TESTDEBUG
+ ndbout << "Opened file " << request->file->fileName().c_str() << endl;
+#endif
+ openFiles[request->theFilePointer] = request->file;
+ }
+ else
+ {
+ ndbout << "error while opening file" << endl;
+ exit(1);
+ }
+ theRequestPool->put(request);
+ openedFiles++;
+ }
+ else
+ {
+ ndbout << "Unexpected request received" << endl;
+ }
+ }
+ else
+ {
+ ndbout << "Nothing read from theReportChannel" << endl;
+ }
+ }
+ return 0;
+}
+
+int closeFileWait()
+{
+ int closedFiles = 0;
+ while (closedFiles < numberOfFiles)
+ {
+ Request* request = theReportChannel->readChannel();
+ if (request)
+ {
+ if (request->action == Request::close || request->action == Request::closeRemove)
+ {
+ if (request->error ==0)
+ {
+#ifdef TESTDEBUG
+ ndbout << "Closed file " << request->file->fileName().c_str() << endl;
+#endif
+ openFiles[request->theFilePointer] = NULL;
+ files->put(request->file);
+ }
+ else
+ {
+ ndbout << "error while closing file" << endl;
+ exit(1);
+ }
+ theRequestPool->put(request);
+ closedFiles++;
+ }
+ else
+ {
+ ndbout << "Unexpected request received" << endl;
+ }
+ }
+ else
+ {
+ ndbout << "Nothing read from theReportChannel" << endl;
+ }
+ }
+ return 0;
+}
+
+int writeFileWait()
+{
+ Request* request = theReportChannel->readChannel();
+ if (request)
+ {
+ if (request->action == Request::write)
+ {
+ if (request->error == 0)
+ {
+#ifdef TESTDEBUG
+ ndbout << "writeFileWait"<<request->theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl;
+#endif
+
+ }
+ else
+ {
+ ndbout << "error while writing file, error=" << request->error << endl;
+ exit(1);
+ }
+ theRequestPool->put(request);
+ }
+ else
+ {
+ ndbout << "Unexpected request received" << endl;
+ }
+ }
+ else
+ {
+ ndbout << "Nothing read from theReportChannel" << endl;
+ }
+ return 0;
+}
+
+int writeSyncFileWait()
+{
+ Request* request = theReportChannel->readChannel();
+ if (request)
+ {
+ if (request->action == Request::writeSync)
+ {
+ if (request->error == 0)
+ {
+#ifdef TESTDEBUG
+ ndbout << "writeFileWait"<<request->theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl;
+#endif
+
+ }
+ else
+ {
+ ndbout << "error while writing file" << endl;
+ exit(1);
+ }
+ theRequestPool->put(request);
+ }
+ else
+ {
+ ndbout << "Unexpected request received" << endl;
+ }
+ }
+ else
+ {
+ ndbout << "Nothing read from theReportChannel" << endl;
+ }
+ return 0;
+}
+
+int readFileWait()
+{
+ Request* request = theReportChannel->readChannel();
+ if (request)
+ {
+ if (request->action == Request::read)
+ {
+ if (request->error == 0)
+ {
+#ifdef TESTDEBUG
+ ndbout << "readFileWait"<<request->theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl;
+#endif
+ if (memcmp(&(ReadPages[request->theFilePointer][0]), &(WritePages[request->theFilePointer][0]), PAGESIZE)!=0)
+ {
+ ndbout <<"Verification error!" << endl;
+ for (int i = 0; i < PAGESIZE; i++ ){
+ ndbout <<" Compare Page " << i << " : " << ReadPages[request->theFilePointer][i] <<", " <<WritePages[request->theFilePointer][i] << endl;;
+ if( ReadPages[request->theFilePointer][i] !=WritePages[request->theFilePointer][i])
+
+ exit(1);
+ }
+ }
+
+ }
+ else
+ {
+ ndbout << "error while reading file" << endl;
+ exit(1);
+ }
+ theRequestPool->put(request);
+ }
+ else
+ {
+ ndbout << "Unexpected request received" << endl;
+ }
+ }
+ else
+ {
+ ndbout << "Nothing read from theReportChannel" << endl;
+ }
+ return 0;
+}
+
+int readArguments(int argc, const char** argv)
+{
+
+ int i = 1;
+ while (argc > 1)
+ {
+ if (strcmp(argv[i], "-n") == 0)
+ {
+ numberOfFiles = atoi(argv[i+1]);
+ if ((numberOfFiles < 1) || (numberOfFiles > MAXFILES))
+ {
+ ndbout << "Wrong number of files, default = "<<DEFAULT_NUM_FILES << endl;
+ numberOfFiles = DEFAULT_NUM_FILES;
+ }
+ }
+ else if (strcmp(argv[i], "-r") == 0)
+ {
+ numberOfRequests = atoi(argv[i+1]);
+ if ((numberOfRequests < 1) || (numberOfRequests > MAXREQUESTS))
+ {
+ ndbout << "Wrong number of requests, default = "<<DEFAULT_NUM_REQUESTS << endl;
+ numberOfRequests = DEFAULT_NUM_REQUESTS;
+ }
+ }
+ else if (strcmp(argv[i], "-s") == 0)
+ {
+ fileSize = atoi(argv[i+1]);
+ if ((fileSize < 1) || (fileSize > MAXFILESIZE))
+ {
+ ndbout << "Wrong number of 8k pages, default = "<<DEFAULT_FILESIZE << endl;
+ fileSize = DEFAULT_FILESIZE;
+ }
+ }
+ else if (strcmp(argv[i], "-l") == 0)
+ {
+ numberOfIterations = atoi(argv[i+1]);
+ if ((numberOfIterations < 1))
+ {
+ ndbout << "Wrong number of iterations, default = 1" << endl;
+ numberOfIterations = 1;
+ }
+ }
+ else if (strcmp(argv[i], "-remove") == 0)
+ {
+ removeFiles = 1;
+ argc++;
+ i--;
+ }
+ else if (strcmp(argv[i], "-reverse") == 0)
+ {
+ ndbout << "Writing files reversed" << endl;
+ writeFilesReverse = 1;
+ argc++;
+ i--;
+ }
+
+ argc -= 2;
+ i = i + 2;
+ }
+
+ if ((fileSize % numberOfRequests)!= 0)
+ {
+ numberOfRequests = numberOfRequests - (fileSize % numberOfRequests);
+ ndbout <<"numberOfRequest must be modulo of filesize" << endl;
+ ndbout << "New numberOfRequest="<<numberOfRequests<<endl;
+ }
+ return 0;
+}
+
+
+// Needed for linking...
+
+void ErrorReporter::handleError(ErrorCategory type, int messageID,
+ const char* problemData, const char* objRef, NdbShutdownType stype)
+{
+
+ ndbout << "ErrorReporter::handleError activated" << endl;
+ ndbout << "type= " << type << endl;
+ ndbout << "messageID= " << messageID << endl;
+ ndbout << "problemData= " << problemData << endl;
+ ndbout << "objRef= " << objRef << endl;
+
+ exit(1);
+}
+
+void ErrorReporter::handleAssert(const char* message, const char* file, int line)
+{
+ ndbout << "ErrorReporter::handleAssert activated" << endl;
+ ndbout << "message= " << message << endl;
+ ndbout << "file= " << file << endl;
+ ndbout << "line= " << line << endl;
+ exit(1);
+}
+
+
+GlobalData globalData;
+
+
+Signal::Signal()
+{
+
+}
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile
new file mode 100644
index 00000000000..b0356e6da68
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile
@@ -0,0 +1,27 @@
+include .defs.mk
+
+TYPE := kernel
+
+BIN_TARGET := aftest
+BIN_TARGET_ARCHIVES := ndbfs portlib trace signaldataprint
+
+SOURCES = AsyncFileTest.cpp
+
+CFLAGS_AsyncFileTest.cpp = -I../
+
+include $(NDB_TOP)/Epilogue.mk
+
+
+# run basic tests
+run_test :
+ $(NDB_TOP)/bin/$(BIN_TARGET)
+ $(NDB_TOP)/bin/$(BIN_TARGET) -n 8 -r 8 -l 10 -remove
+ $(NDB_TOP)/bin/$(BIN_TARGET) -n 8 -r 8 -l 10 -reverse -remove
+ $(NDB_TOP)/bin/$(BIN_TARGET) -n 8 -r 8 -l 10 -s 512 -remove
+ $(NDB_TOP)/bin/$(BIN_TARGET) -n 8 -r 4 -l 1000
+
+
+
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp
new file mode 100644
index 00000000000..30b40097c9b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp
@@ -0,0 +1,20 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "CircularIndex.hpp"
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
new file mode 100644
index 00000000000..349cccdbcb4
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
@@ -0,0 +1,116 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef CircularIndex_H
+#define CircularIndex_H
+
+//===========================================================================
+//
+// .DESCRIPTION
+// Building block for circular buffers. It increments like a normal index
+// until it reaches the maximum size, at which point it wraps around to zero.
+//
+// .TYPICAL USE:
+// to implement a circular buffer.
+//
+// .EXAMPLE:
+// See MemoryChannel.C
+//===========================================================================
+
+///////////////////////////////////////////////////////////////////////////////
+// CircularIndex( int start= 0,int size=256 );
+// Constructor
+// Parameters:
+// start: where to start to index
+// size : range of the index, will be from 0 to size-1
+///////////////////////////////////////////////////////////////////////////////
+// operator int ();
+// returns the index
+///////////////////////////////////////////////////////////////////////////////
+// CircularIndex& operator ++ ();
+// increments the index by one; if size is reached it wraps around to zero
+///////////////////////////////////////////////////////////////////////////////
+// friend int full( const CircularIndex& write, const CircularIndex& read );
+// Given the write index and the read index of a buffer, calculates
+// whether the buffer is full
+// Parameters:
+// write: index used as the write index for the buffer
+// read : index used as the read index for the buffer
+// return
+// 0 : not full
+// 1 : full
+///////////////////////////////////////////////////////////////////////////////
+// friend int empty( const CircularIndex& write, const CircularIndex& read );
+// Given the write index and the read index of a buffer, calculates
+// whether the buffer is empty
+// Parameters:
+// write: index used as the write index for the buffer
+// read : index used as the read index for the buffer
+// return
+// 0 : not empty
+// 1 : empty
+///////////////////////////////////////////////////////////////////////////////
+
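+// A minimal usage sketch (MemoryChannel.hpp in this change is the real user);
+// "slots" and "item" below are illustrative only:
+//
+//   CircularIndex writeIx(0, 16), readIx(0, 16);
+//   void* slots[16];
+//   if (!full(writeIx, readIx))  { slots[writeIx] = item; ++writeIx; }
+//   if (!empty(writeIx, readIx)) { item = slots[readIx]; ++readIx; }
+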
+class CircularIndex
+{
+public:
+ inline CircularIndex( int start= 0,int size=256 );
+ operator int ();
+ CircularIndex& operator ++ ();
+ friend int full( const CircularIndex& write, const CircularIndex& read );
+ friend int empty( const CircularIndex& write, const CircularIndex& read );
+private:
+ int theSize;
+ int theIndex;
+};
+
+inline CircularIndex::operator int ()
+{
+ return theIndex;
+}
+
+inline CircularIndex& CircularIndex::operator ++ ()
+{
+ ++theIndex;
+ if( theIndex >= theSize ){
+ theIndex= 0;
+ }
+ return *this;
+}
+
+
+inline int full( const CircularIndex& write, const CircularIndex& read )
+{
+ int readTmp= read.theIndex;
+
+ if( read.theIndex < write.theIndex )
+ readTmp += read.theSize;
+
+ return ( readTmp - write.theIndex) == 1;
+}
+
+inline int empty( const CircularIndex& write, const CircularIndex& read )
+{
+ return read.theIndex == write.theIndex;
+}
+
+
+inline CircularIndex::CircularIndex( int start,int size ):
+ theSize(size),
+ theIndex(start)
+{
+}
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
new file mode 100644
index 00000000000..15158ec19ef
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
@@ -0,0 +1,219 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+
+#include <NdbOut.hpp>
+
+#include "Filename.hpp"
+#include "ErrorHandlingMacros.hpp"
+#include "Error.hpp"
+#include "RefConvert.hpp"
+#include "DebuggerNames.hpp"
+
+#include <signaldata/FsOpenReq.hpp>
+
+static const char* fileExtension[] = {
+ ".Data",
+ ".FragLog",
+ ".LocLog",
+ ".FragList",
+ ".TableList",
+ ".SchemaLog",
+ ".sysfile",
+ ".log",
+ ".ctl"
+};
+
+static const Uint32 noOfExtensions = sizeof(fileExtension)/sizeof(char*);
+
+Filename::Filename() :
+ theLevelDepth(0)
+{
+}
+
+void
+Filename::init(Uint32 nodeid,
+ const char * pFileSystemPath,
+ const char * pBackupDirPath){
+ DBUG_ENTER("Filename::init");
+
+ if (pFileSystemPath == NULL) {
+ ERROR_SET(fatal, AFS_ERROR_NOPATH, ""," Filename::init()");
+ return;
+ }
+
+ BaseString::snprintf(theFileSystemDirectory, sizeof(theFileSystemDirectory),
+ "%sndb_%u_fs%s", pFileSystemPath, nodeid, DIR_SEPARATOR);
+ strncpy(theBackupDirectory, pBackupDirPath, sizeof(theBackupDirectory));
+
+ DBUG_PRINT("info", ("theFileSystemDirectory=%s", theFileSystemDirectory));
+ DBUG_PRINT("info", ("theBackupDirectory=%s", theBackupDirectory));
+
+#ifdef NDB_WIN32
+ CreateDirectory(theFileSystemDirectory, 0);
+#else
+ mkdir(theFileSystemDirectory, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+#endif
+ theBaseDirectory= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+Filename::~Filename(){
+}
+
+void
+Filename::set(BlockReference blockReference,
+ const Uint32 filenumber[4], bool dir)
+{
+ char buf[PATH_MAX];
+ theLevelDepth = 0;
+
+ const Uint32 type = FsOpenReq::getSuffix(filenumber);
+ const Uint32 version = FsOpenReq::getVersion(filenumber);
+
+ if (version == 2)
+ theBaseDirectory= theBackupDirectory;
+ else
+ theBaseDirectory= theFileSystemDirectory;
+ strncpy(theName, theBaseDirectory, PATH_MAX);
+
+ switch(version){
+ case 1 :{
+ const Uint32 diskNo = FsOpenReq::v1_getDisk(filenumber);
+ const Uint32 table = FsOpenReq::v1_getTable(filenumber);
+ const Uint32 frag = FsOpenReq::v1_getFragment(filenumber);
+ const Uint32 S_val = FsOpenReq::v1_getS(filenumber);
+ const Uint32 P_val = FsOpenReq::v1_getP(filenumber);
+
+ if (diskNo < 0xff){
+ BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
+ strcat(theName, buf);
+ theLevelDepth++;
+ }
+
+ {
+ const char* blockName = getBlockName( refToBlock(blockReference) );
+ if (blockName == NULL){
+ ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","No Block Name");
+ return;
+ }
+ BaseString::snprintf(buf, sizeof(buf), "%s%s", blockName, DIR_SEPARATOR);
+ strcat(theName, buf);
+ theLevelDepth++;
+ }
+
+ if (table < 0xffffffff){
+ BaseString::snprintf(buf, sizeof(buf), "T%d%s", table, DIR_SEPARATOR);
+ strcat(theName, buf);
+ theLevelDepth++;
+ }
+
+ if (frag < 0xffffffff){
+ BaseString::snprintf(buf, sizeof(buf), "F%d%s", frag, DIR_SEPARATOR);
+ strcat(theName, buf);
+ theLevelDepth++;
+ }
+
+
+ if (S_val < 0xffffffff){
+ BaseString::snprintf(buf, sizeof(buf), "S%d", S_val);
+ strcat(theName, buf);
+ }
+
+ if (P_val < 0xff){
+ BaseString::snprintf(buf, sizeof(buf), "P%d", P_val);
+ strcat(theName, buf);
+ }
+
+ }
+ break;
+ case 2:{
+ const Uint32 seq = FsOpenReq::v2_getSequence(filenumber);
+ const Uint32 nodeId = FsOpenReq::v2_getNodeId(filenumber);
+ const Uint32 count = FsOpenReq::v2_getCount(filenumber);
+
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%d%s",
+ DIR_SEPARATOR, seq, DIR_SEPARATOR);
+ strcat(theName, buf);
+ if(count == 0xffffffff) {
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d.%d",
+ seq, nodeId); strcat(theName, buf);
+ } else {
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d-%d.%d",
+ seq, count, nodeId); strcat(theName, buf);
+ }
+ theLevelDepth = 2;
+ break;
+ }
+ break;
+ case 3:{
+ const Uint32 diskNo = FsOpenReq::v1_getDisk(filenumber);
+
+ if(diskNo == 0xFF){
+ ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","Invalid disk specification");
+ }
+
+ BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
+ strcat(theName, buf);
+ theLevelDepth++;
+ }
+ break;
+ default:
+ ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","Wrong version");
+ }
+ if (type >= noOfExtensions){
+ ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","File Type doesn't exist");
+ return;
+ }
+ strcat(theName, fileExtension[type]);
+
+ if(dir == true){
+ for(int l = strlen(theName) - 1; l >= 0; l--){
+ if(theName[l] == DIR_SEPARATOR[0]){
+ theName[l] = 0;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Find out directory name on level
+ * Ex:
+ * theName = "/tmp/fs/T0/NDBFS/D0/P0/S27.data"
+ * level = 1
+ * would return "/tmp/fs/T0/NDBFS/"
+ */
+const char* Filename::directory(int level)
+{
+ const char* p;
+
+ p = theName;
+ p += strlen(theBaseDirectory);
+
+ for (int i = 0; i <= level; i++){
+ p = strstr(p, DIR_SEPARATOR);
+ p++;
+ }
+
+ strncpy(theDirectory, theName, p - theName - 1);
+ theDirectory[p-theName-1] = 0;
+ return theDirectory;
+}
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp
new file mode 100644
index 00000000000..249c1b1ca10
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp
@@ -0,0 +1,100 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef Filename_H
+#define Filename_H
+
+//===========================================================================
+//
+// .DESCRIPTION
+// Takes a 128-bit value (stored as an array of four 32-bit words) and
+// makes a filename out of it according to the following schema
+// Bits 0-31 T
+// Bits 32-63 F
+// Bits 64-95 S
+// Bits 96-103 P
+// Bits 104-111 D
+// Bits 112-119 File Type
+// Bits 120-127 Version number of Filename
+//
+// T is used to find/create a directory. If T = 0xFFFFFFFF then the
+// file is on the top level; in that case F is of no relevance.
+// F, same as T.
+// S is used to find/create a filename. If S = 0xFFFFFFFF then it is ignored.
+// P, same as S (but only 8 bits wide, so its sentinel is 0xFF)
+// D is used to find/create the root directory, i.e. the
+// directory before the block name. If D = 0xFF then it is ignored.
+// File Type
+// 0 => .Data
+// 1 => .FragLog
+// 2 => .LocLog
+// 3 => .FragList
+// 4 => .TableList
+// 5 => .SchemaLog
+// 6 => .sysfile
+// 15=> ignored
+// Version number of Filename; the current version is 0x1 and must be
+// used for this style of options.
+//
+//
+//===========================================================================
+
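+// Illustrative example: the AsyncFileTest driver in this change encodes
+// T=27, F=27, S=27 plus a version byte, and its comment describes the
+// resulting name as /T27/F27/NDBFS/S27Pnn.data (the NDBFS part comes from
+// the block reference passed to set()).
+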
+#include <ndb_global.h>
+#include <kernel_types.h>
+
+class Filename
+{
+public:
+  // filenumber is 128 bits, split into four 32-bit words
+ Filename();
+ ~Filename();
+ void set(BlockReference blockReference,
+ const Uint32 filenumber[4], bool dir = false);
+ const char* baseDirectory() const;
+ const char* directory(int level);
+ int levels() const;
+ const char* c_str() const;
+
+ void init(Uint32 nodeid, const char * fileSystemPath,
+ const char * backupDirPath);
+
+private:
+ int theLevelDepth;
+ char theName[PATH_MAX];
+ char theFileSystemDirectory[PATH_MAX];
+ char theBackupDirectory[PATH_MAX];
+ char *theBaseDirectory;
+ char theDirectory[PATH_MAX];
+};
+
+// inline methods
+inline const char* Filename::c_str() const{
+ return theName;
+}
+
+inline const char* Filename::baseDirectory() const{
+ return theBaseDirectory;
+}
+
+inline int Filename::levels() const{
+ return theLevelDepth;
+}
+
+#endif
+
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am b/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am
new file mode 100644
index 00000000000..a22386f8612
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Makefile.am
@@ -0,0 +1,27 @@
+noinst_LIBRARIES = libndbfs.a
+
+libndbfs_a_SOURCES = \
+ AsyncFile.cpp \
+ Ndbfs.cpp VoidFs.cpp \
+ Filename.cpp \
+ CircularIndex.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libndbfs.dsp
+
+libndbfs.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libndbfs_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp
new file mode 100644
index 00000000000..a1aebdef7a1
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp
@@ -0,0 +1,18 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+//#include "MemoryChannel.hpp"
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
new file mode 100644
index 00000000000..03911d195ec
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
@@ -0,0 +1,166 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef MemoryChannel_H
+#define MemoryChannel_H
+
+//===========================================================================
+//
+// .DESCRIPTION
+// Pointer-based communication channel for communication between two
+// threads. It does not copy any data into or out of the channel, so an
+// item that is put in cannot be used until the other thread has
+// given it back. There is no support for detecting the return of an
+// item. The channel is half-duplex.
+// For communication between 1 writer and 1 reader use the MemoryChannel
+// class; for communication between multiple writers and 1 reader use the
+// MemoryChannelMultipleWriter. There is no support for multiple readers.
+//
+// .TYPICAL USE:
+// to communicate between threads.
+//
+// .EXAMPLE:
+// See AsyncFile.C
+//===========================================================================
+//
+//
+// MemoryChannel( int size= 256);
+// Constructor
+// Parameters:
+// size : number of pointers it can hold
+//
+// void operator ++ ();
+// increments the index by one; if size is reached it wraps around to zero
+//
+// virtual void writeChannel( T *t);
+// Puts the item in the channel; if the channel is full an error is reported.
+// Parameters:
+// t: pointer to the item to put in the channel, after this the item
+// is shared with the other thread.
+// errors
+// AFS_ERROR_CHANNALFULL, channel is full
+//
+// T* readChannel();
+// Reads an item from the channel; if the channel is empty it blocks until
+// an item can be read.
+// return
+// T : item from the channel
+//
+// T* tryReadChannel();
+// Reads an item from the channel; if the channel is empty it returns zero.
+// return
+// T : item from the channel or zero if the channel is empty.
+//
+
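+// A minimal usage sketch (see MemoryChannelTest.cpp in this change for a
+// complete producer/consumer example):
+//
+//   MemoryChannel<int>* channel = new MemoryChannel<int>;
+//   channel->writeChannel(p);            // producer thread
+//   int* q = channel->readChannel();     // consumer thread, blocks when empty
+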
+#if defined NDB_OSE || defined NDB_SOFTOSE
+#include "MemoryChannelOSE.hpp"
+#else
+
+#include "ErrorHandlingMacros.hpp"
+#include "Error.hpp"
+#include "CircularIndex.hpp"
+#include "NdbMutex.h"
+#include "NdbCondition.h"
+#include <NdbOut.hpp>
+
+
+template <class T>
+class MemoryChannel
+{
+public:
+ MemoryChannel( int size= 256);
+ virtual ~MemoryChannel( );
+
+ virtual void writeChannel( T *t);
+ T* readChannel();
+ T* tryReadChannel();
+
+private:
+ int theSize;
+ T **theChannel;
+ CircularIndex theWriteIndex;
+ CircularIndex theReadIndex;
+ NdbMutex* theMutexPtr;
+ NdbCondition* theConditionPtr;
+
+};
+
+
+template <class T> MemoryChannel<T>::MemoryChannel( int size):
+ theSize(size),
+ theChannel(new T*[size] ),
+ theWriteIndex(0, size),
+ theReadIndex(0, size)
+{
+ theMutexPtr = NdbMutex_Create();
+ theConditionPtr = NdbCondition_Create();
+}
+
+template <class T> MemoryChannel<T>::~MemoryChannel( )
+{
+ NdbMutex_Destroy(theMutexPtr);
+ NdbCondition_Destroy(theConditionPtr);
+ delete [] theChannel;
+}
+
+template <class T> void MemoryChannel<T>::writeChannel( T *t)
+{
+
+ NdbMutex_Lock(theMutexPtr);
+ if(full(theWriteIndex, theReadIndex) || theChannel == NULL) abort();
+ theChannel[theWriteIndex]= t;
+ ++theWriteIndex;
+ NdbMutex_Unlock(theMutexPtr);
+ NdbCondition_Signal(theConditionPtr);
+}
+
+
+template <class T> T* MemoryChannel<T>::readChannel()
+{
+ T* tmp;
+
+ NdbMutex_Lock(theMutexPtr);
+ while ( empty(theWriteIndex, theReadIndex) )
+ {
+ NdbCondition_Wait(theConditionPtr,
+ theMutexPtr);
+ }
+
+ tmp= theChannel[theReadIndex];
+ ++theReadIndex;
+ NdbMutex_Unlock(theMutexPtr);
+ return tmp;
+}
+
+template <class T> T* MemoryChannel<T>::tryReadChannel()
+{
+ T* tmp= 0;
+ NdbMutex_Lock(theMutexPtr);
+ NdbCondition_WaitTimeout(theConditionPtr,
+ theMutexPtr, 0);
+ if ( !empty(theWriteIndex, theReadIndex) )
+ {
+ tmp= theChannel[theReadIndex];
+ ++theReadIndex;
+ }
+ NdbMutex_Unlock(theMutexPtr);
+ return tmp;
+}
+
+#endif
+
+#endif // MemoryChannel_H
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp
new file mode 100644
index 00000000000..ca90bc60153
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp
@@ -0,0 +1,204 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef MemoryChannelOSE_H
+#define MemoryChannelOSE_H
+
+//===========================================================================
+//
+// .DESCRIPTION
+// Pointer-based communication channel for communication between two
+// threads. It sends the pointer to the other thread via an OSE signal.
+//
+// .TYPICAL USE:
+// to communicate between threads.
+//
+// .EXAMPLE:
+// See AsyncFile.C
+//===========================================================================
+//
+//
+// MemoryChannel( int size= 256);
+// Constructor
+// Parameters:
+// size : is ignored in the OSE version
+//
+// void operator ++ ();
+// increments the index by one; if size is reached it wraps around to zero
+//
+// virtual void writeChannel( T *t);
+// Puts the item in the channel; if the channel is full an error is reported.
+// Parameters:
+// t: pointer to the item to put in the channel, after this the item
+// is shared with the other thread.
+// errors
+// AFS_ERROR_CHANNALFULL, channel is full
+//
+// T* readChannel();
+// Reads an item from the channel; if the channel is empty it blocks until
+// an item can be read.
+// return
+// T : item from the channel
+//
+// T* tryReadChannel();
+// Reads an item from the channel; if the channel is empty it returns zero.
+// return
+// T : item from the channel or zero if the channel is empty.
+//
+
+#include <ose.h>
+#include "ErrorHandlingMacros.hpp"
+#include "Error.hpp"
+#include "NdbMutex.h"
+#include "NdbCondition.h"
+
+
+
+
+
+template <class T>
+class MemoryChannel
+{
+public:
+ MemoryChannel( int size= 256);
+ virtual ~MemoryChannel( );
+
+ virtual void writeChannel( T *t);
+ T* readChannel();
+ T* tryReadChannel();
+
+private:
+ PROCESS theReceiverPid;
+};
+
+template <class T> class MemoryChannelMultipleWriter:public MemoryChannel<T>
+{
+public:
+ MemoryChannelMultipleWriter( int size= 256);
+ ~MemoryChannelMultipleWriter( );
+ void writeChannel( T *t);
+
+private:
+};
+
+
+#define MEMCHANNEL_SIGBASE 5643
+
+#define MEMCHANNEL_SIGNAL (MEMCHANNEL_SIGBASE + 1) /* !-SIGNO(struct MemChannelSignal)-! */
+
+
+struct MemChannelSignal
+{
+ SIGSELECT sigNo;
+ void* ptr;
+};
+
+union SIGNAL
+{
+ SIGSELECT sigNo;
+ struct MemChannelSignal memChanSig;
+};
+
+template <class T> MemoryChannel<T>::MemoryChannel( int size )
+{
+ // Default receiver for this channel is the creating process
+ theReceiverPid = current_process();
+}
+
+template <class T> MemoryChannel<T>::~MemoryChannel( )
+{
+}
+
+template <class T> void MemoryChannel<T>::writeChannel( T *t)
+{
+ union SIGNAL* sig;
+
+ sig = alloc(sizeof(struct MemChannelSignal), MEMCHANNEL_SIGNAL);
+ ((struct MemChannelSignal*)sig)->ptr = t;
+ send(&sig, theReceiverPid);
+}
+
+
+template <class T> T* MemoryChannel<T>::readChannel()
+{
+ T* tmp;
+
+ static const SIGSELECT sel_mem[] = {1, MEMCHANNEL_SIGNAL};
+ union SIGNAL* sig;
+
+ tmp = NULL; /* Default value */
+
+ sig = receive((SIGSELECT*)sel_mem);
+ if (sig != NIL){
+ if (sig->sigNo == MEMCHANNEL_SIGNAL){
+ tmp = (T*)(((struct MemChannelSignal*)sig)->ptr);
+ }else{
+ assert(1==0);
+ }
+ free_buf(&sig);
+ }
+
+ return tmp;
+}
+
+template <class T> T* MemoryChannel<T>::tryReadChannel()
+{
+ T* tmp;
+
+ static const SIGSELECT sel_mem[] = {1, MEMCHANNEL_SIGNAL};
+ union SIGNAL* sig;
+
+ tmp = NULL; /* Default value */
+
+ sig = receive_w_tmo(0, (SIGSELECT*)sel_mem);
+ if (sig != NIL){
+ if (sig->sigNo == MEMCHANNEL_SIGNAL){
+ tmp = (T*)(((struct MemChannelSignal*)sig)->ptr);
+ }else{
+ assert(1==0);
+ }
+ free_buf(&sig);
+ }
+
+ return tmp;
+}
+
+
+#endif // MemoryChannel_H
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile
new file mode 100644
index 00000000000..68f71bfc4cd
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile
@@ -0,0 +1,13 @@
+include .defs.mk
+
+TYPE := kernel
+
+BIN_TARGET := mctest
+BIN_TARGET_ARCHIVES := portlib
+
+SOURCES = MemoryChannelTest.cpp
+
+CFLAGS_MemoryChannelTest.cpp = -I../
+
+include $(NDB_TOP)/Epilogue.mk
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
new file mode 100644
index 00000000000..b98c60693f4
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
@@ -0,0 +1,193 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "MemoryChannel.hpp"
+#include "NdbThread.h"
+#include "NdbSleep.h"
+#include "NdbOut.hpp"
+#include "NdbMain.h"
+
+
+
+MemoryChannel<int>* theMemoryChannel;
+
+
+extern "C" void* runProducer(void*arg)
+{
+  // The producer will write items into the MemoryChannel
+ int count = *(int*)arg;
+ int* p;
+ int i = 0;
+ while (i <= count)
+ {
+ p = new int(i);
+ ndbout << "P: " << *p << endl;
+ theMemoryChannel->writeChannel(p);
+ if (i%5==0)
+ NdbSleep_MilliSleep(i);
+ i++;
+ }
+ return NULL;
+}
+
+extern "C" void* runConsumer(void* arg)
+{
+  // The consumer will read items from the MemoryChannel and print them on screen
+ int count = *(int*)arg;
+ int* p;
+ int i = 0;
+ while (i < count)
+ {
+ p = theMemoryChannel->readChannel();
+ ndbout << "C: " << *p << endl;
+ i = *p;
+ delete p;
+
+ }
+ return NULL;
+}
+
+
+
+class ArgStruct
+{
+public:
+ ArgStruct(int _items, int _no){
+ items=_items;
+ no=_no;
+ };
+ int items;
+ int no;
+};
+
+MemoryChannelMultipleWriter<ArgStruct>* theMemoryChannel2;
+
+extern "C" void* runProducer2(void*arg)
+{
+  // The producer will write items into the MemoryChannel
+ ArgStruct* pArg = (ArgStruct*)arg;
+ int count = pArg->items;
+ ArgStruct* p;
+ int i = 0;
+ while (i < count)
+ {
+ p = new ArgStruct(i, pArg->no);
+ ndbout << "P"<<pArg->no<<": " << i << endl;
+ theMemoryChannel2->writeChannel(p);
+ NdbSleep_MilliSleep(i);
+ i++;
+ }
+ return NULL;
+}
+
+extern "C" void* runConsumer2(void* arg)
+{
+  // The consumer will read items from the MemoryChannel and print them on screen
+ ArgStruct* pArg = (ArgStruct*)arg;
+ int count = pArg->items * pArg->no;
+ ArgStruct* p;
+ int i = 0;
+ while (i < count)
+ {
+ p = theMemoryChannel2->readChannel();
+ ndbout << "C: "<< p->no << ", " << p->items << endl;
+ i++;
+ delete p;
+ }
+ ndbout << "Consumer2: " << count << " received" << endl;
+ return NULL;
+}
+
+
+
+
+//#if defined MEMORYCHANNELTEST
+
+//int main(int argc, char **argv)
+NDB_COMMAND(mctest, "mctest", "mctest", "Test the memory channel used in Ndb", 32768)
+{
+
+ ndbout << "==== testing MemoryChannel ====" << endl;
+
+ theMemoryChannel = new MemoryChannel<int>;
+ theMemoryChannel2 = new MemoryChannelMultipleWriter<ArgStruct>;
+
+ NdbThread* consumerThread;
+ NdbThread* producerThread;
+
+ NdbThread_SetConcurrencyLevel(2);
+
+ int numItems = 100;
+ producerThread = NdbThread_Create(runProducer,
+ (void**)&numItems,
+ 4096,
+ (char*)"producer");
+
+ consumerThread = NdbThread_Create(runConsumer,
+ (void**)&numItems,
+ 4096,
+ (char*)"consumer");
+
+
+ void *status;
+ NdbThread_WaitFor(consumerThread, &status);
+ NdbThread_WaitFor(producerThread, &status);
+
+ ndbout << "==== testing MemoryChannelMultipleWriter ====" << endl;
+#define NUM_THREADS2 5
+ NdbThread_SetConcurrencyLevel(NUM_THREADS2+2);
+ NdbThread* producerThreads[NUM_THREADS2];
+
+ ArgStruct *pArg;
+ for (int j = 0; j < NUM_THREADS2; j++)
+ {
+ char buf[25];
+ sprintf((char*)&buf, "producer%d", j);
+ pArg = new ArgStruct(numItems, j);
+ producerThreads[j] = NdbThread_Create(runProducer2,
+ (void**)pArg,
+ 4096,
+ (char*)&buf);
+ }
+
+ pArg = new ArgStruct(numItems, NUM_THREADS2);
+ consumerThread = NdbThread_Create(runConsumer2,
+ (void**)pArg,
+ 4096,
+ (char*)"consumer");
+
+
+ NdbThread_WaitFor(consumerThread, &status);
+ for (int j = 0; j < NUM_THREADS2; j++)
+ {
+ NdbThread_WaitFor(producerThreads[j], &status);
+ }
+
+
+ return 0;
+
+}
+
+void ErrorReporter::handleError(ErrorCategory type, int messageID,
+ const char* problemData, const char* objRef,
+ NdbShutdownType nst)
+{
+
+ ndbout << "ErrorReporter::handleError activated" << endl;
+ exit(1);
+}
+
+//#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
new file mode 100644
index 00000000000..9750e1c5179
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -0,0 +1,1018 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+
+#include "Ndbfs.hpp"
+#include "AsyncFile.hpp"
+#include "Filename.hpp"
+#include "Error.hpp"
+
+#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/FsCloseReq.hpp>
+#include <signaldata/FsReadWriteReq.hpp>
+#include <signaldata/FsAppendReq.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/NdbfsContinueB.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+
+#include <RefConvert.hpp>
+#include <NdbSleep.h>
+#include <NdbOut.hpp>
+#include <Configuration.hpp>
+
+#define DEBUG(x) { ndbout << "FS::" << x << endl; }
+
+inline
+int pageSize( const NewVARIABLE* baseAddrRef )
+{
+ int log_psize;
+ int log_qsize = baseAddrRef->bits.q;
+ int log_vsize = baseAddrRef->bits.v;
+ if (log_vsize < 3)
+ log_vsize = 3;
+ log_psize = log_qsize + log_vsize - 3;
+ return (1 << log_psize);
+}
+
+
+Ndbfs::Ndbfs(const Configuration & conf) :
+ SimulatedBlock(NDBFS, conf),
+ scanningInProgress(false),
+ theLastId(0),
+ m_maxOpenedFiles(0)
+{
+ theFileSystemPath = conf.fileSystemPath();
+ theBackupFilePath = conf.backupFilePath();
+
+ theRequestPool = new Pool<Request>;
+
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ m_maxFiles = 40;
+ ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
+
+ // Create idle AsyncFiles
+ Uint32 noIdleFiles = m_maxFiles > 27 ? 27 : m_maxFiles ;
+ for (Uint32 i = 0; i < noIdleFiles; i++){
+ theIdleFiles.push_back(createAsyncFile());
+ }
+
+ BLOCK_CONSTRUCTOR(Ndbfs);
+
+ // Set received signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Ndbfs::execDUMP_STATE_ORD);
+ addRecSignal(GSN_STTOR, &Ndbfs::execSTTOR);
+ addRecSignal(GSN_FSOPENREQ, &Ndbfs::execFSOPENREQ);
+ addRecSignal(GSN_FSCLOSEREQ, &Ndbfs::execFSCLOSEREQ);
+ addRecSignal(GSN_FSWRITEREQ, &Ndbfs::execFSWRITEREQ);
+ addRecSignal(GSN_FSREADREQ, &Ndbfs::execFSREADREQ);
+ addRecSignal(GSN_FSSYNCREQ, &Ndbfs::execFSSYNCREQ);
+ addRecSignal(GSN_CONTINUEB, &Ndbfs::execCONTINUEB);
+ addRecSignal(GSN_FSAPPENDREQ, &Ndbfs::execFSAPPENDREQ);
+ addRecSignal(GSN_FSREMOVEREQ, &Ndbfs::execFSREMOVEREQ);
+ // Set send signals
+}
+
+Ndbfs::~Ndbfs()
+{
+ // Delete all files
+  // The AsyncFile destructor will take care of deleting
+ // the thread it has created
+ for (unsigned i = 0; i < theFiles.size(); i++){
+ AsyncFile* file = theFiles[i];
+ delete file;
+ theFiles[i] = NULL;
+ }//for
+ theFiles.clear();
+
+ delete theRequestPool;
+}
+
+/* Received a restart signal.
+ * Answer it like any other block
+ * PR0 : StartCase
+ * DR0 : StartPhase
+ * DR1 : ?
+ * DR2 : ?
+ * DR3 : ?
+ * DR4 : ?
+ * DR5 : SignalKey
+ */
+void
+Ndbfs::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ if(signal->theData[1] == 0){ // StartPhase 0
+ jam();
+ cownref = NDBFS_REF;
+ // close all open files
+ ndbrequire(theOpenFiles.size() == 0);
+
+ scanningInProgress = false;
+
+ signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1);
+
+ signal->theData[3] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal,4, JBB);
+ return;
+ }
+ ndbrequire(0);
+}
+
+int
+Ndbfs::forward( AsyncFile * file, Request* request)
+{
+ jam();
+ file->execute(request);
+ return 1;
+}
+
+void
+Ndbfs::execFSOPENREQ(Signal* signal)
+{
+ jamEntry();
+ const FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ const BlockReference userRef = fsOpenReq->userReference;
+ AsyncFile* file = getIdleFile();
+ ndbrequire(file != NULL);
+  ndbrequire(signal->getLength() == FsOpenReq::SignalLength);
+ file->theFileName.set( userRef, fsOpenReq->fileNumber);
+ file->reportTo(&theFromThreads);
+
+ Request* request = theRequestPool->get();
+ request->action = Request::open;
+ request->error = 0;
+ request->par.open.flags = fsOpenReq->fileFlags;
+ request->set(userRef, fsOpenReq->userPointer, newId() );
+ request->file = file;
+ request->theTrace = signal->getTrace();
+
+ ndbrequire(forward(file, request));
+}
+
+void
+Ndbfs::execFSREMOVEREQ(Signal* signal)
+{
+ jamEntry();
+ const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr();
+ const BlockReference userRef = req->userReference;
+ AsyncFile* file = getIdleFile();
+ ndbrequire(file != NULL);
+
+ file->theFileName.set( userRef, req->fileNumber, req->directory);
+ file->reportTo(&theFromThreads);
+
+ Request* request = theRequestPool->get();
+ request->action = Request::rmrf;
+ request->par.rmrf.directory = req->directory;
+ request->par.rmrf.own_directory = req->ownDirectory;
+ request->error = 0;
+ request->set(userRef, req->userPointer, newId() );
+ request->file = file;
+ request->theTrace = signal->getTrace();
+
+ ndbrequire(forward(file, request));
+}
+
+/*
+ * PR0: File Pointer DR0: User reference DR1: User Pointer DR2: Flag bit 0= 1
+ * remove file
+ */
+void
+Ndbfs::execFSCLOSEREQ(Signal * signal)
+{
+ jamEntry();
+ const FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
+ const BlockReference userRef = fsCloseReq->userReference;
+ const Uint16 filePointer = (Uint16)fsCloseReq->filePointer;
+ const UintR userPointer = fsCloseReq->userPointer;
+
+ AsyncFile* openFile = theOpenFiles.find(filePointer);
+ if (openFile == NULL) {
+ // The file was not open, send error back to sender
+ jam();
+ // Initialise FsRef signal
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist);
+ fsRef->osErrorCode = ~0; // Indicate local error
+ sendSignal(userRef, GSN_FSCLOSEREF, signal, 3, JBB);
+ return;
+ }
+
+ Request *request = theRequestPool->get();
+ if( fsCloseReq->getRemoveFileFlag(fsCloseReq->fileFlag) == true ) {
+ jam();
+ request->action = Request::closeRemove;
+ } else {
+ jam();
+ request->action = Request::close;
+ }
+ request->set(userRef, fsCloseReq->userPointer, filePointer);
+ request->file = openFile;
+ request->error = 0;
+ request->theTrace = signal->getTrace();
+
+ ndbrequire(forward(openFile, request));
+}
+
+void
+Ndbfs::readWriteRequest(int action, Signal * signal)
+{
+ const FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+ Uint16 filePointer = (Uint16)fsRWReq->filePointer;
+ const UintR userPointer = fsRWReq->userPointer;
+ const BlockReference userRef = fsRWReq->userReference;
+ const BlockNumber blockNumber = refToBlock(userRef);
+
+ AsyncFile* openFile = theOpenFiles.find(filePointer);
+
+ const NewVARIABLE *myBaseAddrRef = &getBat(blockNumber)[fsRWReq->varIndex];
+ unsigned int tPageSize;
+ unsigned int tClusterSize;
+ unsigned int tNRR;
+ unsigned int tPageOffset;
+ char* tWA;
+ FsRef::NdbfsErrorCodeType errorCode;
+
+ Request *request = theRequestPool->get();
+ request->error = 0;
+ request->set(userRef, userPointer, filePointer);
+ request->file = openFile;
+ request->action = (Request::Action) action;
+ request->theTrace = signal->getTrace();
+
+ if (fsRWReq->numberOfPages == 0) { //Zero pages not allowed
+ jam();
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+
+ if (fsRWReq->varIndex >= getBatSize(blockNumber)) {
+ jam();// Ensure that a valid variable is used
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+ if (myBaseAddrRef == NULL) {
+ jam(); // Ensure that a valid variable is used
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+ if (openFile == NULL) {
+ jam(); //file not open
+ errorCode = FsRef::fsErrFileDoesNotExist;
+ goto error;
+ }
+ tPageSize = pageSize(myBaseAddrRef);
+ tClusterSize = myBaseAddrRef->ClusterSize;
+ tNRR = myBaseAddrRef->nrr;
+ tWA = (char*)myBaseAddrRef->WA;
+
+ switch (fsRWReq->getFormatFlag(fsRWReq->operationFlag)) {
+
+ // List of memory and file pages pairs
+ case FsReadWriteReq::fsFormatListOfPairs: {
+ jam();
+ for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) {
+ jam();
+ const Uint32 varIndex = fsRWReq->data.listOfPair[i].varIndex;
+ const Uint32 fileOffset = fsRWReq->data.listOfPair[i].fileOffset;
+ if (varIndex >= tNRR) {
+ jam();
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }//if
+ request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize];
+ request->par.readWrite.pages[i].size = tPageSize;
+ request->par.readWrite.pages[i].offset = fileOffset * tPageSize;
+ }//for
+ request->par.readWrite.numberOfPages = fsRWReq->numberOfPages;
+ break;
+ }//case
+
+ // Range of memory page with one file page
+ case FsReadWriteReq::fsFormatArrayOfPages: {
+ if ((fsRWReq->numberOfPages + fsRWReq->data.arrayOfPages.varIndex) > tNRR) {
+ jam();
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }//if
+ const Uint32 varIndex = fsRWReq->data.arrayOfPages.varIndex;
+ const Uint32 fileOffset = fsRWReq->data.arrayOfPages.fileOffset;
+
+ request->par.readWrite.pages[0].offset = fileOffset * tPageSize;
+ request->par.readWrite.pages[0].size = tPageSize * fsRWReq->numberOfPages;
+ request->par.readWrite.numberOfPages = 1;
+ request->par.readWrite.pages[0].buf = &tWA[varIndex * tPageSize];
+ break;
+ }//case
+
+ // List of memory pages followed by one file page
+ case FsReadWriteReq::fsFormatListOfMemPages: {
+
+ tPageOffset = fsRWReq->data.listOfMemPages.varIndex[fsRWReq->numberOfPages];
+ tPageOffset *= tPageSize;
+
+ for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) {
+ jam();
+ Uint32 varIndex = fsRWReq->data.listOfMemPages.varIndex[i];
+
+ if (varIndex >= tNRR) {
+ jam();
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }//if
+ request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize];
+ request->par.readWrite.pages[i].size = tPageSize;
+ request->par.readWrite.pages[i].offset = tPageOffset + (i*tPageSize);
+ }//for
+ request->par.readWrite.numberOfPages = fsRWReq->numberOfPages;
+ break;
+ // make it a writev or readv
+ }//case
+
+ default: {
+ jam();
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }//default
+
+ }//switch
+
+ ndbrequire(forward(openFile, request));
+ return;
+
+error:
+ theRequestPool->put(request);
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->setErrorCode(fsRef->errorCode, errorCode);
+ fsRef->osErrorCode = ~0; // Indicate local error
+ switch (action) {
+ case Request:: write:
+ case Request:: writeSync: {
+ jam();
+ sendSignal(userRef, GSN_FSWRITEREF, signal, 3, JBB);
+ break;
+ }//case
+ case Request:: read: {
+ jam();
+ sendSignal(userRef, GSN_FSREADREF, signal, 3, JBB);
+ }//case
+ }//switch
+ return;
+}
+
+/*
+ PR0: File Pointer , theData[0]
+ DR0: User reference, theData[1]
+ DR1: User Pointer, etc.
+ DR2: Flag
+ DR3: Var number
+ DR4: amount of pages
+ DR5->: Memory Page id and File page id according to Flag
+*/
+void
+Ndbfs::execFSWRITEREQ(Signal* signal)
+{
+ jamEntry();
+ const FsReadWriteReq * const fsWriteReq = (FsReadWriteReq *)&signal->theData[0];
+
+ if (fsWriteReq->getSyncFlag(fsWriteReq->operationFlag) == true){
+ jam();
+ readWriteRequest( Request::writeSync, signal );
+ } else {
+ jam();
+ readWriteRequest( Request::write, signal );
+ }
+}
+
+/*
+ PR0: File Pointer
+ DR0: User reference
+ DR1: User Pointer
+ DR2: Flag
+ DR3: Var number
+ DR4: amount of pages
+ DR5->: Memory Page id and File page id according to Flag
+*/
+void
+Ndbfs::execFSREADREQ(Signal* signal)
+{
+ jamEntry();
+ readWriteRequest( Request::read, signal );
+}
+
+/*
+ * PR0: File Pointer DR0: User reference DR1: User Pointer
+ */
+void
+Ndbfs::execFSSYNCREQ(Signal * signal)
+{
+ jamEntry();
+ Uint16 filePointer = (Uint16)signal->theData[0];
+ BlockReference userRef = signal->theData[1];
+ const UintR userPointer = signal->theData[2];
+ AsyncFile* openFile = theOpenFiles.find(filePointer);
+
+ if (openFile == NULL) {
+ jam(); //file not open
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist);
+ fsRef->osErrorCode = ~0; // Indicate local error
+ sendSignal(userRef, GSN_FSSYNCREF, signal, 3, JBB);
+ return;
+ }
+
+ Request *request = theRequestPool->get();
+ request->error = 0;
+ request->action = Request::sync;
+ request->set(userRef, userPointer, filePointer);
+ request->file = openFile;
+ request->theTrace = signal->getTrace();
+
+ ndbrequire(forward(openFile,request));
+}
+
+void
+Ndbfs::execFSAPPENDREQ(Signal * signal)
+{
+ const FsAppendReq * const fsReq = (FsAppendReq *)&signal->theData[0];
+ const Uint16 filePointer = (Uint16)fsReq->filePointer;
+ const UintR userPointer = fsReq->userPointer;
+ const BlockReference userRef = fsReq->userReference;
+ const BlockNumber blockNumber = refToBlock(userRef);
+
+ FsRef::NdbfsErrorCodeType errorCode;
+
+ AsyncFile* openFile = theOpenFiles.find(filePointer);
+ const NewVARIABLE *myBaseAddrRef = &getBat(blockNumber)[fsReq->varIndex];
+
+ const Uint32* tWA = (const Uint32*)myBaseAddrRef->WA;
+ const Uint32 tSz = myBaseAddrRef->nrr;
+ const Uint32 offset = fsReq->offset;
+ const Uint32 size = fsReq->size;
+ Request *request = theRequestPool->get();
+
+ if (openFile == NULL) {
+ jam();
+ errorCode = FsRef::fsErrFileDoesNotExist;
+ goto error;
+ }
+
+ if (myBaseAddrRef == NULL) {
+ jam(); // Ensure that a valid variable is used
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+
+ if (fsReq->varIndex >= getBatSize(blockNumber)) {
+ jam();// Ensure that a valid variable is used
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+
+ if(offset + size > tSz){
+ jam(); // Ensure that a valid variable is used
+ errorCode = FsRef::fsErrInvalidParameters;
+ goto error;
+ }
+
+ request->error = 0;
+ request->set(userRef, userPointer, filePointer);
+ request->file = openFile;
+ request->action = Request::append;
+ request->theTrace = signal->getTrace();
+
+ request->par.append.buf = (const char *)(tWA + offset);
+ request->par.append.size = size << 2;
+
+ ndbrequire(forward(openFile, request));
+ return;
+
+error:
+ jam();
+ theRequestPool->put(request);
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->setErrorCode(fsRef->errorCode, errorCode);
+ fsRef->osErrorCode = ~0; // Indicate local error
+
+ jam();
+ sendSignal(userRef, GSN_FSAPPENDREF, signal, 3, JBB);
+ return;
+}
+
+Uint16
+Ndbfs::newId()
+{
+ // finds a new key, e.g. a new file pointer
+ for (int i = 1; i < SHRT_MAX; i++)
+ {
+ if (theLastId == SHRT_MAX) {
+ jam();
+ theLastId = 1;
+ } else {
+ jam();
+ theLastId++;
+ }
+
+ if(theOpenFiles.find(theLastId) == NULL) {
+ jam();
+ return theLastId;
+ }
+ }
+ ndbrequire(1 == 0);
+ // The program will not reach this point
+ return 0;
+}
+
+AsyncFile*
+Ndbfs::createAsyncFile(){
+
+ // Check limit of open files
+ if (theFiles.size()+1 == m_maxFiles) {
+ // Print info about all open files
+ for (unsigned i = 0; i < theFiles.size(); i++){
+ AsyncFile* file = theFiles[i];
+ ndbout_c("%2d (0x%x): %s", i, file, file->isOpen()?"OPEN":"CLOSED");
+ }
+ ERROR_SET(fatal, AFS_ERROR_MAXOPEN,""," Ndbfs::createAsyncFile");
+ }
+
+ AsyncFile* file = new AsyncFile;
+ file->doStart(getOwnNodeId(), theFileSystemPath, theBackupFilePath);
+
+ // Put the file in list of all files
+ theFiles.push_back(file);
+
+#ifdef VM_TRACE
+ infoEvent("NDBFS: Created new file thread %d", theFiles.size());
+#endif
+
+ return file;
+}
+
+AsyncFile*
+Ndbfs::getIdleFile(){
+ AsyncFile* file;
+ if (theIdleFiles.size() > 0){
+ file = theIdleFiles[0];
+ theIdleFiles.erase(0);
+ } else {
+ file = createAsyncFile();
+ }
+ return file;
+}
+
+
+
+void
+Ndbfs::report(Request * request, Signal* signal)
+{
+ const Uint32 orgTrace = signal->getTrace();
+ signal->setTrace(request->theTrace);
+ const BlockReference ref = request->theUserReference;
+ if (request->error) {
+ jam();
+ // Initialise FsRef signal
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = request->theUserPointer;
+ fsRef->setErrorCode(fsRef->errorCode, translateErrno(request->error));
+ fsRef->osErrorCode = request->error;
+
+ switch (request->action) {
+ case Request:: open: {
+ jam();
+ // Put the file back in idle files list
+ theIdleFiles.push_back(request->file);
+ sendSignal(ref, GSN_FSOPENREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request:: closeRemove:
+ case Request:: close: {
+ jam();
+ sendSignal(ref, GSN_FSCLOSEREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request:: writeSync:
+ case Request:: writevSync:
+ case Request:: write:
+ case Request:: writev: {
+ jam();
+ sendSignal(ref, GSN_FSWRITEREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request:: read:
+ case Request:: readv: {
+ jam();
+ sendSignal(ref, GSN_FSREADREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request:: sync: {
+ jam();
+ sendSignal(ref, GSN_FSSYNCREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request::append: {
+ jam();
+ sendSignal(ref, GSN_FSAPPENDREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+ case Request::rmrf: {
+ jam();
+ // Put the file back in idle files list
+ theIdleFiles.push_back(request->file);
+ sendSignal(ref, GSN_FSREMOVEREF, signal, FsRef::SignalLength, JBB);
+ break;
+ }
+
+ case Request:: end: {
+ // Report nothing
+ break;
+ }
+ }//switch
+ } else {
+ jam();
+ FsConf * const fsConf = (FsConf *)&signal->theData[0];
+ fsConf->userPointer = request->theUserPointer;
+ switch (request->action) {
+ case Request:: open: {
+ jam();
+ theOpenFiles.insert(request->file, request->theFilePointer);
+
+ // Keep track of the max number of opened files
+ if (theOpenFiles.size() > m_maxOpenedFiles)
+ m_maxOpenedFiles = theOpenFiles.size();
+
+ fsConf->filePointer = request->theFilePointer;
+ sendSignal(ref, GSN_FSOPENCONF, signal, 3, JBB);
+ break;
+ }
+ case Request:: closeRemove:
+ case Request:: close: {
+ jam();
+ // removes the file from OpenFiles list
+ theOpenFiles.erase(request->theFilePointer);
+ // Put the file in idle files list
+ theIdleFiles.push_back(request->file);
+ sendSignal(ref, GSN_FSCLOSECONF, signal, 1, JBB);
+ break;
+ }
+ case Request:: writeSync:
+ case Request:: writevSync:
+ case Request:: write:
+ case Request:: writev: {
+ jam();
+ sendSignal(ref, GSN_FSWRITECONF, signal, 1, JBB);
+ break;
+ }
+ case Request:: read:
+ case Request:: readv: {
+ jam();
+ sendSignal(ref, GSN_FSREADCONF, signal, 1, JBB);
+ break;
+ }
+ case Request:: sync: {
+ jam();
+ sendSignal(ref, GSN_FSSYNCCONF, signal, 1, JBB);
+ break;
+ }//case
+ case Request::append: {
+ jam();
+ signal->theData[1] = request->par.append.size;
+ sendSignal(ref, GSN_FSAPPENDCONF, signal, 2, JBB);
+ break;
+ }
+ case Request::rmrf: {
+ jam();
+ // Put the file in idle files list
+ theIdleFiles.push_back(request->file);
+ sendSignal(ref, GSN_FSREMOVECONF, signal, 1, JBB);
+ break;
+ }
+ case Request:: end: {
+ // Report nothing
+ break;
+ }
+ }
+ }//if
+ signal->setTrace(orgTrace);
+}
+
+
+bool
+Ndbfs::scanIPC(Signal* signal)
+{
+ Request* request = theFromThreads.tryReadChannel();
+ jam();
+ if (request) {
+ jam();
+ report(request, signal);
+ theRequestPool->put(request);
+ return true;
+ }
+ return false;
+}
+
+#if defined NDB_WIN32
+int Ndbfs::translateErrno(int aErrno)
+{
+ switch (aErrno)
+ {
+ //permission denied
+ case ERROR_ACCESS_DENIED:
+
+ return FsRef::fsErrPermissionDenied;
+ //temporary not accessible
+ case ERROR_PATH_BUSY:
+ case ERROR_NO_MORE_SEARCH_HANDLES:
+
+ return FsRef::fsErrTemporaryNotAccessible;
+ //no space left on device
+ case ERROR_HANDLE_DISK_FULL:
+ case ERROR_DISK_FULL:
+
+ return FsRef::fsErrNoSpaceLeftOnDevice;
+ //none valid parameters
+ case ERROR_INVALID_HANDLE:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_INVALID_ACCESS:
+ case ERROR_HANDLE_EOF:
+ case ERROR_BUFFER_OVERFLOW:
+
+ return FsRef::fsErrInvalidParameters;
+ //environment error
+ case ERROR_CRC:
+ case ERROR_ARENA_TRASHED:
+ case ERROR_BAD_ENVIRONMENT:
+ case ERROR_INVALID_BLOCK:
+ case ERROR_WRITE_FAULT:
+ case ERROR_READ_FAULT:
+ case ERROR_OPEN_FAILED:
+
+ return FsRef::fsErrEnvironmentError;
+
+ //no more process resources
+ case ERROR_TOO_MANY_OPEN_FILES:
+ case ERROR_NOT_ENOUGH_MEMORY:
+ case ERROR_OUTOFMEMORY:
+ return FsRef::fsErrNoMoreResources;
+ //no file
+ case ERROR_FILE_NOT_FOUND:
+ return FsRef::fsErrFileDoesNotExist;
+
+ case ERR_ReadUnderflow:
+ return FsRef::fsErrReadUnderflow;
+
+ default:
+ return FsRef::fsErrUnknown;
+ }
+}
+#elif defined NDB_OSE || defined NDB_SOFTOSE
+int Ndbfs::translateErrno(int aErrno)
+{
+ switch (aErrno)
+ {
+ //permission denied
+ case EACCES:
+ case EROFS:
+ case ENXIO:
+ return FsRef::fsErrPermissionDenied;
+ //temporary not accessible
+ case EAGAIN:
+ case ETIMEDOUT:
+ case ENOLCK:
+ return FsRef::fsErrTemporaryNotAccessible;
+ //no space left on device
+ case ENFILE:
+ case EDQUOT:
+ case ENOSPC:
+ return FsRef::fsErrNoSpaceLeftOnDevice;
+ //none valid parameters
+ case EINVAL:
+ case EFBIG:
+ case EBADF:
+ case ENAMETOOLONG:
+ case EFAULT:
+ case EISDIR:
+ return FsRef::fsErrInvalidParameters;
+ //environment error
+ case EMLINK:
+ case ELOOP:
+ return FsRef::fsErrEnvironmentError;
+
+ //no more process resources
+ case EMFILE:
+ case ENOMEM:
+ return FsRef::fsErrNoMoreResources;
+ //no file
+ case ENOENT:
+ return FsRef::fsErrFileDoesNotExist;
+
+ case ERR_ReadUnderflow:
+ return FsRef::fsErrReadUnderflow;
+
+ default:
+ return FsRef::fsErrUnknown;
+ }
+}
+#else
+int Ndbfs::translateErrno(int aErrno)
+{
+ switch (aErrno)
+ {
+ //permission denied
+ case EACCES:
+ case EROFS:
+ case ENXIO:
+ return FsRef::fsErrPermissionDenied;
+ //temporary not accessible
+ case EAGAIN:
+ case ETIMEDOUT:
+ case ENOLCK:
+ case EINTR:
+ case EIO:
+ return FsRef::fsErrTemporaryNotAccessible;
+ //no space left on device
+ case ENFILE:
+ case EDQUOT:
+#ifdef ENOSR
+ case ENOSR:
+#endif
+ case ENOSPC:
+ case EFBIG:
+ return FsRef::fsErrNoSpaceLeftOnDevice;
+ //none valid parameters
+ case EINVAL:
+ case EBADF:
+ case ENAMETOOLONG:
+ case EFAULT:
+ case EISDIR:
+ case ENOTDIR:
+ case EEXIST:
+ case ETXTBSY:
+ return FsRef::fsErrInvalidParameters;
+ //environment error
+ case ELOOP:
+#ifdef ENOLINK
+ case ENOLINK:
+#endif
+#ifdef EMULTIHOP
+ case EMULTIHOP:
+#endif
+#ifdef EOPNOTSUPP
+ case EOPNOTSUPP:
+#endif
+#ifdef ESPIPE
+ case ESPIPE:
+#endif
+ case EPIPE:
+ return FsRef::fsErrEnvironmentError;
+
+ //no more process resources
+ case EMFILE:
+ case ENOMEM:
+ return FsRef::fsErrNoMoreResources;
+ //no file
+ case ENOENT:
+ return FsRef::fsErrFileDoesNotExist;
+
+ case ERR_ReadUnderflow:
+ return FsRef::fsErrReadUnderflow;
+
+ default:
+ return FsRef::fsErrUnknown;
+ }
+}
+#endif
+
+
+
+void
+Ndbfs::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ if (signal->theData[0] == NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY) {
+ jam();
+
+ // Also send CONTINUEB to ourselves in order to scan for
+ // incoming answers from AsyncFile on the MemoryChannel theFromThreads
+ signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
+ if (scanningInProgress == true) {
+ jam();
+ return;
+ }
+ }
+ if (scanIPC(signal)) {
+ jam();
+ scanningInProgress = true;
+ signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
+ } else {
+ jam();
+ scanningInProgress = false;
+ }
+ return;
+}
+
+bool Global_useO_SYNC = false;
+bool Global_useO_DIRECT = false;
+bool Global_unlinkO_CREAT = false;
+Uint32 Global_syncFreq = 1024 * 1024;
+
+void
+Ndbfs::execDUMP_STATE_ORD(Signal* signal)
+{
+ if(signal->theData[0] == 19){
+ if(signal->length() > 1){
+ Global_useO_SYNC = signal->theData[1];
+ }
+ if(signal->length() > 2){
+ Global_syncFreq = signal->theData[2] * 1024 * 1024;
+ }
+ if(signal->length() > 3){
+ Global_unlinkO_CREAT = signal->theData[3];
+ }
+ if(signal->length() > 4){
+ Global_useO_DIRECT = signal->theData[4];
+ }
+ ndbout_c("useO_SYNC = %d syncFreq = %d unlinkO_CREATE = %d O_DIRECT = %d",
+ Global_useO_SYNC,
+ Global_syncFreq,
+ Global_unlinkO_CREAT,
+ Global_useO_DIRECT);
+ return;
+ }
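+ // Usage note (added, inferred from the parsing above): from the management
+ // client, "ALL DUMP 19 <useO_SYNC> <syncFreqMB> <unlinkO_CREAT> <useO_DIRECT>"
+ // maps onto theData[1..4]; e.g. "ALL DUMP 19 1 8 0 1" would enable O_SYNC with
+ // an 8 MB sync frequency plus O_DIRECT. The exact client syntax is assumed.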
+ if(signal->theData[0] == DumpStateOrd::NdbfsDumpFileStat){
+ infoEvent("NDBFS: Files: %d Open files: %d",
+ theFiles.size(),
+ theOpenFiles.size());
+ infoEvent(" Idle files: %d Max opened files: %d",
+ theIdleFiles.size(),
+ m_maxOpenedFiles);
+ infoEvent(" Max files: %d",
+ m_maxFiles);
+ infoEvent(" Requests: %d",
+ theRequestPool->size());
+
+ return;
+ }
+ if(signal->theData[0] == DumpStateOrd::NdbfsDumpOpenFiles){
+ infoEvent("NDBFS: Dump open files: %d", theOpenFiles.size());
+
+ for (unsigned i = 0; i < theOpenFiles.size(); i++){
+ AsyncFile* file = theOpenFiles.getFile(i);
+ infoEvent("%2d (0x%x): %s", i,file, file->theFileName.c_str());
+ }
+ return;
+ }
+ if(signal->theData[0] == DumpStateOrd::NdbfsDumpAllFiles){
+ infoEvent("NDBFS: Dump all files: %d", theFiles.size());
+
+ for (unsigned i = 0; i < theFiles.size(); i++){
+ AsyncFile* file = theFiles[i];
+ infoEvent("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED");
+ }
+ return;
+ }
+ if(signal->theData[0] == DumpStateOrd::NdbfsDumpIdleFiles){
+ infoEvent("NDBFS: Dump idle files: %d", theIdleFiles.size());
+
+ for (unsigned i = 0; i < theIdleFiles.size(); i++){
+ AsyncFile* file = theIdleFiles[i];
+ infoEvent("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED");
+ }
+ return;
+ }
+}//Ndbfs::execDUMP_STATE_ORD()
+
+
+
+BLOCK_FUNCTIONS(Ndbfs)
+
+template class Vector<AsyncFile*>;
+template class Vector<OpenFiles::OpenFileItem>;
+template class MemoryChannel<Request>;
+template class Pool<Request>;
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
new file mode 100644
index 00000000000..c5aaa4e5c49
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
@@ -0,0 +1,127 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SIMBLOCKASYNCFILESYSTEM_H
+#define SIMBLOCKASYNCFILESYSTEM_H
+
+#include <pc.hpp>
+#include <SimulatedBlock.hpp>
+#include "Pool.hpp"
+#include "AsyncFile.hpp"
+#include "OpenFiles.hpp"
+
+
+
+// Because one NDB signal request can result in multiple requests to
+// AsyncFile, one class must be responsible for keeping track of all
+// outstanding requests; when all are finished, the result must be
+// reported to the sending block.
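+//
+// Illustrative flow (added note, inferred from the implementation): a client
+// block sends e.g. GSN_FSWRITEREQ; Ndbfs builds a Request from theRequestPool
+// and forwards it to an AsyncFile thread; completed requests come back on the
+// theFromThreads MemoryChannel, where scanIPC()/report() turn them into
+// GSN_FSWRITECONF or GSN_FSWRITEREF towards the sending block.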
+
+
+class Ndbfs : public SimulatedBlock
+{
+public:
+ Ndbfs(const class Configuration & conf);
+ virtual ~Ndbfs();
+
+protected:
+ BLOCK_DEFINES(Ndbfs);
+
+ // The signal processing functions
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execFSOPENREQ(Signal* signal);
+ void execFSCLOSEREQ(Signal* signal);
+ void execFSWRITEREQ(Signal* signal);
+ void execFSREADREQ(Signal* signal);
+ void execFSSYNCREQ(Signal* signal);
+ void execFSAPPENDREQ(Signal* signal);
+ void execFSREMOVEREQ(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+
+ bool scanningInProgress;
+ Uint16 newId();
+
+private:
+ int forward(AsyncFile *file, Request* Request);
+ void report(Request* request, Signal* signal);
+ bool scanIPC(Signal* signal);
+
+ // Declared but not defined
+ Ndbfs(Ndbfs & );
+ void operator = (Ndbfs &);
+
+ // Used for unique number generation
+ Uint16 theLastId;
+ BlockReference cownref;
+
+ // Communication from files
+ MemoryChannel<Request> theFromThreads;
+
+ Pool<Request>* theRequestPool;
+
+ AsyncFile* createAsyncFile();
+ AsyncFile* getIdleFile();
+
+ Vector<AsyncFile*> theFiles; // List all created AsyncFiles
+ Vector<AsyncFile*> theIdleFiles; // List of idle AsyncFiles
+ OpenFiles theOpenFiles; // List of open AsyncFiles
+ const char * theFileSystemPath;
+ const char * theBackupFilePath;
+
+ // Statistics variables
+ Uint32 m_maxOpenedFiles;
+
+ // Limit for max number of AsyncFiles created
+ Uint32 m_maxFiles;
+
+ void readWriteRequest( int action, Signal * signal );
+
+ static int translateErrno(int aErrno);
+};
+
+class VoidFs : public SimulatedBlock
+{
+public:
+ VoidFs(const class Configuration & conf);
+ virtual ~VoidFs();
+
+protected:
+ BLOCK_DEFINES(VoidFs);
+
+ // The signal processing functions
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execFSOPENREQ(Signal* signal);
+ void execFSCLOSEREQ(Signal* signal);
+ void execFSWRITEREQ(Signal* signal);
+ void execFSREADREQ(Signal* signal);
+ void execFSSYNCREQ(Signal* signal);
+ void execFSAPPENDREQ(Signal* signal);
+ void execFSREMOVEREQ(Signal* signal);
+ void execSTTOR(Signal* signal);
+
+private:
+ // Declared but not defined
+ VoidFs(VoidFs & );
+ void operator = (VoidFs &);
+
+ // Used for unique number generation
+ Uint32 c_maxFileNo;
+};
+
+#endif
+
+
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp b/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp
new file mode 100644
index 00000000000..b944bb5485b
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp
@@ -0,0 +1,114 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef OPENFILES_H
+#define OPENFILES_H
+
+#include <Vector.hpp>
+
+class OpenFiles
+{
+public:
+ OpenFiles(){ }
+
+ /* Get a pointer to the file with id */
+ AsyncFile* find(Uint16 id);
+ /* Insert file with id */
+ bool insert(AsyncFile* file, Uint16 id);
+ /* Erase file with id */
+ bool erase(Uint16 id);
+ /* Get number of open files */
+ unsigned size();
+
+ Uint16 getId(unsigned i);
+ AsyncFile* getFile(unsigned i);
+
+
+private:
+
+ class OpenFileItem {
+ public:
+ OpenFileItem(): m_file(NULL), m_id(0){};
+
+ AsyncFile* m_file;
+ Uint16 m_id;
+ };
+
+ Vector<OpenFileItem> m_files;
+};
+
+
+//*****************************************************************************
+inline AsyncFile* OpenFiles::find(Uint16 id){
+ for (unsigned i = 0; i < m_files.size(); i++){
+ if (m_files[i].m_id == id){
+ return m_files[i].m_file;
+ }
+ }
+ return NULL;
+}
+
+//*****************************************************************************
+inline bool OpenFiles::erase(Uint16 id){
+ for (unsigned i = 0; i < m_files.size(); i++){
+ if (m_files[i].m_id == id){
+ m_files.erase(i);
+ return true;
+ }
+ }
+ // Item was not found in list
+ return false;
+}
+
+
+//*****************************************************************************
+inline bool OpenFiles::insert(AsyncFile* file, Uint16 id){
+ // Check if file has already been opened
+ for (unsigned i = 0; i < m_files.size(); i++){
+ if(m_files[i].m_file == NULL)
+ continue;
+
+ if(strcmp(m_files[i].m_file->theFileName.c_str(),
+ file->theFileName.c_str()) == 0){
+ ERROR_SET(fatal, AFS_ERROR_ALLREADY_OPEN,"","OpenFiles::insert()");
+ }
+ }
+
+ // Insert the file into vector
+ OpenFileItem openFile;
+ openFile.m_id = id;
+ openFile.m_file = file;
+ m_files.push_back(openFile);
+
+ return true;
+}
+
+//*****************************************************************************
+inline Uint16 OpenFiles::getId(unsigned i){
+ return m_files[i].m_id;
+}
+
+//*****************************************************************************
+inline AsyncFile* OpenFiles::getFile(unsigned i){
+ return m_files[i].m_file;
+}
+
+//*****************************************************************************
+inline unsigned OpenFiles::size(){
+ return m_files.size();
+}
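+
+//*****************************************************************************
+// Usage sketch (added illustration, not part of the original header): OpenFiles
+// is a small id -> AsyncFile map used by Ndbfs. The variable names below are
+// hypothetical.
+//
+//   OpenFiles openFiles;
+//   openFiles.insert(file, id);           // fatal error if the name is already open
+//   AsyncFile* f = openFiles.find(id);    // returns NULL when id is not open
+//   if (f != NULL)
+//     openFiles.erase(id);                // returns true when the id was found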
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
new file mode 100644
index 00000000000..0410673af6f
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp
@@ -0,0 +1,261 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef FOR_LIB_POOL_H
+#define FOR_LIB_POOL_H
+
+
+//===========================================================================
+//
+// .PUBLIC
+//
+//===========================================================================
+
+////////////////////////////////////////////////////////////////
+//
+// enum { defInitSize = 256, defIncSize = 64 };
+// Description: type to store initial and incremental size in.
+//
+////////////////////////////////////////////////////////////////
+//
+// Pool(int anInitSize = defInitSize, int anIncSize = defIncSize);
+// Description:
+// Constructor. Allocates anInitSize objects of the template argument type.
+// When the pool runs out of elements, anIncSize elements are added to the
+// pool. (If the pool is not optimized to allocate multiple elements
+// efficiently, anIncSize MUST be set to 1 to get the best performance.)
+//
+// Parameters:
+// anInitSize: Initial size of the pool (# of elements in the pool)
+// anIncSize: # of elements added to the pool when a request to an empty
+// pool is made.
+// Return value:
+// _
+// Errors:
+// -
+// Asserts:
+// _
+//
+////////////////////////////////////////////////////////////////
+//
+// virtual ~Pool();
+// Description:
+// Elements in the pool are all deallocated.
+// Parameters:
+// _
+// Return value:
+// _
+// Errors:
+// -
+// Asserts:
+// theEmptyNodeList == 0, i.e. no elements are still in use.
+//
+////////////////////////////////////////////////////////////////
+//
+// T* get();
+// Description:
+// Gets an element from the Pool.
+// Parameters:
+// _
+// Return value:
+// T* the element extracted from the Pool. (The element must be cleared to
+// mimic a newly created element.)
+// Errors:
+// -
+// Asserts:
+// _
+//
+////////////////////////////////////////////////////////////////
+//
+// void put(T* aT);
+// Description:
+// Returns an element to the pool.
+// Parameters:
+// aT: The element to put back in the pool
+// Return value:
+// void
+// Errors:
+// -
+// Asserts:
+// The pool has "empty" slots to put the element back in.
+//
+//===========================================================================
+//
+// .PRIVATE
+//
+//===========================================================================
+
+////////////////////////////////////////////////////////////////
+//
+// void allocate(int aSize);
+// Description:
+// add aSize elements to the pool
+// Parameters:
+// aSize: # of elements to add to the pool
+// Return value:
+// void
+// Errors:
+// -
+// Asserts:
+// _
+//
+////////////////////////////////////////////////////////////////
+//
+// void deallocate();
+// Description:
+// frees all elements kept in the pool.
+// Parameters:
+// _
+// Return value:
+// void
+// Errors:
+// -
+// Asserts:
+// No elements are "empty" i.e. in use.
+//
+//===========================================================================
+//
+// .PRIVATE
+//
+//===========================================================================
+
+////////////////////////////////////////////////////////////////
+//
+// Pool<T>& operator=(const Pool<T>& cp);
+// Description:
+// Prohibit use of the assignment operator.
+// Parameters:
+// cp
+// Return value:
+// Pool<T>&
+// Asserts:
+// _
+//
+////////////////////////////////////////////////////////////////
+//
+// Pool(const Pool<T>& cp);
+// Description:
+// Prohibit use of default copy constructor.
+// Parameters:
+// cp
+// Return value:
+// _
+// Errors:
+// -
+// Asserts:
+// _
+//
+////////////////////////////////////////////////////////////////
+//
+// int initSize;
+// Description: initial size of the pool
+//
+////////////////////////////////////////////////////////////////
+//
+// int incSize;
+// Description: # of elements added to the pool when pool is exhausted.
+//
+////////////////////////////////////////////////////////////////
+//
+// PoolElement<T>* theFullNodeList;
+// Description: List to contain all "unused" elements in the pool
+//
+////////////////////////////////////////////////////////////////
+//
+// PoolElement<T>* theEmptyNodeList;
+// Description: List to contain all "in use" elements in the pool
+//
+//-------------------------------------------------------------------------
+
+template <class T>
+class Pool
+{
+public:
+ enum { defInitSize = 256, defIncSize = 64 };
+
+ Pool(int anInitSize = defInitSize, int anIncSize = defIncSize) :
+ theIncSize(anIncSize),
+ theTop(0),
+ theCurrentSize(0),
+ theList(0)
+ {
+ allocate(anInitSize);
+ }
+
+ virtual ~Pool(void)
+ {
+ for (int i=0; i <theTop ; ++i)
+ delete theList[i];
+
+ delete []theList;
+ }
+
+ T* get();
+ void put(T* aT);
+
+ unsigned size(){ return theTop; };
+
+protected:
+ void allocate(int aSize)
+ {
+ T** tList = theList;
+ int i;
+ theList = new T*[aSize+theCurrentSize];
+ // allocate full list
+ for (i = 0; i < theTop; i++) {
+ theList[i] = tList[i];
+ }
+ delete []tList;
+ for (; (theTop < aSize); theTop++){
+ theList[theTop] = (T*)new T;
+ }
+ theCurrentSize += aSize;
+ }
+
+private:
+ Pool<T>& operator=(const Pool<T>& cp);
+ Pool(const Pool<T>& cp);
+
+ int theIncSize;
+ int theTop;
+ int theCurrentSize;
+
+ T** theList;
+};
+
+//******************************************************************************
+template <class T> inline T* Pool<T>::get()
+{
+ T* tmp;
+ if( theTop == 0 )
+ {
+ allocate(theIncSize);
+ }
+ --theTop;
+ tmp = theList[theTop];
+ return tmp;
+}
+
+//
+//******************************************************************************
+template <class T> inline void Pool<T>::put(T* aT)
+{
+ theList[theTop]= aT;
+ ++theTop;
+}
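+
+//
+//******************************************************************************
+// Usage sketch (added illustration, not part of the original header): objects
+// are handed out as raw pointers and must be returned with put(), never deleted
+// by the caller. The pool name below is hypothetical.
+//
+//   Pool<Request> examplePool(256, 64);  // start with 256 elements, grow by 64
+//   Request* req = examplePool.get();    // grows the pool first if it is empty
+//   req->error = 0;                      // reused objects must be re-initialised
+//   examplePool.put(req);                // hand the object back for reuse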
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
new file mode 100644
index 00000000000..d093089acfc
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
@@ -0,0 +1,200 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <limits.h>
+#include <errno.h>
+
+#include "Ndbfs.hpp"
+#include "AsyncFile.hpp"
+#include "Filename.hpp"
+#include "Error.hpp"
+
+#include <signaldata/FsOpenReq.hpp>
+#include <signaldata/FsCloseReq.hpp>
+#include <signaldata/FsReadWriteReq.hpp>
+#include <signaldata/FsAppendReq.hpp>
+#include <signaldata/FsRemoveReq.hpp>
+#include <signaldata/FsConf.hpp>
+#include <signaldata/FsRef.hpp>
+#include <signaldata/NdbfsContinueB.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+
+#include <RefConvert.hpp>
+#include <NdbSleep.h>
+#include <NdbOut.hpp>
+#include <Configuration.hpp>
+
+#define DEBUG(x) { ndbout << "FS::" << x << endl; }
+
+VoidFs::VoidFs(const Configuration & conf) :
+ SimulatedBlock(NDBFS, conf)
+{
+ BLOCK_CONSTRUCTOR(VoidFs);
+
+ // Set received signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &VoidFs::execDUMP_STATE_ORD);
+ addRecSignal(GSN_STTOR, &VoidFs::execSTTOR);
+ addRecSignal(GSN_FSOPENREQ, &VoidFs::execFSOPENREQ);
+ addRecSignal(GSN_FSCLOSEREQ, &VoidFs::execFSCLOSEREQ);
+ addRecSignal(GSN_FSWRITEREQ, &VoidFs::execFSWRITEREQ);
+ addRecSignal(GSN_FSREADREQ, &VoidFs::execFSREADREQ);
+ addRecSignal(GSN_FSSYNCREQ, &VoidFs::execFSSYNCREQ);
+ addRecSignal(GSN_FSAPPENDREQ, &VoidFs::execFSAPPENDREQ);
+ addRecSignal(GSN_FSREMOVEREQ, &VoidFs::execFSREMOVEREQ);
+ // Set send signals
+}
+
+VoidFs::~VoidFs()
+{
+}
+
+void
+VoidFs::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ if(signal->theData[1] == 0){ // StartPhase 0
+ jam();
+ signal->theData[3] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 4, JBB);
+ return;
+ }
+ ndbrequire(0);
+}
+
+void
+VoidFs::execFSOPENREQ(Signal* signal)
+{
+ jamEntry();
+ const FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
+ const BlockReference userRef = fsOpenReq->userReference;
+ const Uint32 userPointer = fsOpenReq->userPointer;
+
+ Uint32 flags = fsOpenReq->fileFlags;
+ if(flags == FsOpenReq::OM_READONLY){
+ // Initialise FsRef signal
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->errorCode = FsRef::fsErrFileDoesNotExist;
+ fsRef->osErrorCode = ~0;
+ sendSignal(userRef, GSN_FSOPENREF, signal, 3, JBB);
+ return;
+ }
+
+ if(flags & FsOpenReq::OM_WRITEONLY || flags & FsOpenReq::OM_READWRITE){
+ signal->theData[0] = userPointer;
+ signal->theData[1] = c_maxFileNo++;
+ sendSignal(userRef, GSN_FSOPENCONF, signal, 2, JBB);
+ }
+}
+
+void
+VoidFs::execFSREMOVEREQ(Signal* signal)
+{
+ jamEntry();
+ const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr();
+ const Uint32 userRef = req->userReference;
+ const Uint32 userPointer = req->userPointer;
+
+ signal->theData[0] = userPointer;
+ sendSignal(userRef, GSN_FSREMOVECONF, signal, 1, JBB);
+}
+
+/*
+ * PR0: File Pointer, DR0: User reference, DR1: User Pointer,
+ * DR2: Flag (bit 0 = 1 means remove the file)
+ */
+void
+VoidFs::execFSCLOSEREQ(Signal * signal)
+{
+ jamEntry();
+
+ const FsCloseReq * const req = (FsCloseReq *)signal->getDataPtr();
+ const Uint32 userRef = req->userReference;
+ const Uint32 userPointer = req->userPointer;
+
+ signal->theData[0] = userPointer;
+ sendSignal(userRef, GSN_FSCLOSECONF, signal, 1, JBB);
+}
+
+void
+VoidFs::execFSWRITEREQ(Signal* signal)
+{
+ jamEntry();
+ const FsReadWriteReq * const req = (FsReadWriteReq *)signal->getDataPtr();
+ const Uint32 userRef = req->userReference;
+ const Uint32 userPointer = req->userPointer;
+
+ signal->theData[0] = userPointer;
+ sendSignal(userRef, GSN_FSWRITECONF, signal, 1, JBB);
+}
+
+void
+VoidFs::execFSREADREQ(Signal* signal)
+{
+ jamEntry();
+
+ const FsReadWriteReq * const req = (FsReadWriteReq *)signal->getDataPtr();
+ const Uint32 userRef = req->userReference;
+ const Uint32 userPointer = req->userPointer;
+
+ signal->theData[0] = userPointer;
+ sendSignal(userRef, GSN_FSREADCONF, signal, 1, JBB);
+#if 0
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = userPointer;
+ fsRef->errorCode = FsRef::fsErrEnvironmentError;
+ fsRef->osErrorCode = ~0; // Indicate local error
+ sendSignal(userRef, GSN_FSREADREF, signal, 3, JBB);
+#endif
+}
+
+void
+VoidFs::execFSSYNCREQ(Signal * signal)
+{
+ jamEntry();
+
+ BlockReference userRef = signal->theData[1];
+ const UintR userPointer = signal->theData[2];
+
+ signal->theData[0] = userPointer;
+ sendSignal(userRef, GSN_FSSYNCCONF, signal, 1, JBB);
+
+ return;
+}
+
+void
+VoidFs::execFSAPPENDREQ(Signal * signal)
+{
+ const FsAppendReq * const fsReq = (FsAppendReq *)&signal->theData[0];
+ const UintR userPointer = fsReq->userPointer;
+ const BlockReference userRef = fsReq->userReference;
+ const Uint32 size = fsReq->size;
+
+ signal->theData[0] = userPointer;
+ signal->theData[1] = size << 2;
+ sendSignal(userRef, GSN_FSAPPENDCONF, signal, 2, JBB);
+}
+
+void
+VoidFs::execDUMP_STATE_ORD(Signal* signal)
+{
+}//VoidFs::execDUMP_STATE_ORD()
+
+
+
+BLOCK_FUNCTIONS(VoidFs)
+
diff --git a/storage/ndb/src/kernel/blocks/new-block.tar.gz b/storage/ndb/src/kernel/blocks/new-block.tar.gz
new file mode 100644
index 00000000000..327503ea0b1
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/new-block.tar.gz
Binary files differ
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Makefile.am b/storage/ndb/src/kernel/blocks/qmgr/Makefile.am
new file mode 100644
index 00000000000..278af2a7865
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/Makefile.am
@@ -0,0 +1,25 @@
+noinst_LIBRARIES = libqmgr.a
+
+libqmgr_a_SOURCES = \
+ QmgrInit.cpp \
+ QmgrMain.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libqmgr.dsp
+
+libqmgr.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libqmgr_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
new file mode 100644
index 00000000000..bff79215264
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -0,0 +1,392 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef QMGR_H
+#define QMGR_H
+
+
+#include <pc.hpp>
+#include <NdbTick.h>
+#include <SimulatedBlock.hpp>
+#include <NodeBitmask.hpp>
+#include <SignalCounter.hpp>
+
+#include <signaldata/EventReport.hpp>
+#include <signaldata/ArbitSignalData.hpp>
+#include <signaldata/CmRegSignalData.hpp>
+#include <signaldata/ApiRegSignalData.hpp>
+#include <signaldata/FailRep.hpp>
+
+#include "timer.hpp"
+
+#ifdef QMGR_C
+
+#define NO_REG_APP 1
+
+/* Delay values, ms -----------------------------*/
+#define ZDELAY_REGREQ 1000
+
+/* Type of refuse in CM_NODEINFOREF -------------*/
+#define ZNOT_RUNNING 0
+
+/* Type of continue in CONTINUEB ----------------*/
+#define ZREGREQ_TIMELIMIT 0
+#define ZHB_HANDLING 1
+#define ZREGREQ_MASTER_TIMELIMIT 2
+#define ZAPI_HB_HANDLING 3
+#define ZTIMER_HANDLING 4
+#define ZARBIT_HANDLING 5
+
+/* Error Codes ------------------------------*/
+#define ZERRTOOMANY 1101
+#define ZERRALREADYREG 1102
+#define ZERRNHMISSING 1103
+#define ZERRNLMISSING 1104
+#define ZERRAPPMISSING 1105
+#define ZERROR_NOT_IN_CFGFILE 1106
+#define ZERROR_TIMEOUT 1107
+#define ZERROR_NOT_ZINIT 1108
+#define ZERROR_NODEINFOREF 1109
+#define ZERROR_NOTLOCALQMGR 1110
+#define ZERROR_NOTRUNNING 1111
+#define ZCOULD_NOT_OCCUR_ERROR 1112
+#define ZTIME_OUT_ERROR 1113
+#define ZERROR_NOT_DEAD 1114
+#define ZDECLARED_FAIL_ERROR 1115
+#define ZOWN_NODE_ERROR 1116
+#define ZWRONG_STATE_ERROR 1117
+#define ZNODE_ZERO_ERROR 1118
+#define ZWRONG_NODE_ERROR 1119
+
+#endif
+
+
+class Qmgr : public SimulatedBlock {
+public:
+ // State values
+ enum QmgrState {
+ Q_NOT_ACTIVE = 0,
+ Q_ACTIVE = 1
+ };
+
+ enum FailState {
+ NORMAL = 0,
+ WAITING_FOR_FAILCONF1 = 1,
+ WAITING_FOR_FAILCONF2 = 2,
+ WAITING_FOR_NDB_FAILCONF = 3
+ };
+
+ enum Phase {
+ ZINIT = 1, /* All nodes start in phase INIT */
+ ZSTARTING = 2, /* Node is connecting to cluster */
+ ZRUNNING = 3, /* Node is running in the cluster */
+ ZPREPARE_FAIL = 4, /* PREPARATION FOR FAILURE */
+ ZFAIL_CLOSING = 5, /* API/NDB IS DISCONNECTING */
+ ZAPI_ACTIVE = 6, /* API IS RUNNING IN NODE */
+ ZAPI_INACTIVE = 7 /* Inactive API */
+ };
+
+ struct StartRecord {
+ void reset(){ m_startKey++; m_startNode = 0;}
+ Uint32 m_startKey;
+ Uint32 m_startNode;
+ Uint64 m_startTimeout;
+
+ Uint32 m_gsn;
+ SignalCounter m_nodes;
+ } c_start;
+
+ NdbNodeBitmask c_definedNodes; // DB nodes in config
+ NdbNodeBitmask c_clusterNodes; // DB nodes in cluster
+ NodeBitmask c_connectedNodes; // All kinds of connected nodes
+ Uint32 c_maxDynamicId;
+
+ // Records
+ struct NodeRec {
+ UintR ndynamicId;
+ Phase phase;
+ UintR alarmCount;
+
+ QmgrState sendPrepFailReqStatus;
+ QmgrState sendCommitFailReqStatus;
+ QmgrState sendPresToStatus;
+ FailState failState;
+ BlockReference rcv[2]; // remember which failconf we have received
+ BlockReference blockRef;
+
+ NodeRec() { }
+ }; /* p2c: size = 52 bytes */
+
+ typedef Ptr<NodeRec> NodeRecPtr;
+
+ enum ArbitState {
+ ARBIT_NULL = 0,
+ ARBIT_INIT = 1, // create new ticket
+ ARBIT_FIND = 2, // find candidate arbitrator node
+ ARBIT_PREP1 = 3, // PREP db nodes with null ticket
+ ARBIT_PREP2 = 4, // PREP db nodes with current ticket
+ ARBIT_START = 5, // START arbitrator API thread
+ ARBIT_RUN = 6, // running with arbitrator
+ ARBIT_CHOOSE = 7, // ask arbitrator after network partition
+ ARBIT_CRASH = 8 // crash ourselves
+ };
+
+ struct ArbitRec {
+ ArbitState state; // state
+ bool newstate; // flag to initialize new state
+ unsigned thread; // identifies a continueB "thread"
+ NodeId node; // current arbitrator candidate
+ ArbitTicket ticket; // ticket
+ NodeBitmask apiMask[1+2]; // arbitrators 0=all 1,2=per rank
+ NodeBitmask newMask; // new nodes to process in RUN state
+ Uint8 sendCount; // control send/recv of signals
+ Uint8 recvCount;
+ NodeBitmask recvMask; // left to recv
+ Uint32 code; // code field from signal
+ Uint32 failureNr; // cfailureNr at arbitration start
+ Uint32 timeout; // timeout for CHOOSE state
+ NDB_TICKS timestamp; // timestamp for checking timeouts
+
+ inline bool match(ArbitSignalData* sd) {
+ return
+ node == sd->node &&
+ ticket.match(sd->ticket);
+ }
+
+ inline void setTimestamp() {
+ timestamp = NdbTick_CurrentMillisecond();
+ }
+
+ inline NDB_TICKS getTimediff() {
+ NDB_TICKS now = NdbTick_CurrentMillisecond();
+ return now < timestamp ? 0 : now - timestamp;
+ }
+ };
+
+public:
+ Qmgr(const class Configuration &);
+ virtual ~Qmgr();
+
+private:
+ BLOCK_DEFINES(Qmgr);
+
+ // Transit signals
+ void execDEBUG_SIG(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execCM_HEARTBEAT(Signal* signal);
+ void execCM_ADD(Signal* signal);
+ void execCM_ACKADD(Signal* signal);
+ void execCM_REGREQ(Signal* signal);
+ void execCM_REGCONF(Signal* signal);
+ void execCM_REGREF(Signal* signal);
+ void execCM_NODEINFOREQ(Signal* signal);
+ void execCM_NODEINFOCONF(Signal* signal);
+ void execCM_NODEINFOREF(Signal* signal);
+ void execPREP_FAILREQ(Signal* signal);
+ void execPREP_FAILCONF(Signal* signal);
+ void execPREP_FAILREF(Signal* signal);
+ void execCOMMIT_FAILREQ(Signal* signal);
+ void execCOMMIT_FAILCONF(Signal* signal);
+ void execFAIL_REP(Signal* signal);
+ void execPRES_TOREQ(Signal* signal);
+ void execPRES_TOCONF(Signal* signal);
+ void execDISCONNECT_REP(Signal* signal);
+ void execSYSTEM_ERROR(Signal* signal);
+
+ // Received signals
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execCONNECT_REP(Signal* signal);
+ void execNDB_FAILCONF(Signal* signal);
+ void execSTTOR(Signal* signal);
+ void execCM_INFOCONF(Signal* signal);
+ void execCLOSE_COMCONF(Signal* signal);
+ void execAPI_REGREQ(Signal* signal);
+ void execAPI_FAILCONF(Signal* signal);
+ void execREAD_NODESREQ(Signal* signal);
+ void execSET_VAR_REQ(Signal* signal);
+
+
+ void execAPI_VERSION_REQ(Signal* signal);
+
+ // Arbitration signals
+ void execARBIT_CFG(Signal* signal);
+ void execARBIT_PREPREQ(Signal* signal);
+ void execARBIT_PREPCONF(Signal* signal);
+ void execARBIT_PREPREF(Signal* signal);
+ void execARBIT_STARTCONF(Signal* signal);
+ void execARBIT_STARTREF(Signal* signal);
+ void execARBIT_CHOOSECONF(Signal* signal);
+ void execARBIT_CHOOSEREF(Signal* signal);
+ void execARBIT_STOPREP(Signal* signal);
+
+ // Statement blocks
+ void node_failed(Signal* signal, Uint16 aFailedNode);
+ void checkStartInterface(Signal* signal);
+ void failReport(Signal* signal,
+ Uint16 aFailedNode,
+ UintR aSendFailRep,
+ FailRep::FailCause failCause);
+ void findNeighbours(Signal* signal);
+ Uint16 translateDynamicIdToNodeId(Signal* signal, UintR TdynamicId);
+
+ void initData(Signal* signal);
+ void sendCloseComReq(Signal* signal, BlockReference TBRef, Uint16 TfailNo);
+ void sendPrepFailReq(Signal* signal, Uint16 aNode);
+ void sendApiFailReq(Signal* signal, Uint16 aFailedNode);
+ void sendApiRegRef(Signal*, Uint32 ref, ApiRegRef::ErrorCode);
+
+ // Generated statement blocks
+ void startphase1(Signal* signal);
+ void electionWon();
+ void cmInfoconf010Lab(Signal* signal);
+ void apiHbHandlingLab(Signal* signal);
+ void timerHandlingLab(Signal* signal);
+ void hbReceivedLab(Signal* signal);
+ void sendCmRegrefLab(Signal* signal, BlockReference ref,
+ CmRegRef::ErrorCode);
+ void systemErrorBecauseOtherNodeFailed(Signal* signal, NodeId);
+ void systemErrorLab(Signal* signal,
+ const char* message = NULL);
+ void prepFailReqLab(Signal* signal);
+ void prepFailConfLab(Signal* signal);
+ void prepFailRefLab(Signal* signal);
+ void commitFailReqLab(Signal* signal);
+ void commitFailConfLab(Signal* signal);
+ void failReportLab(Signal* signal, Uint16 aFailedNode,
+ FailRep::FailCause aFailCause);
+ void sendCommitFailReq(Signal* signal);
+ void presToConfLab(Signal* signal);
+ void sendSttorryLab(Signal* signal);
+ void sttor020Lab(Signal* signal);
+ void closeComConfLab(Signal* signal);
+ void apiRegReqLab(Signal* signal);
+ void regreqTimeLimitLab(Signal* signal);
+ void regreqTimeMasterLimitLab(Signal* signal);
+ void cmRegreq010Lab(Signal* signal);
+ void cmRegconf010Lab(Signal* signal);
+ void sttor010Lab(Signal* signal);
+ void sendHeartbeat(Signal* signal);
+ void checkHeartbeat(Signal* signal);
+ void setHbDelay(UintR aHbDelay);
+ void setHbApiDelay(UintR aHbApiDelay);
+ void setArbitTimeout(UintR aArbitTimeout);
+
+ // Interface to arbitration module
+ void handleArbitStart(Signal* signal);
+ void handleArbitApiFail(Signal* signal, Uint16 nodeId);
+ void handleArbitNdbAdd(Signal* signal, Uint16 nodeId);
+ void handleArbitCheck(Signal* signal);
+
+ // Private arbitration routines
+ Uint32 getArbitDelay();
+ Uint32 getArbitTimeout();
+ void startArbitThread(Signal* signal);
+ void runArbitThread(Signal* signal);
+ void stateArbitInit(Signal* signal);
+ void stateArbitFind(Signal* signal);
+ void stateArbitPrep(Signal* signal);
+ void stateArbitStart(Signal* signal);
+ void stateArbitRun(Signal* signal);
+ void stateArbitChoose(Signal* signal);
+ void stateArbitCrash(Signal* signal);
+ void computeArbitNdbMask(NodeBitmask& aMask);
+ void reportArbitEvent(Signal* signal, Ndb_logevent_type type);
+
+ // Initialisation
+ void initData();
+ void initRecords();
+
+ // Transit signals
+ // Variables
+
+ bool checkAPIVersion(NodeId, Uint32 nodeVersion, Uint32 ownVersion) const;
+ bool checkNDBVersion(NodeId, Uint32 nodeVersion, Uint32 ownVersion) const;
+
+ void cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec* self);
+ void sendCmAckAdd(Signal *, Uint32 nodeId, CmAdd::RequestType);
+ void joinedCluster(Signal* signal, NodeRecPtr nodePtr);
+ void sendCmRegReq(Signal * signal, Uint32 nodeId);
+ void sendCmNodeInfoReq(Signal* signal, Uint32 nodeId, const NodeRec * self);
+
+private:
+ void sendPrepFailReqRef(Signal* signal,
+ Uint32 dstBlockRef,
+ GlobalSignalNumber gsn,
+ Uint32 blockRef,
+ Uint32 failNo,
+ Uint32 noOfNodes,
+ const NodeId theNodes[]);
+
+
+
+ /* Wait this time until we try to join the */
+ /* cluster again */
+
+ /**** Common stored variables ****/
+
+ NodeRec *nodeRec;
+ ArbitRec arbitRec;
+
+ /* Block references ------------------------------*/
+ BlockReference cpdistref; /* Dist. ref of president */
+
+ /* Node numbers. ---------------------------------*/
+ Uint16 cneighbourl; /* Node no. of lower neighbour */
+ Uint16 cneighbourh; /* Node no. of higher neighbour */
+ Uint16 cpresident; /* Node no. of president */
+
+ /* Counters --------------------------------------*/
+ Uint16 cnoOfNodes; /* Static node counter */
+ /* Status flags ----------------------------------*/
+
+ Uint32 c_restartPartialTimeout;
+
+ Uint16 creadyDistCom;
+ Uint16 c_regReqReqSent;
+ Uint16 c_regReqReqRecv;
+ Uint64 c_stopElectionTime;
+ Uint16 cpresidentCandidate;
+ Uint16 cdelayRegreq;
+ Uint16 cpresidentAlive;
+ Uint16 cnoFailedNodes;
+ Uint16 cnoPrepFailedNodes;
+ Uint16 cnoCommitFailedNodes;
+ Uint16 cactivateApiCheck;
+ UintR chbApiDelay;
+
+ UintR ccommitFailureNr;
+ UintR cprepareFailureNr;
+ UintR ctoFailureNr;
+ UintR cfailureNr;
+
+ QmgrState ctoStatus;
+ UintR cLqhTimeSignalCount;
+ bool cHbSent;
+ NDB_TICKS clatestTransactionCheck;
+
+ class Timer interface_check_timer;
+ class Timer hb_check_timer;
+ class Timer hb_send_timer;
+ class Timer hb_api_timer;
+
+
+ Uint16 cfailedNodes[MAX_NDB_NODES];
+ Uint16 cprepFailedNodes[MAX_NDB_NODES];
+ Uint16 ccommitFailedNodes[MAX_NDB_NODES];
+
+};
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
new file mode 100644
index 00000000000..ecaeadff47a
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -0,0 +1,106 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+
+#define QMGR_C
+#include "Qmgr.hpp"
+
+#define DEBUG(x) { ndbout << "Qmgr::" << x << endl; }
+
+
+void Qmgr::initData()
+{
+ creadyDistCom = ZFALSE;
+
+ // Records with constant sizes
+ nodeRec = new NodeRec[MAX_NODES];
+
+ cnoCommitFailedNodes = 0;
+ c_maxDynamicId = 0;
+ c_clusterNodes.clear();
+
+ Uint32 hbDBAPI = 500;
+ setHbApiDelay(hbDBAPI);
+
+ c_connectedNodes.clear();
+ c_connectedNodes.set(getOwnNodeId());
+}//Qmgr::initData()
+
+void Qmgr::initRecords()
+{
+ // Records with dynamic sizes
+}//Qmgr::initRecords()
+
+Qmgr::Qmgr(const class Configuration & conf)
+ : SimulatedBlock(QMGR, conf)
+{
+ BLOCK_CONSTRUCTOR(Qmgr);
+
+ // Transit signals
+ addRecSignal(GSN_DUMP_STATE_ORD, &Qmgr::execDUMP_STATE_ORD);
+ addRecSignal(GSN_DEBUG_SIG, &Qmgr::execDEBUG_SIG);
+ addRecSignal(GSN_CONTINUEB, &Qmgr::execCONTINUEB);
+ addRecSignal(GSN_CM_HEARTBEAT, &Qmgr::execCM_HEARTBEAT);
+ addRecSignal(GSN_CM_ADD, &Qmgr::execCM_ADD);
+ addRecSignal(GSN_CM_ACKADD, &Qmgr::execCM_ACKADD);
+ addRecSignal(GSN_CM_REGREQ, &Qmgr::execCM_REGREQ);
+ addRecSignal(GSN_CM_REGCONF, &Qmgr::execCM_REGCONF);
+ addRecSignal(GSN_CM_REGREF, &Qmgr::execCM_REGREF);
+ addRecSignal(GSN_CM_NODEINFOREQ, &Qmgr::execCM_NODEINFOREQ);
+ addRecSignal(GSN_CM_NODEINFOCONF, &Qmgr::execCM_NODEINFOCONF);
+ addRecSignal(GSN_CM_NODEINFOREF, &Qmgr::execCM_NODEINFOREF);
+ addRecSignal(GSN_PREP_FAILREQ, &Qmgr::execPREP_FAILREQ);
+ addRecSignal(GSN_PREP_FAILCONF, &Qmgr::execPREP_FAILCONF);
+ addRecSignal(GSN_PREP_FAILREF, &Qmgr::execPREP_FAILREF);
+ addRecSignal(GSN_COMMIT_FAILREQ, &Qmgr::execCOMMIT_FAILREQ);
+ addRecSignal(GSN_COMMIT_FAILCONF, &Qmgr::execCOMMIT_FAILCONF);
+ addRecSignal(GSN_FAIL_REP, &Qmgr::execFAIL_REP);
+ addRecSignal(GSN_PRES_TOREQ, &Qmgr::execPRES_TOREQ);
+ addRecSignal(GSN_PRES_TOCONF, &Qmgr::execPRES_TOCONF);
+
+ // Received signals
+ addRecSignal(GSN_CONNECT_REP, &Qmgr::execCONNECT_REP);
+ addRecSignal(GSN_NDB_FAILCONF, &Qmgr::execNDB_FAILCONF);
+ addRecSignal(GSN_STTOR, &Qmgr::execSTTOR);
+ addRecSignal(GSN_CLOSE_COMCONF, &Qmgr::execCLOSE_COMCONF);
+ addRecSignal(GSN_API_REGREQ, &Qmgr::execAPI_REGREQ);
+ addRecSignal(GSN_API_VERSION_REQ, &Qmgr::execAPI_VERSION_REQ);
+ addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP);
+ addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
+ addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
+ addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
+
+ // Arbitration signals
+ addRecSignal(GSN_ARBIT_PREPREQ, &Qmgr::execARBIT_PREPREQ);
+ addRecSignal(GSN_ARBIT_PREPCONF, &Qmgr::execARBIT_PREPCONF);
+ addRecSignal(GSN_ARBIT_PREPREF, &Qmgr::execARBIT_PREPREF);
+ addRecSignal(GSN_ARBIT_STARTCONF, &Qmgr::execARBIT_STARTCONF);
+ addRecSignal(GSN_ARBIT_STARTREF, &Qmgr::execARBIT_STARTREF);
+ addRecSignal(GSN_ARBIT_CHOOSECONF, &Qmgr::execARBIT_CHOOSECONF);
+ addRecSignal(GSN_ARBIT_CHOOSEREF, &Qmgr::execARBIT_CHOOSEREF);
+ addRecSignal(GSN_ARBIT_STOPREP, &Qmgr::execARBIT_STOPREP);
+
+ initData();
+}//Qmgr::Qmgr()
+
+Qmgr::~Qmgr()
+{
+ delete []nodeRec;
+}//Qmgr::~Qmgr()
+
+
+BLOCK_FUNCTIONS(Qmgr)
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
new file mode 100644
index 00000000000..04373dae93c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -0,0 +1,3928 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#define QMGR_C
+#include "Qmgr.hpp"
+#include <pc.hpp>
+#include <NdbTick.h>
+#include <signaldata/EventReport.hpp>
+#include <signaldata/StartOrd.hpp>
+#include <signaldata/CmInit.hpp>
+#include <signaldata/CloseComReqConf.hpp>
+#include <signaldata/PrepFailReqRef.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/NFCompleteRep.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/ArbitSignalData.hpp>
+#include <signaldata/ApiRegSignalData.hpp>
+#include <signaldata/ApiVersion.hpp>
+#include <signaldata/BlockCommitOrd.hpp>
+#include <signaldata/FailRep.hpp>
+#include <signaldata/DisconnectRep.hpp>
+
+#include <ndb_version.h>
+
+#ifdef DEBUG_ARBIT
+#include <NdbOut.hpp>
+#endif
+
+//#define DEBUG_QMGR_START
+#ifdef DEBUG_QMGR_START
+#include <DebuggerNames.hpp>
+#define DEBUG(x) ndbout << "QMGR " << __LINE__ << ": " << x << endl
+#define DEBUG_START(gsn, node, msg) DEBUG(getSignalName(gsn) << " to: " << node << " - " << msg)
+#define DEBUG_START2(gsn, rg, msg) { char nodes[255]; DEBUG(getSignalName(gsn) << " to: " << rg.m_nodes.getText(nodes) << " - " << msg); }
+#define DEBUG_START3(signal, msg) DEBUG(getSignalName(signal->header.theVerId_signalNumber) << " from " << refToNode(signal->getSendersBlockRef()) << " - " << msg);
+#else
+#define DEBUG(x)
+#define DEBUG_START(gsn, node, msg)
+#define DEBUG_START2(gsn, rg, msg)
+#define DEBUG_START3(signal, msg)
+#endif
+
+// Signal entries and statement blocks
+/* 4 P R O G R A M */
+/*******************************/
+/* CMHEART_BEAT */
+/*******************************/
+void Qmgr::execCM_HEARTBEAT(Signal* signal)
+{
+ NodeRecPtr hbNodePtr;
+ jamEntry();
+ hbNodePtr.i = signal->theData[0];
+ ptrCheckGuard(hbNodePtr, MAX_NDB_NODES, nodeRec);
+ hbNodePtr.p->alarmCount = 0;
+ return;
+}//Qmgr::execCM_HEARTBEAT()
+
+/*******************************/
+/* CM_NODEINFOREF */
+/*******************************/
+void Qmgr::execCM_NODEINFOREF(Signal* signal)
+{
+ jamEntry();
+ systemErrorLab(signal);
+ return;
+}//Qmgr::execCM_NODEINFOREF()
+
+/*******************************/
+/* CONTINUEB */
+/*******************************/
+void Qmgr::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+ const Uint32 tcontinuebType = signal->theData[0];
+ const Uint32 tdata0 = signal->theData[1];
+ const Uint32 tdata1 = signal->theData[2];
+ switch (tcontinuebType) {
+ case ZREGREQ_TIMELIMIT:
+ jam();
+ if (c_start.m_startKey != tdata0 || c_start.m_startNode != tdata1) {
+ jam();
+ return;
+ }//if
+ regreqTimeLimitLab(signal);
+ break;
+ case ZREGREQ_MASTER_TIMELIMIT:
+ jam();
+ if (c_start.m_startKey != tdata0 || c_start.m_startNode != tdata1) {
+ jam();
+ return;
+ }//if
+ //regreqMasterTimeLimitLab(signal);
+ failReportLab(signal, c_start.m_startNode, FailRep::ZSTART_IN_REGREQ);
+ return;
+ break;
+ case ZTIMER_HANDLING:
+ jam();
+ timerHandlingLab(signal);
+ return;
+ break;
+ case ZARBIT_HANDLING:
+ jam();
+ runArbitThread(signal);
+ return;
+ break;
+ default:
+ jam();
+ // ZCOULD_NOT_OCCUR_ERROR;
+ systemErrorLab(signal);
+ return;
+ break;
+ }//switch
+ return;
+}//Qmgr::execCONTINUEB()
+
+
+void Qmgr::execDEBUG_SIG(Signal* signal)
+{
+ NodeRecPtr debugNodePtr;
+ jamEntry();
+ debugNodePtr.i = signal->theData[0];
+ ptrCheckGuard(debugNodePtr, MAX_NODES, nodeRec);
+ return;
+}//Qmgr::execDEBUG_SIG()
+
+/*******************************/
+/* FAIL_REP */
+/*******************************/
+void Qmgr::execFAIL_REP(Signal* signal)
+{
+ const FailRep * const failRep = (FailRep *)&signal->theData[0];
+ const NodeId failNodeId = failRep->failNodeId;
+ const FailRep::FailCause failCause = (FailRep::FailCause)failRep->failCause;
+
+ jamEntry();
+ failReportLab(signal, failNodeId, failCause);
+ return;
+}//Qmgr::execFAIL_REP()
+
+/*******************************/
+/* PRES_TOREQ */
+/*******************************/
+void Qmgr::execPRES_TOREQ(Signal* signal)
+{
+ jamEntry();
+ BlockReference Tblockref = signal->theData[0];
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = ccommitFailureNr;
+ sendSignal(Tblockref, GSN_PRES_TOCONF, signal, 2, JBA);
+ return;
+}//Qmgr::execPRES_TOREQ()
+
+/*
+4.2 ADD NODE MODULE*/
+/*##########################################################################*/
+/*
+4.2.1 STTOR */
+/**--------------------------------------------------------------------------
+ * Start phase signal, must be handled by all blocks.
+ * QMGR is only interested in the first phase.
+ * During phase one we clear all registered applications.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* STTOR */
+/*******************************/
+void Qmgr::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ switch(signal->theData[1]){
+ case 1:
+ initData(signal);
+ startphase1(signal);
+ return;
+ case 7:
+ cactivateApiCheck = 1;
+ /**
+ * Start arbitration thread. This could be done as soon as
+ * we have all nodes (or a winning majority).
+ */
+ if (cpresident == getOwnNodeId())
+ handleArbitStart(signal);
+ break;
+ }
+
+ sendSttorryLab(signal);
+ return;
+}//Qmgr::execSTTOR()
+
+void Qmgr::sendSttorryLab(Signal* signal)
+{
+/****************************<*/
+/*< STTORRY <*/
+/****************************<*/
+ signal->theData[3] = 7;
+ signal->theData[4] = 255;
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Qmgr::sendSttorryLab()
+
+void Qmgr::startphase1(Signal* signal)
+{
+ jamEntry();
+
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRec);
+ nodePtr.p->phase = ZSTARTING;
+ nodePtr.p->blockRef = reference();
+ c_connectedNodes.set(nodePtr.i);
+
+ signal->theData[0] = 0; // no answer
+ signal->theData[1] = 0; // no id
+ signal->theData[2] = NodeInfo::DB;
+ sendSignal(CMVMI_REF, GSN_OPEN_COMREQ, signal, 3, JBB);
+
+ execCM_INFOCONF(signal);
+ return;
+}
+
+void Qmgr::setHbDelay(UintR aHbDelay)
+{
+ hb_send_timer.setDelay(aHbDelay < 10 ? 10 : aHbDelay);
+ hb_send_timer.reset();
+ hb_check_timer.setDelay(aHbDelay < 10 ? 10 : aHbDelay);
+ hb_check_timer.reset();
+}
+
+void Qmgr::setHbApiDelay(UintR aHbApiDelay)
+{
+ chbApiDelay = (aHbApiDelay < 100 ? 100 : aHbApiDelay);
+ hb_api_timer.setDelay(chbApiDelay);
+ hb_api_timer.reset();
+}
+
+void Qmgr::setArbitTimeout(UintR aArbitTimeout)
+{
+ arbitRec.timeout = (aArbitTimeout < 10 ? 10 : aArbitTimeout);
+}
+
+void Qmgr::execCONNECT_REP(Signal* signal)
+{
+ const Uint32 nodeId = signal->theData[0];
+ c_connectedNodes.set(nodeId);
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
+ switch(nodePtr.p->phase){
+ case ZSTARTING:
+ jam();
+ break;
+ case ZRUNNING:
+ case ZPREPARE_FAIL:
+ case ZFAIL_CLOSING:
+ jam();
+ return;
+ case ZINIT:
+ ndbrequire(false);
+ case ZAPI_ACTIVE:
+ case ZAPI_INACTIVE:
+ return;
+ }
+
+ if(!c_start.m_nodes.isWaitingFor(nodeId)){
+ jam();
+ return;
+ }
+
+ switch(c_start.m_gsn){
+ case GSN_CM_REGREQ:
+ jam();
+ sendCmRegReq(signal, nodeId);
+ return;
+ case GSN_CM_NODEINFOREQ:{
+ jam();
+ sendCmNodeInfoReq(signal, nodeId, nodePtr.p);
+ return;
+ }
+ default:
+ return;
+ }
+ return;
+}//Qmgr::execCONNECT_REP()
+
+/*******************************/
+/* CM_INFOCONF */
+/*******************************/
+void Qmgr::execCM_INFOCONF(Signal* signal)
+{
+ cpresident = ZNIL;
+ cpresidentCandidate = getOwnNodeId();
+ cpresidentAlive = ZFALSE;
+ c_stopElectionTime = NdbTick_CurrentMillisecond();
+ c_stopElectionTime += c_restartPartialTimeout;
+ cmInfoconf010Lab(signal);
+
+ return;
+}//Qmgr::execCM_INFOCONF()
+
+void Qmgr::cmInfoconf010Lab(Signal* signal)
+{
+ c_start.m_startKey = 0;
+ c_start.m_startNode = getOwnNodeId();
+ c_start.m_nodes.clearWaitingFor();
+ c_start.m_gsn = GSN_CM_REGREQ;
+
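+ // Mark every configured DB node as waited for, and send CM_REGREQ to those that are already connected.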
+ NodeRecPtr nodePtr;
+ c_regReqReqSent = c_regReqReqRecv = 0;
+ cnoOfNodes = 0;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+
+ if(getNodeInfo(nodePtr.i).getType() != NodeInfo::DB)
+ continue;
+
+ c_start.m_nodes.setWaitingFor(nodePtr.i);
+ cnoOfNodes++;
+
+ if(!c_connectedNodes.get(nodePtr.i))
+ continue;
+
+ sendCmRegReq(signal, nodePtr.i);
+ }
+
+ //----------------------------------------
+ /* Wait for a while. When it returns */
+ /* we will check if we got any CM_REGREF*/
+ /* or CM_REGREQ (lower nodeid than our */
+ /* own). */
+ //----------------------------------------
+ signal->theData[0] = ZREGREQ_TIMELIMIT;
+ signal->theData[1] = c_start.m_startKey;
+ signal->theData[2] = c_start.m_startNode;
+ sendSignalWithDelay(QMGR_REF, GSN_CONTINUEB, signal, 3000, 3);
+
+ creadyDistCom = ZTRUE;
+ return;
+}//Qmgr::cmInfoconf010Lab()
+
+void
+Qmgr::sendCmRegReq(Signal * signal, Uint32 nodeId){
+ c_regReqReqSent++;
+ CmRegReq * const cmRegReq = (CmRegReq *)&signal->theData[0];
+ cmRegReq->blockRef = reference();
+ cmRegReq->nodeId = getOwnNodeId();
+ cmRegReq->version = NDB_VERSION;
+ const Uint32 ref = calcQmgrBlockRef(nodeId);
+ sendSignal(ref, GSN_CM_REGREQ, signal, CmRegReq::SignalLength, JBB);
+ DEBUG_START(GSN_CM_REGREQ, nodeId, "");
+}
+
+/*
+4.4.11 CM_REGREQ */
+/**--------------------------------------------------------------------------
+ * If this signal is received, someone is trying to get registered.
+ * Only the president has the authority to make decisions about new nodes,
+ * so only a president or a node that claims to be the president may send a
+ * reply to this signal.
+ * This signal can occur any time after STTOR has been received.
+ * CPRESIDENT: Timelimit has expired and someone has
+ * decided to enter the president role
+ * CPRESIDENT_CANDIDATE:
+ * Assigned when we receive a CM_REGREF; if we get more than one REF
+ * we always keep the lowest node number.
+ * We accept this node as president when our time limit expires.
+ * We should consider the following cases:
+ * 1- We are the president. If we are busy adding new nodes to the cluster,
+ * then we have to refuse to add this node.
+ * The refused node will try again in ZREFUSE_ADD_TIME seconds.
+ * If we are not busy then we confirm.
+ *
+ * 2- We know the president; we don't need to bother about this REQ.
+ * The president has also got this REQ and will take care of it.
+ *
+ * 3- The president is not known. We have received CM_INIT, so we compare the
+ * sender's node number to GETOWNNODEID().
+ * If we have a lower number than the sender then we will claim
+ * that we are the president and send a refuse signal back.
+ * We have to wait for the CONTINUEB signal before we can enter the
+ * president role. If our GETOWNNODEID() is larger than the sender's node
+ * number, we are not the president and just have to wait for the
+ * reply signal (REF) to our CM_REGREQ_2.
+ * 4- We haven't received the CM_INIT signal so we don't know who we are.
+ * Ignore the request.
+ *--------------------------------------------------------------------------*/
+/*******************************/
+/* CM_REGREQ */
+/*******************************/
+void Qmgr::execCM_REGREQ(Signal* signal)
+{
+ DEBUG_START3(signal, "");
+
+ NodeRecPtr addNodePtr;
+ jamEntry();
+
+ CmRegReq * const cmRegReq = (CmRegReq *)&signal->theData[0];
+ const BlockReference Tblockref = cmRegReq->blockRef;
+ const Uint32 startingVersion = cmRegReq->version;
+ addNodePtr.i = cmRegReq->nodeId;
+
+ if (creadyDistCom == ZFALSE) {
+ jam();
+ /* NOT READY FOR DISTRIBUTED COMMUNICATION.*/
+ return;
+ }//if
+
+ if (!ndbCompatible_ndb_ndb(NDB_VERSION, startingVersion)) {
+ jam();
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZINCOMPATIBLE_VERSION);
+ return;
+ }
+
+ ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
+
+ if (cpresident != getOwnNodeId()){
+ jam();
+ if (cpresident == ZNIL) {
+ /**
+ * We don't know the president.
+ * If the node to be added has a lower node id
+ * than our president candidate, set it as
+ * the new candidate.
+ */
+ jam();
+ if (addNodePtr.i < cpresidentCandidate) {
+ jam();
+ cpresidentCandidate = addNodePtr.i;
+ }//if
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZELECTION);
+ return;
+ }
+ /**
+ * We are not the president.
+ * We know the president.
+ * President will answer.
+ */
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZNOT_PRESIDENT);
+ return;
+ }//if
+
+ if (c_start.m_startNode != 0){
+ jam();
+ /**
+ * President busy by adding another node
+ */
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZBUSY_PRESIDENT);
+ return;
+ }//if
+
+ if (ctoStatus == Q_ACTIVE) {
+ jam();
+ /**
+ * Active taking over as president
+ */
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZBUSY_TO_PRES);
+ return;
+ }//if
+
+ if (getNodeInfo(addNodePtr.i).m_type != NodeInfo::DB) {
+ jam();
+ /**
+ * The new node is not in config file
+ */
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZNOT_IN_CFG);
+ return;
+ }
+
+ Phase phase = addNodePtr.p->phase;
+ if (phase != ZINIT){
+ jam();
+ DEBUG("phase = " << phase);
+ sendCmRegrefLab(signal, Tblockref, CmRegRef::ZNOT_DEAD);
+ return;
+ }//if
+
+ jam();
+ /**
+ * WE ARE PRESIDENT AND WE ARE NOT BUSY ADDING ANOTHER NODE.
+ * WE WILL TAKE CARE OF THE INCLUSION OF THIS NODE INTO THE CLUSTER.
+ * WE NEED TO START TIME SUPERVISION OF THIS. SINCE WE CANNOT STOP A
+ * TIMED SIGNAL IF THE INCLUSION IS INTERRUPTED, WE IDENTIFY
+ * EACH INCLUSION WITH A UNIQUE IDENTITY. THIS IS CHECKED WHEN
+ * THE SIGNAL ARRIVES. IF IT HAS CHANGED THEN WE SIMPLY IGNORE
+ * THE TIMED SIGNAL.
+ */
+
+ /**
+ * Update start record
+ */
+ c_start.m_startKey++;
+ c_start.m_startNode = addNodePtr.i;
+
+ /**
+ * Assign dynamic id
+ */
+ UintR TdynId = ++c_maxDynamicId;
+ setNodeInfo(addNodePtr.i).m_version = startingVersion;
+ addNodePtr.p->ndynamicId = TdynId;
+
+ /**
+ * Reply with CM_REGCONF
+ */
+ CmRegConf * const cmRegConf = (CmRegConf *)&signal->theData[0];
+ cmRegConf->presidentBlockRef = reference();
+ cmRegConf->presidentNodeId = getOwnNodeId();
+ cmRegConf->presidentVersion = getNodeInfo(getOwnNodeId()).m_version;
+ cmRegConf->dynamicId = TdynId;
+ c_clusterNodes.copyto(NdbNodeBitmask::Size, cmRegConf->allNdbNodes);
+ sendSignal(Tblockref, GSN_CM_REGCONF, signal,
+ CmRegConf::SignalLength, JBA);
+ DEBUG_START(GSN_CM_REGCONF, refToNode(Tblockref), "");
+
+ /**
+ * Send CmAdd to all nodes (including starting)
+ */
+ c_start.m_nodes = c_clusterNodes;
+ c_start.m_nodes.setWaitingFor(addNodePtr.i);
+ c_start.m_gsn = GSN_CM_ADD;
+
+ NodeReceiverGroup rg(QMGR, c_start.m_nodes);
+ CmAdd * const cmAdd = (CmAdd*)signal->getDataPtrSend();
+ cmAdd->requestType = CmAdd::Prepare;
+ cmAdd->startingNodeId = addNodePtr.i;
+ cmAdd->startingVersion = startingVersion;
+ sendSignal(rg, GSN_CM_ADD, signal, CmAdd::SignalLength, JBA);
+ DEBUG_START2(GSN_CM_ADD, rg, "Prepare");
+
+ /**
+ * Set timer
+ */
+ return;
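+ // NOTE: the unconditional return above means the ZREGREQ_MASTER_TIMELIMIT signal below is never sent;
+ // the time supervision described in the comment above appears to be disabled here.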
+ signal->theData[0] = ZREGREQ_MASTER_TIMELIMIT;
+ signal->theData[1] = c_start.m_startKey;
+ sendSignalWithDelay(QMGR_REF, GSN_CONTINUEB, signal, 30000, 2);
+
+ return;
+}//Qmgr::execCM_REGREQ()
+
+void Qmgr::sendCmRegrefLab(Signal* signal, BlockReference TBRef,
+ CmRegRef::ErrorCode Terror)
+{
+ CmRegRef* ref = (CmRegRef*)signal->getDataPtrSend();
+ ref->blockRef = reference();
+ ref->nodeId = getOwnNodeId();
+ ref->errorCode = Terror;
+ ref->presidentCandidate = (cpresident == ZNIL ? cpresidentCandidate : cpresident);
+ sendSignal(TBRef, GSN_CM_REGREF, signal,
+ CmRegRef::SignalLength, JBB);
+ DEBUG_START(GSN_CM_REGREF, refToNode(TBRef), "");
+ return;
+}//Qmgr::sendCmRegrefLab()
+
+/*
+4.4.11 CM_REGCONF */
+/**--------------------------------------------------------------------------
+ * President gives permission to a node which wants to join the cluster.
+ * The president will prepare the cluster for the addition of a new
+ * node. When the new node has set up all connections to the cluster,
+ * the president will send commit to all cluster nodes so the phase of the
+ * new node can be changed to ZRUNNING.
+ *--------------------------------------------------------------------------*/
+/*******************************/
+/* CM_REGCONF */
+/*******************************/
+void Qmgr::execCM_REGCONF(Signal* signal)
+{
+ DEBUG_START3(signal, "");
+
+ NodeRecPtr myNodePtr;
+ NodeRecPtr nodePtr;
+ jamEntry();
+
+ const CmRegConf * const cmRegConf = (CmRegConf *)&signal->theData[0];
+
+ if (!ndbCompatible_ndb_ndb(NDB_VERSION, cmRegConf->presidentVersion)) {
+ jam();
+ char buf[128];
+ BaseString::snprintf(buf,sizeof(buf),"incompatible version own=0x%x other=0x%x, shutting down", NDB_VERSION, cmRegConf->presidentVersion);
+ systemErrorLab(signal, buf);
+ return;
+ }
+
+
+ cpdistref = cmRegConf->presidentBlockRef;
+ cpresident = cmRegConf->presidentNodeId;
+ UintR TdynamicId = cmRegConf->dynamicId;
+ c_maxDynamicId = TdynamicId;
+ c_clusterNodes.assign(NdbNodeBitmask::Size, cmRegConf->allNdbNodes);
+
+/*--------------------------------------------------------------*/
+// Send this as an EVENT REPORT to inform about hearing about
+// other NDB node proclaiming to be president.
+/*--------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_CM_REGCONF;
+ signal->theData[1] = getOwnNodeId();
+ signal->theData[2] = cpresident;
+ signal->theData[3] = TdynamicId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ myNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
+ myNodePtr.p->ndynamicId = TdynamicId;
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ if (c_clusterNodes.get(nodePtr.i)){
+ jam();
+ ptrAss(nodePtr, nodeRec);
+
+ ndbrequire(nodePtr.p->phase == ZINIT);
+ nodePtr.p->phase = ZRUNNING;
+
+ if(c_connectedNodes.get(nodePtr.i)){
+ jam();
+ sendCmNodeInfoReq(signal, nodePtr.i, myNodePtr.p);
+ }
+ }
+ }
+
+ c_start.m_gsn = GSN_CM_NODEINFOREQ;
+ c_start.m_nodes = c_clusterNodes;
+
+ return;
+}//Qmgr::execCM_REGCONF()
+
+void
+Qmgr::sendCmNodeInfoReq(Signal* signal, Uint32 nodeId, const NodeRec * self){
+ CmNodeInfoReq * const req = (CmNodeInfoReq*)signal->getDataPtrSend();
+ req->nodeId = getOwnNodeId();
+ req->dynamicId = self->ndynamicId;
+ req->version = getNodeInfo(getOwnNodeId()).m_version;
+ const Uint32 ref = calcQmgrBlockRef(nodeId);
+ sendSignal(ref,GSN_CM_NODEINFOREQ, signal, CmNodeInfoReq::SignalLength, JBB);
+ DEBUG_START(GSN_CM_NODEINFOREQ, nodeId, "");
+}
+
+/*
+4.4.11 CM_REGREF */
+/**--------------------------------------------------------------------------
+ * Only a president or a president candidate can refuse a node to get added to
+ * the cluster.
+ * Refuse reasons:
+ * ZBUSY We know that the sender is the president and we have to
+ * make a new CM_REGREQ.
+ * ZNOT_IN_CFG This node number is not specified in the config file,
+ * SYSTEM ERROR
+ * ZELECTION Sender is a president candidate, his timelimit
+ * hasn't expired so maybe someone else will show up.
+ * Update the CPRESIDENT_CANDIDATE, then wait for our
+ * timelimit to expire.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* CM_REGREF */
+/*******************************/
+void Qmgr::execCM_REGREF(Signal* signal)
+{
+ jamEntry();
+ c_regReqReqRecv++;
+
+ // Ignore block reference in data[0]
+ UintR TaddNodeno = signal->theData[1];
+ UintR TrefuseReason = signal->theData[2];
+ Uint32 candidate = signal->theData[3];
+ DEBUG_START3(signal, TrefuseReason);
+
+ if(candidate != cpresidentCandidate){
+ jam();
+ c_regReqReqRecv = ~0;
+ }
+
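+ /* For most refusal reasons, theData[3] is set below to a sub-code that ends up in the
+ NDB_LE_CM_REGREF event report sent after this switch. */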
+ switch (TrefuseReason) {
+ case CmRegRef::ZINCOMPATIBLE_VERSION:
+ jam();
+ systemErrorLab(signal, "incompatible version, connection refused by running ndb node");
+ break;
+ case CmRegRef::ZBUSY:
+ case CmRegRef::ZBUSY_TO_PRES:
+ case CmRegRef::ZBUSY_PRESIDENT:
+ jam();
+ cpresidentAlive = ZTRUE;
+ signal->theData[3] = 0;
+ break;
+ case CmRegRef::ZNOT_IN_CFG:
+ jam();
+ progError(__LINE__, ERR_NODE_NOT_IN_CONFIG);
+ break;
+ case CmRegRef::ZNOT_DEAD:
+ jam();
+ progError(__LINE__, ERR_NODE_NOT_DEAD);
+ break;
+ case CmRegRef::ZELECTION:
+ jam();
+ if (cpresidentCandidate > TaddNodeno) {
+ jam();
+ //----------------------------------------
+ /* We may already have a candidate */
+ /* choose the lowest nodeno */
+ //----------------------------------------
+ signal->theData[3] = 2;
+ cpresidentCandidate = TaddNodeno;
+ } else {
+ signal->theData[3] = 4;
+ }//if
+ break;
+ case CmRegRef::ZNOT_PRESIDENT:
+ jam();
+ cpresidentAlive = ZTRUE;
+ signal->theData[3] = 3;
+ break;
+ default:
+ jam();
+ signal->theData[3] = 5;
+ /*empty*/;
+ break;
+ }//switch
+/*--------------------------------------------------------------*/
+// Send this as an EVENT REPORT to inform about hearing about
+// other NDB node proclaiming not to be president.
+/*--------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_CM_REGREF;
+ signal->theData[1] = getOwnNodeId();
+ signal->theData[2] = TaddNodeno;
+//-----------------------------------------
+// signal->theData[3] filled in above
+//-----------------------------------------
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+
+ if(cpresidentAlive == ZTRUE){
+ jam();
+ DEBUG("");
+ return;
+ }
+
+ if(c_regReqReqSent != c_regReqReqRecv){
+ jam();
+ DEBUG( c_regReqReqSent << " != " << c_regReqReqRecv);
+ return;
+ }
+
+ if(cpresidentCandidate != getOwnNodeId()){
+ jam();
+ DEBUG("");
+ return;
+ }
+
+ /**
+ * All configured nodes have agreed
+ */
+ Uint64 now = NdbTick_CurrentMillisecond();
+ if((c_regReqReqRecv == cnoOfNodes) || now > c_stopElectionTime){
+ jam();
+ electionWon();
+ sendSttorryLab(signal);
+
+ /**
+ * Start timer handling
+ */
+ signal->theData[0] = ZTIMER_HANDLING;
+ sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
+ }
+
+ return;
+}//Qmgr::execCM_REGREF()
+
+void
+Qmgr::electionWon(){
+ NodeRecPtr myNodePtr;
+ cpresident = getOwnNodeId(); /* This node becomes president. */
+ myNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
+
+ myNodePtr.p->phase = ZRUNNING;
+
+ cpdistref = reference();
+ cneighbourl = ZNIL;
+ cneighbourh = ZNIL;
+ myNodePtr.p->ndynamicId = 1;
+ c_maxDynamicId = 1;
+ c_clusterNodes.clear();
+ c_clusterNodes.set(getOwnNodeId());
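+ // Election bookkeeping is reset; from here on this node answers CM_REGREQ as the president.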
+
+ cpresidentAlive = ZTRUE;
+ c_stopElectionTime = ~0;
+ c_start.reset();
+}
+
+/*
+4.4.11 CONTINUEB */
+/*--------------------------------------------------------------------------*/
+/* */
+/*--------------------------------------------------------------------------*/
+/****************************>---------------------------------------------*/
+/* CONTINUEB > SENDER: Own block, Own node */
+/****************************>-------+INPUT : TCONTINUEB_TYPE */
+/*--------------------------------------------------------------*/
+void Qmgr::regreqTimeLimitLab(Signal* signal)
+{
+ if(cpresident == ZNIL){
+ cmInfoconf010Lab(signal);
+ }
+}//Qmgr::regreqTimeLimitLab()
+
+/**---------------------------------------------------------------------------
+ * The new node will take care of giving information about its own node and
+ * asking all other nodes for nodeinfo. The new node will use CM_NODEINFOREQ
+ * for that purpose. When the connections to all running nodes have been set
+ * up, the president will send a commit to all running nodes + the new node.
+ * INPUT: NODE_PTR1, must be set as ZNIL if we don't enter CONNECT_NODES)
+ * from signal CM_NODEINFOCONF.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* CM_NODEINFOCONF */
+/*******************************/
+void Qmgr::execCM_NODEINFOCONF(Signal* signal)
+{
+ DEBUG_START3(signal, "");
+
+ jamEntry();
+
+ CmNodeInfoConf * const conf = (CmNodeInfoConf*)signal->getDataPtr();
+
+ const Uint32 nodeId = conf->nodeId;
+ const Uint32 dynamicId = conf->dynamicId;
+ const Uint32 version = conf->version;
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRec);
+ ndbrequire(nodePtr.p->phase == ZSTARTING);
+ ndbrequire(c_start.m_gsn == GSN_CM_NODEINFOREQ);
+ c_start.m_nodes.clearWaitingFor(nodeId);
+
+ /**
+ * Update node info
+ */
+ NodeRecPtr replyNodePtr;
+ replyNodePtr.i = nodeId;
+ ptrCheckGuard(replyNodePtr, MAX_NDB_NODES, nodeRec);
+ replyNodePtr.p->ndynamicId = dynamicId;
+ replyNodePtr.p->blockRef = signal->getSendersBlockRef();
+ setNodeInfo(replyNodePtr.i).m_version = version;
+
+ if(!c_start.m_nodes.done()){
+ jam();
+ return;
+ }
+
+ /**********************************************<*/
+ /* Send an ack. back to the president. */
+ /* CM_ACKADD */
+ /* The new node has been registered by all */
+ /* running nodes and has stored nodeinfo about */
+ /* all running nodes. The new node has to wait */
+ /* for CM_ADD (commit) from president to become */
+ /* a running node in the cluster. */
+ /**********************************************<*/
+ sendCmAckAdd(signal, getOwnNodeId(), CmAdd::Prepare);
+ return;
+}//Qmgr::execCM_NODEINFOCONF()
+
+/**---------------------------------------------------------------------------
+ * A new node sends nodeinfo about itself. The new node asks for
+ * corresponding nodeinfo back in the CM_NODEINFOCONF.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* CM_NODEINFOREQ */
+/*******************************/
+void Qmgr::execCM_NODEINFOREQ(Signal* signal)
+{
+ jamEntry();
+
+ const Uint32 Tblockref = signal->getSendersBlockRef();
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRec);
+ if(nodePtr.p->phase != ZRUNNING){
+ jam();
+ signal->theData[0] = reference();
+ signal->theData[1] = getOwnNodeId();
+ signal->theData[2] = ZNOT_RUNNING;
+ sendSignal(Tblockref, GSN_CM_NODEINFOREF, signal, 3, JBB);
+ return;
+ }
+
+ NodeRecPtr addNodePtr;
+ CmNodeInfoReq * const req = (CmNodeInfoReq*)signal->getDataPtr();
+ addNodePtr.i = req->nodeId;
+ ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
+ addNodePtr.p->ndynamicId = req->dynamicId;
+ addNodePtr.p->blockRef = signal->getSendersBlockRef();
+ setNodeInfo(addNodePtr.i).m_version = req->version;
+ c_maxDynamicId = req->dynamicId;
+
+ cmAddPrepare(signal, addNodePtr, nodePtr.p);
+}//Qmgr::execCM_NODEINFOREQ()
+
+void
+Qmgr::cmAddPrepare(Signal* signal, NodeRecPtr nodePtr, const NodeRec * self){
+ jam();
+
+ switch(nodePtr.p->phase){
+ case ZINIT:
+ jam();
+ nodePtr.p->phase = ZSTARTING;
+ return;
+ case ZFAIL_CLOSING:
+ jam();
+#ifdef VM_TRACE
+ ndbout_c("Enabling communication to CM_ADD node state=%d",
+ nodePtr.p->phase);
+#endif
+ nodePtr.p->phase = ZSTARTING;
+ nodePtr.p->failState = NORMAL;
+ signal->theData[0] = 0;
+ signal->theData[1] = nodePtr.i;
+ sendSignal(CMVMI_REF, GSN_OPEN_COMREQ, signal, 2, JBA);
+ return;
+ case ZSTARTING:
+ break;
+ case ZRUNNING:
+ case ZPREPARE_FAIL:
+ case ZAPI_ACTIVE:
+ case ZAPI_INACTIVE:
+ ndbrequire(false);
+ }
+
+ sendCmAckAdd(signal, nodePtr.i, CmAdd::Prepare);
+
+ /* President have prepared us */
+ CmNodeInfoConf * conf = (CmNodeInfoConf*)signal->getDataPtrSend();
+ conf->nodeId = getOwnNodeId();
+ conf->dynamicId = self->ndynamicId;
+ conf->version = getNodeInfo(getOwnNodeId()).m_version;
+ sendSignal(nodePtr.p->blockRef, GSN_CM_NODEINFOCONF, signal,
+ CmNodeInfoConf::SignalLength, JBB);
+ DEBUG_START(GSN_CM_NODEINFOCONF, refToNode(nodePtr.p->blockRef), "");
+}
+
+void
+Qmgr::sendCmAckAdd(Signal * signal, Uint32 nodeId, CmAdd::RequestType type){
+
+ CmAckAdd * cmAckAdd = (CmAckAdd*)signal->getDataPtrSend();
+ cmAckAdd->requestType = type;
+ cmAckAdd->startingNodeId = nodeId;
+ cmAckAdd->senderNodeId = getOwnNodeId();
+ sendSignal(cpdistref, GSN_CM_ACKADD, signal, CmAckAdd::SignalLength, JBA);
+ DEBUG_START(GSN_CM_ACKADD, cpresident, "");
+
+ switch(type){
+ case CmAdd::Prepare:
+ return;
+ case CmAdd::AddCommit:
+ case CmAdd::CommitNew:
+ break;
+ }
+
+ signal->theData[0] = nodeId;
+ EXECUTE_DIRECT(NDBCNTR, GSN_CM_ADD_REP, signal, 1);
+ jamEntry();
+}
+
+/*
+4.4.11 CM_ADD */
+/**--------------------------------------------------------------------------
+ * Prepare a running node to add a new node to the cluster. The running node
+ * will change the phase of the new node from ZINIT to ZWAITING. The running node
+ * will also mark that we have received a prepare. When the new node has sent
+ * us nodeinfo we can send an acknowledgement back to the president. When all
+ * running nodes have acknowledged the new node, the president will send a
+ * commit and we can change the phase of the new node to ZRUNNING. The president
+ * will also send CM_ADD to himself.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* CM_ADD */
+/*******************************/
+void Qmgr::execCM_ADD(Signal* signal)
+{
+ NodeRecPtr addNodePtr;
+ jamEntry();
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+
+ CmAdd * const cmAdd = (CmAdd*)signal->getDataPtr();
+ const CmAdd::RequestType type = (CmAdd::RequestType)cmAdd->requestType;
+ addNodePtr.i = cmAdd->startingNodeId;
+ //const Uint32 startingVersion = cmAdd->startingVersion;
+ ptrCheckGuard(addNodePtr, MAX_NDB_NODES, nodeRec);
+
+ DEBUG_START3(signal, type);
+
+ if(nodePtr.p->phase == ZSTARTING){
+ jam();
+ /**
+ * We are joining...
+ */
+ ndbrequire(addNodePtr.i == nodePtr.i);
+ switch(type){
+ case CmAdd::Prepare:
+ ndbrequire(c_start.m_gsn == GSN_CM_NODEINFOREQ);
+ /**
+ * Wait for CM_NODEINFO_CONF
+ */
+ return;
+ case CmAdd::CommitNew:
+ /**
+ * Tata. we're in the cluster
+ */
+ joinedCluster(signal, addNodePtr);
+ return;
+ case CmAdd::AddCommit:
+ ndbrequire(false);
+ }
+ }
+
+ switch (type) {
+ case CmAdd::Prepare:
+ cmAddPrepare(signal, addNodePtr, nodePtr.p);
+ break;
+ case CmAdd::AddCommit:{
+ jam();
+ ndbrequire(addNodePtr.p->phase == ZSTARTING);
+ addNodePtr.p->phase = ZRUNNING;
+ addNodePtr.p->alarmCount = 0;
+ c_clusterNodes.set(addNodePtr.i);
+ findNeighbours(signal);
+
+ /**
+ * SEND A HEARTBEAT IMMEDIATELY TO DECREASE THE RISK THAT WE MISS EARLY
+ * HEARTBEATS.
+ */
+ sendHeartbeat(signal);
+
+ /**
+ * ENABLE COMMUNICATION WITH ALL BLOCKS WITH THE NEWLY ADDED NODE
+ */
+ signal->theData[0] = addNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA);
+
+ sendCmAckAdd(signal, addNodePtr.i, CmAdd::AddCommit);
+ if(getOwnNodeId() != cpresident){
+ jam();
+ c_start.reset();
+ }
+ break;
+ }
+ case CmAdd::CommitNew:
+ jam();
+ ndbrequire(false);
+ }
+
+}//Qmgr::execCM_ADD()
+
+void
+Qmgr::joinedCluster(Signal* signal, NodeRecPtr nodePtr){
+ /**
+ * WE HAVE BEEN INCLUDED IN THE CLUSTER WE CAN START BEING PART OF THE
+ * HEARTBEAT PROTOCOL AND WE WILL ALSO ENABLE COMMUNICATION WITH ALL
+ * NODES IN THE CLUSTER.
+ */
+ nodePtr.p->phase = ZRUNNING;
+ nodePtr.p->alarmCount = 0;
+ findNeighbours(signal);
+ c_clusterNodes.set(nodePtr.i);
+ c_start.reset();
+
+ /**
+ * SEND A HEARTBEAT IMMEDIATELY TO DECREASE THE RISK
+ * THAT WE MISS EARLY HEARTBEATS.
+ */
+ sendHeartbeat(signal);
+
+ /**
+ * ENABLE COMMUNICATION WITH ALL BLOCKS IN THE CURRENT CLUSTER AND SET
+ * THE NODES IN THE CLUSTER TO BE RUNNING.
+ */
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if ((nodePtr.p->phase == ZRUNNING) && (nodePtr.i != getOwnNodeId())) {
+ /*-------------------------------------------------------------------*/
+ // Enable full communication to all other nodes. Not really necessary
+ // to open communication to ourself.
+ /*-------------------------------------------------------------------*/
+ jam();
+ signal->theData[0] = nodePtr.i;
+ sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA);
+ }//if
+ }//for
+
+ sendSttorryLab(signal);
+
+ /**
+ * Start timer handling
+ */
+ signal->theData[0] = ZTIMER_HANDLING;
+ sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
+
+ sendCmAckAdd(signal, getOwnNodeId(), CmAdd::CommitNew);
+}
+
+/* 4.10.7 CM_ACKADD - PRESIDENT IS RECEIVER - */
+/*---------------------------------------------------------------------------*/
+/* Entry point for an ack add signal.
+ * The TTYPE defines if it is a prepare or a commit. */
+/*---------------------------------------------------------------------------*/
+void Qmgr::execCM_ACKADD(Signal* signal)
+{
+ NodeRecPtr addNodePtr;
+ NodeRecPtr senderNodePtr;
+ jamEntry();
+
+ CmAckAdd * const cmAckAdd = (CmAckAdd*)signal->getDataPtr();
+ const CmAdd::RequestType type = (CmAdd::RequestType)cmAckAdd->requestType;
+ addNodePtr.i = cmAckAdd->startingNodeId;
+ senderNodePtr.i = cmAckAdd->senderNodeId;
+
+ DEBUG_START3(signal, type);
+
+ if (cpresident != getOwnNodeId()) {
+ jam();
+ /*-----------------------------------------------------------------------*/
+ /* IF WE ARE NOT PRESIDENT THEN WE SHOULD NOT RECEIVE THIS MESSAGE. */
+ /*-----------------------------------------------------------------------*/
+ warningEvent("Received CM_ACKADD from %d president=%d",
+ senderNodePtr.i, cpresident);
+ return;
+ }//if
+
+ if (addNodePtr.i != c_start.m_startNode) {
+ jam();
+ /*----------------------------------------------------------------------*/
+ /* THIS IS NOT THE STARTING NODE. WE ARE ACTIVE NOW WITH ANOTHER START. */
+ /*----------------------------------------------------------------------*/
+ warningEvent("Received CM_ACKADD from %d with startNode=%d != own %d",
+ senderNodePtr.i, addNodePtr.i, c_start.m_startNode);
+ return;
+ }//if
+
+ ndbrequire(c_start.m_gsn == GSN_CM_ADD);
+ c_start.m_nodes.clearWaitingFor(senderNodePtr.i);
+ if(!c_start.m_nodes.done()){
+ jam();
+ return;
+ }
+
+ switch (type) {
+ case CmAdd::Prepare:{
+ jam();
+
+ /*----------------------------------------------------------------------*/
+ /* ALL RUNNING NODES HAVE PREPARED THE INCLUSION OF THIS NEW NODE. */
+ /*----------------------------------------------------------------------*/
+ c_start.m_gsn = GSN_CM_ADD;
+ c_start.m_nodes = c_clusterNodes;
+
+ CmAdd * const cmAdd = (CmAdd*)signal->getDataPtrSend();
+ cmAdd->requestType = CmAdd::AddCommit;
+ cmAdd->startingNodeId = addNodePtr.i;
+ cmAdd->startingVersion = getNodeInfo(addNodePtr.i).m_version;
+ NodeReceiverGroup rg(QMGR, c_clusterNodes);
+ sendSignal(rg, GSN_CM_ADD, signal, CmAdd::SignalLength, JBA);
+ DEBUG_START2(GSN_CM_ADD, rg, "AddCommit");
+ return;
+ }
+ case CmAdd::AddCommit:{
+ jam();
+
+ /****************************************/
+ /* Send commit to the new node so it */
+ /* will change PHASE into ZRUNNING */
+ /****************************************/
+ c_start.m_gsn = GSN_CM_ADD;
+ c_start.m_nodes.clearWaitingFor();
+ c_start.m_nodes.setWaitingFor(addNodePtr.i);
+
+ CmAdd * const cmAdd = (CmAdd*)signal->getDataPtrSend();
+ cmAdd->requestType = CmAdd::CommitNew;
+ cmAdd->startingNodeId = addNodePtr.i;
+ cmAdd->startingVersion = getNodeInfo(addNodePtr.i).m_version;
+
+ sendSignal(calcQmgrBlockRef(addNodePtr.i), GSN_CM_ADD, signal,
+ CmAdd::SignalLength, JBA);
+ DEBUG_START(GSN_CM_ADD, addNodePtr.i, "CommitNew");
+ return;
+ }
+ case CmAdd::CommitNew:
+ jam();
+ /**
+ * Tell arbitration about new node.
+ */
+ handleArbitNdbAdd(signal, addNodePtr.i);
+ c_start.reset();
+ return;
+ }//switch
+ ndbrequire(false);
+}//Qmgr::execCM_ACKADD()
+
+/**-------------------------------------------------------------------------
+ * WE HAVE BEEN INCLUDED INTO THE CLUSTER. IT IS NOW TIME TO CALCULATE WHICH
+ * ARE OUR LEFT AND RIGHT NEIGHBOURS FOR THE HEARTBEAT PROTOCOL.
+ *--------------------------------------------------------------------------*/
+void Qmgr::findNeighbours(Signal* signal)
+{
+ UintR toldLeftNeighbour;
+ UintR tfnLeftFound;
+ UintR tfnMaxFound;
+ UintR tfnMinFound;
+ UintR tfnRightFound;
+ NodeRecPtr fnNodePtr;
+ NodeRecPtr fnOwnNodePtr;
+
+ toldLeftNeighbour = cneighbourl;
+ tfnLeftFound = 0;
+ tfnMaxFound = 0;
+ tfnMinFound = (UintR)-1;
+ tfnRightFound = (UintR)-1;
+ fnOwnNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(fnOwnNodePtr, MAX_NDB_NODES, nodeRec);
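+ /* Running nodes form a logical ring ordered by dynamic id: the left neighbour is the
+ closest lower id (wrapping around to the highest id) and the right neighbour is the
+ closest higher id (wrapping around to the lowest). */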
+ for (fnNodePtr.i = 1; fnNodePtr.i < MAX_NDB_NODES; fnNodePtr.i++) {
+ jam();
+ ptrAss(fnNodePtr, nodeRec);
+ if (fnNodePtr.i != fnOwnNodePtr.i) {
+ if (fnNodePtr.p->phase == ZRUNNING) {
+ if (tfnMinFound > fnNodePtr.p->ndynamicId) {
+ jam();
+ tfnMinFound = fnNodePtr.p->ndynamicId;
+ }//if
+ if (tfnMaxFound < fnNodePtr.p->ndynamicId) {
+ jam();
+ tfnMaxFound = fnNodePtr.p->ndynamicId;
+ }//if
+ if (fnOwnNodePtr.p->ndynamicId > fnNodePtr.p->ndynamicId) {
+ jam();
+ if (fnNodePtr.p->ndynamicId > tfnLeftFound) {
+ jam();
+ tfnLeftFound = fnNodePtr.p->ndynamicId;
+ }//if
+ } else {
+ jam();
+ if (fnNodePtr.p->ndynamicId < tfnRightFound) {
+ jam();
+ tfnRightFound = fnNodePtr.p->ndynamicId;
+ }//if
+ }//if
+ }//if
+ }//if
+ }//for
+ if (tfnLeftFound == 0) {
+ if (tfnMinFound == (UintR)-1) {
+ jam();
+ cneighbourl = ZNIL;
+ } else {
+ jam();
+ cneighbourl = translateDynamicIdToNodeId(signal, tfnMaxFound);
+ }//if
+ } else {
+ jam();
+ cneighbourl = translateDynamicIdToNodeId(signal, tfnLeftFound);
+ }//if
+ if (tfnRightFound == (UintR)-1) {
+ if (tfnMaxFound == 0) {
+ jam();
+ cneighbourh = ZNIL;
+ } else {
+ jam();
+ cneighbourh = translateDynamicIdToNodeId(signal, tfnMinFound);
+ }//if
+ } else {
+ jam();
+ cneighbourh = translateDynamicIdToNodeId(signal, tfnRightFound);
+ }//if
+ if (toldLeftNeighbour != cneighbourl) {
+ jam();
+ if (cneighbourl != ZNIL) {
+ jam();
+ /**-------------------------------------------------------------------*/
+ /* WE ARE SUPERVISING A NEW LEFT NEIGHBOUR. WE START WITH ALARM COUNT
+ * EQUAL TO ZERO.
+ *---------------------------------------------------------------------*/
+ fnNodePtr.i = cneighbourl;
+ ptrCheckGuard(fnNodePtr, MAX_NDB_NODES, nodeRec);
+ fnNodePtr.p->alarmCount = 0;
+ }//if
+ }//if
+
+ signal->theData[0] = NDB_LE_FIND_NEIGHBOURS;
+ signal->theData[1] = getOwnNodeId();
+ signal->theData[2] = cneighbourl;
+ signal->theData[3] = cneighbourh;
+ signal->theData[4] = fnOwnNodePtr.p->ndynamicId;
+ UintR Tlen = 5;
+ for (fnNodePtr.i = 1; fnNodePtr.i < MAX_NDB_NODES; fnNodePtr.i++) {
+ jam();
+ ptrAss(fnNodePtr, nodeRec);
+ if (fnNodePtr.i != fnOwnNodePtr.i) {
+ if (fnNodePtr.p->phase == ZRUNNING) {
+ jam();
+ signal->theData[Tlen] = fnNodePtr.i;
+ signal->theData[Tlen + 1] = fnNodePtr.p->ndynamicId;
+ if (Tlen < 25) {
+ /*----------------------------------------------------------------*/
+ // This code can only report 11 nodes.
+ // We need to update this when increasing the number of nodes
+ // supported.
+ /*-----------------------------------------------------------------*/
+ Tlen += 2;
+ }
+ }//if
+ }//if
+ }//for
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, Tlen, JBB);
+}//Qmgr::findNeighbours()
+
+/*
+4.10.7 INIT_DATA */
+/*---------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
+void Qmgr::initData(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRec);
+ nodePtr.p->ndynamicId = 0;
+ if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB){
+ nodePtr.p->phase = ZINIT;
+ c_definedNodes.set(nodePtr.i);
+ } else {
+ nodePtr.p->phase = ZAPI_INACTIVE;
+ }
+
+ nodePtr.p->alarmCount = 0;
+ nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
+ nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
+ nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
+ nodePtr.p->failState = NORMAL;
+ nodePtr.p->rcv[0] = 0;
+ nodePtr.p->rcv[1] = 0;
+ }//for
+ cfailureNr = 1;
+ ccommitFailureNr = 1;
+ cprepareFailureNr = 1;
+ cnoFailedNodes = 0;
+ cnoPrepFailedNodes = 0;
+ creadyDistCom = ZFALSE;
+ cpresident = ZNIL;
+ cpresidentCandidate = ZNIL;
+ cpdistref = 0;
+ cneighbourh = ZNIL;
+ cneighbourl = ZNIL;
+ cdelayRegreq = ZDELAY_REGREQ;
+ cactivateApiCheck = 0;
+ ctoStatus = Q_NOT_ACTIVE;
+
+ interface_check_timer.setDelay(1000);
+ interface_check_timer.reset();
+ clatestTransactionCheck = 0;
+
+ cLqhTimeSignalCount = 0;
+
+ // catch-all for missing initializations
+ memset(&arbitRec, 0, sizeof(arbitRec));
+
+ /**
+ * Timeouts
+ */
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 hbDBDB = 1500;
+ Uint32 hbDBAPI = 1500;
+ Uint32 arbitTimeout = 1000;
+ c_restartPartialTimeout = 30000;
+ ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &hbDBDB);
+ ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
+ ndb_mgm_get_int_parameter(p, CFG_DB_ARBIT_TIMEOUT, &arbitTimeout);
+ ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT,
+ &c_restartPartialTimeout);
+ if(c_restartPartialTimeout == 0){
+ c_restartPartialTimeout = ~0;
+ }
+
+ setHbDelay(hbDBDB);
+ setHbApiDelay(hbDBAPI);
+ setArbitTimeout(arbitTimeout);
+
+ arbitRec.state = ARBIT_NULL; // start state for all nodes
+ arbitRec.apiMask[0].clear(); // prepare for ARBIT_CFG
+
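+ // Build one ARBIT_CFG per arbitration rank (1 and 2), containing the bitmask of nodes
+ // configured with that arbitration rank, and feed it to the arbitration module.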
+ ArbitSignalData* const sd = (ArbitSignalData*)&signal->theData[0];
+ for (unsigned rank = 1; rank <= 2; rank++) {
+ sd->sender = getOwnNodeId();
+ sd->code = rank;
+ sd->node = 0;
+ sd->ticket.clear();
+ sd->mask.clear();
+ ndb_mgm_configuration_iterator * iter =
+ theConfiguration.getClusterConfigIterator();
+ for (ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)) {
+ Uint32 tmp = 0;
+ if (ndb_mgm_get_int_parameter(iter, CFG_NODE_ARBIT_RANK, &tmp) == 0 &&
+ tmp == rank){
+ Uint32 nodeId = 0;
+ ndbrequire(!ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &nodeId));
+ sd->mask.set(nodeId);
+ }
+ }
+
+ execARBIT_CFG(signal);
+ }
+ setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;
+}//Qmgr::initData()
+
+
+/**---------------------------------------------------------------------------
+ * HERE WE RECEIVE THE JOB TABLE SIGNAL EVERY 10 MILLISECONDS.
+ * WE WILL USE THIS TO CHECK IF IT IS TIME TO CHECK THE NEIGHBOUR NODE.
+ * WE WILL ALSO SEND A SIGNAL TO BLOCKS THAT NEED A TIME SIGNAL AND
+ * DO NOT WANT TO USE JOB TABLE SIGNALS.
+ *---------------------------------------------------------------------------*/
+void Qmgr::timerHandlingLab(Signal* signal)
+{
+ NDB_TICKS TcurrentTime = NdbTick_CurrentMillisecond();
+ NodeRecPtr myNodePtr;
+ myNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
+
+ if (myNodePtr.p->phase == ZRUNNING) {
+ jam();
+ /**---------------------------------------------------------------------
+ * WE ARE ONLY PART OF THE HEARTBEAT CLUSTER IF WE ARE UP AND RUNNING.
+ *---------------------------------------------------------------------*/
+ if (hb_send_timer.check(TcurrentTime)) {
+ jam();
+ sendHeartbeat(signal);
+ hb_send_timer.reset();
+ }
+ if (hb_check_timer.check(TcurrentTime)) {
+ jam();
+ checkHeartbeat(signal);
+ hb_check_timer.reset();
+ }
+ }
+
+ if (interface_check_timer.check(TcurrentTime)) {
+ jam();
+ interface_check_timer.reset();
+ checkStartInterface(signal);
+ }
+
+ if (cactivateApiCheck != 0) {
+ jam();
+ if (hb_api_timer.check(TcurrentTime)) {
+ jam();
+ hb_api_timer.reset();
+ apiHbHandlingLab(signal);
+ }//if
+ if (clatestTransactionCheck == 0) {
+ //-------------------------------------------------------------
+ // Initialise the Transaction check timer.
+ //-------------------------------------------------------------
+ clatestTransactionCheck = TcurrentTime;
+ }//if
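+ // Catch up in 10 ms steps: each step sends TIME_SIGNAL to DBTC (and to DBLQH every
+ // 100th step), but at most two steps are taken per invocation.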
+ int counter = 0;
+ while (TcurrentTime > ((NDB_TICKS)10 + clatestTransactionCheck)) {
+ jam();
+ clatestTransactionCheck += (NDB_TICKS)10;
+ sendSignal(DBTC_REF, GSN_TIME_SIGNAL, signal, 1, JBB);
+ cLqhTimeSignalCount++;
+ if (cLqhTimeSignalCount >= 100) {
+ cLqhTimeSignalCount = 0;
+ sendSignal(DBLQH_REF, GSN_TIME_SIGNAL, signal, 1, JBB);
+ }//if
+ counter++;
+ if (counter > 1) {
+ jam();
+ break;
+ } else {
+ ;
+ }//if
+ }//while
+ }//if
+
+ //--------------------------------------------------
+ // Resend this signal with 10 milliseconds delay.
+ //--------------------------------------------------
+ signal->theData[0] = ZTIMER_HANDLING;
+ sendSignalWithDelay(QMGR_REF, GSN_CONTINUEB, signal, 10, 1);
+ return;
+}//Qmgr::timerHandlingLab()
+
+/*---------------------------------------------------------------------------*/
+/* THIS MODULE HANDLES THE SENDING AND RECEIVING OF HEARTBEATS. */
+/*---------------------------------------------------------------------------*/
+void Qmgr::sendHeartbeat(Signal* signal)
+{
+ NodeRecPtr localNodePtr;
+ localNodePtr.i = cneighbourh;
+ if (localNodePtr.i == ZNIL) {
+ jam();
+ /**---------------------------------------------------------------------
+ * THERE ARE NO NEIGHBOURS. THIS IS POSSIBLE IF WE ARE THE ONLY NODE IN
+ * THE CLUSTER. IN THIS CASE WE DO NOT NEED TO SEND ANY HEARTBEAT SIGNALS.
+ *-----------------------------------------------------------------------*/
+ return;
+ }//if
+ ptrCheckGuard(localNodePtr, MAX_NDB_NODES, nodeRec);
+ signal->theData[0] = getOwnNodeId();
+
+ sendSignal(localNodePtr.p->blockRef, GSN_CM_HEARTBEAT, signal, 1, JBA);
+#ifdef VM_TRACE
+ signal->theData[0] = NDB_LE_SentHeartbeat;
+ signal->theData[1] = localNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+#endif
+}//Qmgr::sendHeartbeat()
+
+void Qmgr::checkHeartbeat(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+
+ nodePtr.i = cneighbourl;
+ if (nodePtr.i == ZNIL) {
+ jam();
+ /**---------------------------------------------------------------------
+ * THERE ARE NO NEIGHBOURS. THIS IS POSSIBLE IF WE ARE THE ONLY NODE IN
+ * THE CLUSTER. IN THIS CASE WE DO NOT NEED TO CHECK ANY HEARTBEATS.
+ *-----------------------------------------------------------------------*/
+ return;
+ }//if
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+
+ nodePtr.p->alarmCount ++;
+ ndbrequire(nodePtr.p->phase == ZRUNNING);
+ ndbrequire(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB);
+
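+ // Raise a missed-heartbeat event once more than two check periods have passed without
+ // a heartbeat, and declare the neighbour failed after more than four.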
+ if(nodePtr.p->alarmCount > 2){
+ signal->theData[0] = NDB_LE_MissedHeartbeat;
+ signal->theData[1] = nodePtr.i;
+ signal->theData[2] = nodePtr.p->alarmCount - 1;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
+
+ if (nodePtr.p->alarmCount > 4) {
+ jam();
+ /**----------------------------------------------------------------------
+ * OUR LEFT NEIGHBOUR HAS KEPT QUIET FOR THREE CONSECUTIVE HEARTBEAT
+ * PERIODS. THUS WE DECLARE IT DOWN.
+ *----------------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
+ signal->theData[1] = nodePtr.i;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ failReportLab(signal, nodePtr.i, FailRep::ZHEARTBEAT_FAILURE);
+ return;
+ }//if
+}//Qmgr::checkHeartbeat()
+
+void Qmgr::apiHbHandlingLab(Signal* signal)
+{
+ NodeRecPtr TnodePtr;
+
+ for (TnodePtr.i = 1; TnodePtr.i < MAX_NODES; TnodePtr.i++) {
+ const Uint32 nodeId = TnodePtr.i;
+ ptrAss(TnodePtr, nodeRec);
+
+ const NodeInfo::NodeType type = getNodeInfo(nodeId).getType();
+ if(type == NodeInfo::DB)
+ continue;
+
+ if(type == NodeInfo::INVALID)
+ continue;
+
+ if (TnodePtr.p->phase == ZAPI_ACTIVE){
+ jam();
+ TnodePtr.p->alarmCount ++;
+
+ if(TnodePtr.p->alarmCount > 2){
+ signal->theData[0] = NDB_LE_MissedHeartbeat;
+ signal->theData[1] = nodeId;
+ signal->theData[2] = TnodePtr.p->alarmCount - 1;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
+
+ if (TnodePtr.p->alarmCount > 4) {
+ jam();
+ /*------------------------------------------------------------------*/
+ /* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS.
+ * WE WILL DISCONNECT FROM IT NOW.
+ *------------------------------------------------------------------*/
+ /*------------------------------------------------------------------*/
+ /* We call node_failed to release all connections for this api node */
+ /*------------------------------------------------------------------*/
+ signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
+ signal->theData[1] = nodeId;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
+
+ node_failed(signal, nodeId);
+ }//if
+ }//if
+ }//for
+ return;
+}//Qmgr::apiHbHandlingLab()
+
+void Qmgr::checkStartInterface(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ /*------------------------------------------------------------------------*/
+ // This method is called once per second. After a disconnect we wait at
+ // least three seconds before allowing new connects. We will also ensure
+ // that handling of the failure is completed before we allow new connections.
+ /*------------------------------------------------------------------------*/
+ for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZFAIL_CLOSING) {
+ jam();
+ nodePtr.p->alarmCount = nodePtr.p->alarmCount + 1;
+ if (c_connectedNodes.get(nodePtr.i)){
+ jam();
+ /*-------------------------------------------------------------------*/
+ // We need to ensure that the connection is not restored until it has
+ // been disconnected for at least three seconds.
+ /*-------------------------------------------------------------------*/
+ nodePtr.p->alarmCount = 0;
+ }//if
+ if ((nodePtr.p->alarmCount > 3) && (nodePtr.p->failState == NORMAL)) {
+ /**------------------------------------------------------------------
+ * WE HAVE DISCONNECTED THREE SECONDS AGO. WE ARE NOW READY TO
+ * CONNECT AGAIN AND ACCEPT NEW REGISTRATIONS FROM THIS NODE.
+ * WE WILL NOT ALLOW CONNECTIONS OF API NODES UNTIL API FAIL HANDLING
+ * IS COMPLETE.
+ *-------------------------------------------------------------------*/
+ nodePtr.p->failState = NORMAL;
+ if (getNodeInfo(nodePtr.i).m_type != NodeInfo::DB){
+ jam();
+ nodePtr.p->phase = ZAPI_INACTIVE;
+ } else {
+ jam();
+ nodePtr.p->phase = ZINIT;
+ }//if
+
+ nodePtr.p->alarmCount = 0;
+ signal->theData[0] = 0;
+ signal->theData[1] = nodePtr.i;
+ sendSignal(CMVMI_REF, GSN_OPEN_COMREQ, signal, 2, JBA);
+ } else {
+ if(((nodePtr.p->alarmCount + 1) % 60) == 0){
+ char buf[100];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Failure handling of node %d has not completed in %d min."
+ " - state = %d",
+ nodePtr.i,
+ (nodePtr.p->alarmCount + 1)/60,
+ nodePtr.p->failState);
+ warningEvent(buf);
+ }
+ }
+ }//if
+ }//for
+ return;
+}//Qmgr::checkStartInterface()
+
+/**-------------------------------------------------------------------------
+ * This method is called when a DISCONNECT_REP signal arrived which means that
+ * the API node is gone and we want to release resources in TC/DICT blocks.
+ *---------------------------------------------------------------------------*/
+void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
+{
+ NodeRecPtr failedNodePtr;
+
+ jamEntry();
+ failedNodePtr.i = failedNodeNo;
+ signal->theData[0] = failedNodePtr.i;
+ signal->theData[1] = QMGR_REF;
+
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+
+ ndbrequire(failedNodePtr.p->failState == NORMAL);
+
+ failedNodePtr.p->failState = WAITING_FOR_FAILCONF1;
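+ // Two API_FAILCONF replies are expected before failState returns to NORMAL (see execAPI_FAILCONF).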
+ sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA);
+ sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
+ sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
+
+ /**
+ * GREP also need the information that an API node
+ * (actually a REP node) has failed.
+ *
+ * GREP does however NOT send a CONF on this signal, i.e.
+ * the API_FAILREQ signal to GREP is like a REP signal
+ * (i.e. without any confirmation).
+ */
+ sendSignal(GREP_REF, GSN_API_FAILREQ, signal, 2, JBA);
+
+ /**-------------------------------------------------------------------------
+ * THE OTHER NODE WAS AN API NODE. THE COMMUNICATION LINK IS ALREADY
+ * BROKEN AND THUS NO ACTION IS NEEDED TO BREAK THE CONNECTION.
+ * WE ONLY NEED TO SET PARAMETERS TO ENABLE A NEW CONNECTION IN A FEW
+ * SECONDS.
+ *-------------------------------------------------------------------------*/
+ failedNodePtr.p->alarmCount = 0;
+
+ CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
+
+ closeCom->xxxBlockRef = reference();
+ closeCom->failNo = 0;
+ closeCom->noOfNodes = 1;
+ NodeBitmask::clear(closeCom->theNodes);
+ NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
+ sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
+ CloseComReqConf::SignalLength, JBA);
+}//Qmgr::sendApiFailReq()
+
+void Qmgr::execAPI_FAILCONF(Signal* signal)
+{
+ NodeRecPtr failedNodePtr;
+
+ jamEntry();
+ failedNodePtr.i = signal->theData[0];
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+
+ if (failedNodePtr.p->failState == WAITING_FOR_FAILCONF1){
+ jam();
+
+ failedNodePtr.p->rcv[0] = signal->theData[1];
+ failedNodePtr.p->failState = WAITING_FOR_FAILCONF2;
+
+ } else if (failedNodePtr.p->failState == WAITING_FOR_FAILCONF2) {
+ failedNodePtr.p->rcv[1] = signal->theData[1];
+ failedNodePtr.p->failState = NORMAL;
+
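+ // The two recorded confirm sources must differ; identical values are treated as a fatal error.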
+ if (failedNodePtr.p->rcv[0] == failedNodePtr.p->rcv[1]) {
+ jam();
+ systemErrorLab(signal);
+ } else {
+ jam();
+ failedNodePtr.p->rcv[0] = 0;
+ failedNodePtr.p->rcv[1] = 0;
+ }//if
+ } else {
+ jam();
+#ifdef VM_TRACE
+ ndbout << "failedNodePtr.p->failState = "
+ << (Uint32)(failedNodePtr.p->failState) << endl;
+#endif
+ systemErrorLab(signal);
+ }//if
+ return;
+}//Qmgr::execAPI_FAILCONF()
+
+void Qmgr::execNDB_FAILCONF(Signal* signal)
+{
+ NodeRecPtr failedNodePtr;
+ NodeRecPtr nodePtr;
+
+ jamEntry();
+ failedNodePtr.i = signal->theData[0];
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+ if (failedNodePtr.p->failState == WAITING_FOR_NDB_FAILCONF){
+ failedNodePtr.p->failState = NORMAL;
+ } else {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ if (cpresident == getOwnNodeId()) {
+ jam();
+ /**
+ * Prepare a NFCompleteRep and send to all connected API's
+ * They can then abort all transaction waiting for response from
+ * the failed node
+ */
+ NFCompleteRep * const nfComp = (NFCompleteRep *)&signal->theData[0];
+ nfComp->blockNo = QMGR_REF;
+ nfComp->nodeId = getOwnNodeId();
+ nfComp->failedNodeId = failedNodePtr.i;
+
+ for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZAPI_ACTIVE){
+ jam();
+ sendSignal(nodePtr.p->blockRef, GSN_NF_COMPLETEREP, signal,
+ NFCompleteRep::SignalLength, JBA);
+ }//if
+ }//for
+ }
+ return;
+}//Qmgr::execNDB_FAILCONF()
+
+/*******************************/
+/* DISCONNECT_REP */
+/*******************************/
+void Qmgr::execDISCONNECT_REP(Signal* signal)
+{
+ jamEntry();
+ const DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
+ const Uint32 nodeId = rep->nodeId;
+ c_connectedNodes.clear(nodeId);
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrCheckGuard(nodePtr, MAX_NODES, nodeRec);
+ switch(nodePtr.p->phase){
+ case ZRUNNING:
+ jam();
+ break;
+ case ZINIT:
+ case ZSTARTING:
+ case ZPREPARE_FAIL:
+ case ZFAIL_CLOSING:
+ case ZAPI_ACTIVE:
+ case ZAPI_INACTIVE:
+ ndbrequire(false);
+ }
+
+ node_failed(signal, nodeId);
+}//DISCONNECT_REP
+
+void Qmgr::node_failed(Signal* signal, Uint16 aFailedNode)
+{
+ NodeRecPtr failedNodePtr;
+ /**------------------------------------------------------------------------
+ * A COMMUNICATION LINK HAS BEEN DISCONNECTED. WE MUST TAKE SOME ACTION
+ * DUE TO THIS.
+ *-----------------------------------------------------------------------*/
+ failedNodePtr.i = aFailedNode;
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+
+ if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB){
+ jam();
+ /**---------------------------------------------------------------------
+ * THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT
+ * FAILURE WAS DISCOVERED.
+ *---------------------------------------------------------------------*/
+ switch(failedNodePtr.p->phase){
+ case ZRUNNING:
+ jam();
+ failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
+ return;
+ case ZFAIL_CLOSING:
+ jam();
+ return;
+ case ZSTARTING:
+ c_start.reset();
+ // Fall-through
+ default:
+ jam();
+ /*---------------------------------------------------------------------*/
+ // The other node is still not in the cluster but disconnected.
+ // We must restart communication in three seconds.
+ /*---------------------------------------------------------------------*/
+ failedNodePtr.p->failState = NORMAL;
+ failedNodePtr.p->phase = ZFAIL_CLOSING;
+ failedNodePtr.p->alarmCount = 0;
+
+ CloseComReqConf * const closeCom =
+ (CloseComReqConf *)&signal->theData[0];
+
+ closeCom->xxxBlockRef = reference();
+ closeCom->failNo = 0;
+ closeCom->noOfNodes = 1;
+ NodeBitmask::clear(closeCom->theNodes);
+ NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
+ sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
+ CloseComReqConf::SignalLength, JBA);
+ }//if
+ return;
+ }
+
+ /**
+ * API code
+ */
+ jam();
+ if (failedNodePtr.p->phase != ZFAIL_CLOSING){
+ jam();
+ //-------------------------------------------------------------------------
+ // The API was active and has now failed. We need to initiate API failure
+ // handling. If the API had already failed then we can ignore this
+ // discovery.
+ //-------------------------------------------------------------------------
+ failedNodePtr.p->phase = ZFAIL_CLOSING;
+
+ sendApiFailReq(signal, aFailedNode);
+ arbitRec.code = ArbitCode::ApiFail;
+ handleArbitApiFail(signal, aFailedNode);
+ }//if
+ return;
+}//Qmgr::node_failed()
+
+/**--------------------------------------------------------------------------
+ * AN API NODE IS REGISTERING. IF IT IS THE FIRST TIME, WE WILL ENABLE
+ * COMMUNICATION WITH ALL NDB BLOCKS.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* API_REGREQ */
+/*******************************/
+void Qmgr::execAPI_REGREQ(Signal* signal)
+{
+ jamEntry();
+
+ ApiRegReq* req = (ApiRegReq*)signal->getDataPtr();
+ const Uint32 version = req->version;
+ const BlockReference ref = req->ref;
+
+ NodeRecPtr apiNodePtr;
+ apiNodePtr.i = refToNode(ref);
+ ptrCheckGuard(apiNodePtr, MAX_NODES, nodeRec);
+
+#if 0
+ ndbout_c("Qmgr::execAPI_REGREQ: Recd API_REGREQ (NodeId=%d)", apiNodePtr.i);
+#endif
+
+ bool compatability_check;
+ switch(getNodeInfo(apiNodePtr.i).getType()){
+ case NodeInfo::API:
+ compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
+ if (!compatability_check)
+ infoEvent("Connection attempt from api or mysqld id=%d with %s "
+ "incompatible with %s", apiNodePtr.i,
+ getVersionString(version,""), NDB_VERSION_STRING);
+ break;
+ case NodeInfo::MGM:
+ compatability_check = ndbCompatible_ndb_mgmt(NDB_VERSION, version);
+ if (!compatability_check)
+ infoEvent("Connection attempt from management server id=%d with %s "
+ "incompatible with %s", apiNodePtr.i,
+ getVersionString(version,""), NDB_VERSION_STRING);
+ break;
+ case NodeInfo::REP:
+ // compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
+ // break;
+ case NodeInfo::DB:
+ case NodeInfo::INVALID:
+ default:
+ sendApiRegRef(signal, ref, ApiRegRef::WrongType);
+ infoEvent("Invalid connection attempt with type %d",
+ getNodeInfo(apiNodePtr.i).getType());
+ return;
+ }
+
+ if (!compatability_check) {
+ jam();
+ apiNodePtr.p->phase = ZAPI_INACTIVE;
+ sendApiRegRef(signal, ref, ApiRegRef::UnsupportedVersion);
+ return;
+ }
+
+ setNodeInfo(apiNodePtr.i).m_version = version;
+
+ apiNodePtr.p->alarmCount = 0;
+
+ ApiRegConf * const apiRegConf = (ApiRegConf *)&signal->theData[0];
+ apiRegConf->qmgrRef = reference();
+ apiRegConf->apiHeartbeatFrequency = (chbApiDelay / 10);
+ apiRegConf->version = NDB_VERSION;
+ apiRegConf->nodeState = getNodeState();
+ {
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+ Uint32 dynamicId = nodePtr.p->ndynamicId;
+
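+ // Report our dynamic id in the node state; the sign is flipped when this node is the
+ // master, a convention presumably used by the API to identify the master node.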
+ if(apiRegConf->nodeState.masterNodeId != getOwnNodeId()){
+ jam();
+ apiRegConf->nodeState.dynamicId = dynamicId;
+ } else {
+ apiRegConf->nodeState.dynamicId = -dynamicId;
+ }
+ }
+ apiRegConf->nodeState.m_connected_nodes.assign(c_connectedNodes);
+
+ sendSignal(ref, GSN_API_REGCONF, signal, ApiRegConf::SignalLength, JBB);
+
+ if ((getNodeState().startLevel == NodeState::SL_STARTED ||
+ getNodeState().getSingleUserMode())
+ && apiNodePtr.p->phase == ZAPI_INACTIVE) {
+ jam();
+ /**----------------------------------------------------------------------
+ * THE API NODE IS REGISTERING. WE WILL ACCEPT IT BY CHANGING STATE AND
+ * SENDING A CONFIRM.
+ *----------------------------------------------------------------------*/
+ apiNodePtr.p->phase = ZAPI_ACTIVE;
+ apiNodePtr.p->blockRef = ref;
+ signal->theData[0] = apiNodePtr.i;
+ sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA);
+ }
+ return;
+}//Qmgr::execAPI_REGREQ()
+
+
+void
+Qmgr::execAPI_VERSION_REQ(Signal * signal) {
+ jamEntry();
+ ApiVersionReq * const req = (ApiVersionReq *)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 nodeId = req->nodeId;
+
+ ApiVersionConf * conf = (ApiVersionConf *)req;
+ if(getNodeInfo(nodeId).m_connected)
+ conf->version = getNodeInfo(nodeId).m_version;
+ else
+ conf->version = 0;
+ conf->nodeId = nodeId;
+
+ sendSignal(senderRef,
+ GSN_API_VERSION_CONF,
+ signal,
+ ApiVersionConf::SignalLength, JBB);
+
+
+}
+
+
+#if 0
+bool
+Qmgr::checkAPIVersion(NodeId nodeId,
+ Uint32 apiVersion, Uint32 ownVersion) const {
+ bool ret=true;
+ /**
+ * First implementation...
+ */
+ if ((getMajor(apiVersion) < getMajor(ownVersion) ||
+ getMinor(apiVersion) < getMinor(ownVersion)) &&
+ apiVersion >= API_UPGRADE_VERSION) {
+ jam();
+ if ( getNodeInfo(nodeId).getType() != NodeInfo::MGM ) {
+ jam();
+ ret = false;
+ } else {
+ jam();
+ /* we have a software upgrade situation, mgmtsrvr should be
+ * the highest, let him decide what to do
+ */
+ ;
+ }
+ }
+ return ret;
+}
+#endif
+
+void
+Qmgr::sendApiRegRef(Signal* signal, Uint32 Tref, ApiRegRef::ErrorCode err){
+ ApiRegRef* ref = (ApiRegRef*)signal->getDataPtrSend();
+ ref->ref = reference();
+ ref->version = NDB_VERSION;
+ ref->errorCode = err;
+ sendSignal(Tref, GSN_API_REGREF, signal, ApiRegRef::SignalLength, JBB);
+}
+
+/**--------------------------------------------------------------------------
+ * A NODE HAS BEEN DECLARED AS DOWN. WE WILL CLOSE THE COMMUNICATION TO THIS
+ * NODE IF NOT ALREADY DONE. IF WE ARE PRESIDENT OR BECOME PRESIDENT BECAUSE
+ * OF A FAILED PRESIDENT THEN WE WILL TAKE FURTHER ACTION.
+ *---------------------------------------------------------------------------*/
+void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
+ FailRep::FailCause aFailCause)
+{
+ NodeRecPtr nodePtr;
+ NodeRecPtr failedNodePtr;
+ NodeRecPtr myNodePtr;
+ UintR TnoFailedNodes;
+
+ failedNodePtr.i = aFailedNode;
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+ if (failedNodePtr.i == getOwnNodeId()) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+
+ myNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
+ if (myNodePtr.p->phase != ZRUNNING) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+ TnoFailedNodes = cnoFailedNodes;
+ failReport(signal, failedNodePtr.i, (UintR)ZTRUE, aFailCause);
+ if (cpresident == getOwnNodeId()) {
+ jam();
+ if (ctoStatus == Q_NOT_ACTIVE) {
+ jam();
+ /**--------------------------------------------------------------------
+ * AS PRESIDENT WE ARE REQUIRED TO START THE EXCLUSION PROCESS SUCH THAT
+ * THE APPLICATIONS SEE NODE FAILURES IN A CONSISTENT ORDER.
+ * IF WE HAVE BECOME PRESIDENT NOW (CTO_STATUS = ACTIVE) THEN WE HAVE
+ * TO COMPLETE THE PREVIOUS COMMIT FAILED NODE PROCESS BEFORE STARTING
+ * A NEW ONE.
+ * CTO_STATUS = ACTIVE CAN ALSO MEAN THAT WE ARE PRESIDENT AND ARE
+ * CURRENTLY COMMITTING A SET OF NODE CRASHES. IN THIS CASE IT IS NOT
+ * ALLOWED TO START PREPARING NEW NODE CRASHES.
+ *---------------------------------------------------------------------*/
+ if (TnoFailedNodes != cnoFailedNodes) {
+ jam();
+ cfailureNr = cfailureNr + 1;
+ for (nodePtr.i = 1;
+ nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ sendPrepFailReq(signal, nodePtr.i);
+ }//if
+ }//for
+ }//if
+ }//if
+ }//if
+ return;
+}//Qmgr::failReportLab()
+
+/**-------------------------------------------------------------------------
+ * WE HAVE RECEIVED A PREPARE TO EXCLUDE A NUMBER OF NODES FROM THE CLUSTER.
+ * WE WILL FIRST CHECK WHETHER THERE ARE ANY FURTHER NODES THAT
+ * WE HAVE ALSO EXCLUDED.
+ *--------------------------------------------------------------------------*/
+/*******************************/
+/* PREP_FAILREQ */
+/*******************************/
+void Qmgr::execPREP_FAILREQ(Signal* signal)
+{
+ NodeRecPtr myNodePtr;
+ jamEntry();
+
+ PrepFailReqRef * const prepFail = (PrepFailReqRef *)&signal->theData[0];
+
+ BlockReference Tblockref = prepFail->xxxBlockRef;
+ Uint16 TfailureNr = prepFail->failNo;
+ cnoPrepFailedNodes = prepFail->noOfNodes;
+ UintR arrayIndex = 0;
+ Uint32 Tindex;
+ for (Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) {
+ if (NodeBitmask::get(prepFail->theNodes, Tindex)){
+ cprepFailedNodes[arrayIndex] = Tindex;
+ arrayIndex++;
+ }//if
+ }//for
+ UintR guard0;
+
+ /**
+ * Block commit until node failures have stabilized
+ *
+ * @See RT352
+ */
+ BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
+ block->failNo = TfailureNr;
+ EXECUTE_DIRECT(DBDIH, GSN_BLOCK_COMMIT_ORD, signal,
+ BlockCommitOrd::SignalLength);
+
+ myNodePtr.i = getOwnNodeId();
+ ptrCheckGuard(myNodePtr, MAX_NDB_NODES, nodeRec);
+ if (myNodePtr.p->phase != ZRUNNING) {
+ jam();
+ systemErrorLab(signal);
+ return;
+ }//if
+
+ guard0 = cnoPrepFailedNodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (Tindex = 0; Tindex <= guard0; Tindex++) {
+ jam();
+ failReport(signal,
+ cprepFailedNodes[Tindex],
+ (UintR)ZFALSE,
+ FailRep::ZIN_PREP_FAIL_REQ);
+ }//for
+ sendCloseComReq(signal, Tblockref, TfailureNr);
+ cnoCommitFailedNodes = 0;
+ cprepareFailureNr = TfailureNr;
+ return;
+}//Qmgr::execPREP_FAILREQ()
+
+/**---------------------------------------------------------------------------
+ * THE CRASHED NODES HAVE BEEN EXCLUDED FROM COMMUNICATION.
+ * WE WILL CHECK WHETHER ANY MORE NODES HAVE FAILED DURING THE PREPARE PROCESS.
+ * IF SO WE WILL REFUSE THE PREPARE PHASE AND EXPECT A NEW PREPARE MESSAGE
+ * WITH ALL FAILED NODES INCLUDED.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* CLOSE_COMCONF */
+/*******************************/
+void Qmgr::execCLOSE_COMCONF(Signal* signal)
+{
+ jamEntry();
+
+ CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
+
+ BlockReference Tblockref = closeCom->xxxBlockRef;
+ Uint16 TfailureNr = closeCom->failNo;
+
+ cnoPrepFailedNodes = closeCom->noOfNodes;
+ UintR arrayIndex = 0;
+ UintR Tindex = 0;
+ for(Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++){
+ if(NodeBitmask::get(closeCom->theNodes, Tindex)){
+ cprepFailedNodes[arrayIndex] = Tindex;
+ arrayIndex++;
+ }
+ }
+ UintR tprepFailConf;
+ UintR Tindex2;
+ UintR guard0;
+ UintR guard1;
+ UintR Tfound;
+ Uint16 TfailedNodeNo;
+
+ tprepFailConf = ZTRUE;
+ if (cnoFailedNodes > 0) {
+ jam();
+ guard0 = cnoFailedNodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (Tindex = 0; Tindex <= guard0; Tindex++) {
+ jam();
+ TfailedNodeNo = cfailedNodes[Tindex];
+ Tfound = ZFALSE;
+ guard1 = cnoPrepFailedNodes - 1;
+ arrGuard(guard1, MAX_NDB_NODES);
+ for (Tindex2 = 0; Tindex2 <= guard1; Tindex2++) {
+ jam();
+ if (TfailedNodeNo == cprepFailedNodes[Tindex2]) {
+ jam();
+ Tfound = ZTRUE;
+ }//if
+ }//for
+ if (Tfound == ZFALSE) {
+ jam();
+ tprepFailConf = ZFALSE;
+ arrGuard(cnoPrepFailedNodes, MAX_NDB_NODES);
+ cprepFailedNodes[cnoPrepFailedNodes] = TfailedNodeNo;
+ cnoPrepFailedNodes = cnoPrepFailedNodes + 1;
+ }//if
+ }//for
+ }//if
+ if (tprepFailConf == ZFALSE) {
+ jam();
+ for (Tindex = 1; Tindex < MAX_NDB_NODES; Tindex++) {
+ cfailedNodes[Tindex] = cprepFailedNodes[Tindex];
+ }//for
+ cnoFailedNodes = cnoPrepFailedNodes;
+ sendPrepFailReqRef(signal,
+ Tblockref,
+ GSN_PREP_FAILREF,
+ reference(),
+ cfailureNr,
+ cnoPrepFailedNodes,
+ cprepFailedNodes);
+ } else {
+ jam();
+ cnoCommitFailedNodes = cnoPrepFailedNodes;
+ guard0 = cnoPrepFailedNodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (Tindex = 0; Tindex <= guard0; Tindex++) {
+ jam();
+ arrGuard(Tindex, MAX_NDB_NODES);
+ ccommitFailedNodes[Tindex] = cprepFailedNodes[Tindex];
+ }//for
+ signal->theData[0] = getOwnNodeId();
+ signal->theData[1] = TfailureNr;
+ sendSignal(Tblockref, GSN_PREP_FAILCONF, signal, 2, JBA);
+ }//if
+ return;
+}//Qmgr::execCLOSE_COMCONF()
+
+/*---------------------------------------------------------------------------*/
+/* WE HAVE RECEIVED A CONFIRMATION THAT THIS NODE HAS PREPARED THE FAILURE.  */
+/*---------------------------------------------------------------------------*/
+/*******************************/
+/* PREP_FAILCONF */
+/*******************************/
+void Qmgr::execPREP_FAILCONF(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ NodeRecPtr replyNodePtr;
+ jamEntry();
+ replyNodePtr.i = signal->theData[0];
+ Uint16 TfailureNr = signal->theData[1];
+ if (TfailureNr != cfailureNr) {
+ jam();
+ /**----------------------------------------------------------------------
+ * WE HAVE ALREADY STARTED A NEW ATTEMPT TO EXCLUDE A NUMBER OF NODES.
+ * IGNORE
+ *----------------------------------------------------------------------*/
+ return;
+ }//if
+ ptrCheckGuard(replyNodePtr, MAX_NDB_NODES, nodeRec);
+ replyNodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ if (nodePtr.p->sendPrepFailReqStatus == Q_ACTIVE) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//for
+ /**
+ * Check node count and groups and invoke arbitrator if necessary.
+ * Continues via sendCommitFailReq() if successful.
+ */
+ arbitRec.failureNr = cfailureNr;
+ const NodeState & s = getNodeState();
+ if(s.startLevel == NodeState::SL_STOPPING_3 && s.stopping.systemShutdown){
+ jam();
+ /**
+ * We're performing a system shutdown,
+ * don't let the arbitrator shut us down
+ */
+ return;
+ }
+ handleArbitCheck(signal);
+ return;
+}//Qmgr::execPREP_FAILCONF()
+
+void
+Qmgr::sendCommitFailReq(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ jam();
+ if (arbitRec.failureNr != cfailureNr) {
+ jam();
+ /**----------------------------------------------------------------------
+ * WE HAVE ALREADY STARTED A NEW ATTEMPT TO EXCLUDE A NUMBER OF NODES.
+ * IGNORE
+ *----------------------------------------------------------------------*/
+ return;
+ }//if
+ /**-----------------------------------------------------------------------
+ * WE HAVE SUCCESSFULLY PREPARED A SET OF NODE FAILURES. WE WILL NOW COMMIT
+ * THESE NODE FAILURES.
+ *-------------------------------------------------------------------------*/
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE;
+ signal->theData[0] = cpdistref;
+ signal->theData[1] = cfailureNr;
+ sendSignal(nodePtr.p->blockRef, GSN_COMMIT_FAILREQ, signal, 2, JBA);
+ }//if
+ }//for
+ ctoStatus = Q_ACTIVE;
+ cnoFailedNodes = 0;
+ return;
+}//sendCommitFailReq()
+
+/*---------------------------------------------------------------------------*/
+/* SOME NODE HAS DISCOVERED A NODE FAILURE THAT WE HAVE NOT YET DISCOVERED.  */
+/* WE WILL START ANOTHER ROUND OF PREPARING A SET OF NODE FAILURES. */
+/*---------------------------------------------------------------------------*/
+/*******************************/
+/* PREP_FAILREF */
+/*******************************/
+void Qmgr::execPREP_FAILREF(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ jamEntry();
+
+ PrepFailReqRef * const prepFail = (PrepFailReqRef *)&signal->theData[0];
+
+ Uint16 TfailureNr = prepFail->failNo;
+ cnoPrepFailedNodes = prepFail->noOfNodes;
+
+ UintR arrayIndex = 0;
+ UintR Tindex = 0;
+ for(Tindex = 0; Tindex < MAX_NDB_NODES; Tindex++) {
+ jam();
+ if(NodeBitmask::get(prepFail->theNodes, Tindex)){
+ jam();
+ cprepFailedNodes[arrayIndex] = Tindex;
+ arrayIndex++;
+ }//if
+ }//for
+ if (TfailureNr != cfailureNr) {
+ jam();
+ /**---------------------------------------------------------------------
+ * WE HAVE ALREADY STARTED A NEW ATTEMPT TO EXCLUDE A NUMBER OF NODES.
+ * IGNORE
+ *----------------------------------------------------------------------*/
+ return;
+ }//if
+ UintR guard0;
+ UintR Ti;
+
+ cnoFailedNodes = cnoPrepFailedNodes;
+ guard0 = cnoPrepFailedNodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES);
+ for (Ti = 0; Ti <= guard0; Ti++) {
+ jam();
+ cfailedNodes[Ti] = cprepFailedNodes[Ti];
+ }//for
+ cfailureNr = cfailureNr + 1;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ sendPrepFailReq(signal, nodePtr.i);
+ }//if
+ }//for
+ return;
+}//Qmgr::execPREP_FAILREF()
+
+/*---------------------------------------------------------------------------*/
+/* THE PRESIDENT IS NOW COMMITTING THE PREVIOUSLY PREPARED NODE FAILURE. */
+/*---------------------------------------------------------------------------*/
+/***********************/
+/* COMMIT_FAILREQ */
+/***********************/
+void Qmgr::execCOMMIT_FAILREQ(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ jamEntry();
+ BlockReference Tblockref = signal->theData[0];
+ UintR TfailureNr = signal->theData[1];
+ if (Tblockref != cpdistref) {
+ jam();
+ return;
+ }//if
+ UintR guard0;
+ UintR Tj;
+
+ /**
+ * Block commit until node failures have stabilized
+ *
+ * @See RT352
+ */
+ UnblockCommitOrd* const unblock = (UnblockCommitOrd *)&signal->theData[0];
+ unblock->failNo = TfailureNr;
+ EXECUTE_DIRECT(DBDIH, GSN_UNBLOCK_COMMIT_ORD, signal,
+ UnblockCommitOrd::SignalLength);
+
+ if ((ccommitFailureNr != TfailureNr) &&
+ (cnoCommitFailedNodes > 0)) {
+ jam();
+ /**-----------------------------------------------------------------------
+ * WE ONLY DO THIS PART OF THE COMMIT HANDLING THE FIRST TIME WE HEAR THIS
+ * SIGNAL. WE CAN HEAR IT SEVERAL TIMES IF THE PRESIDENTS KEEP FAILING.
+ *-----------------------------------------------------------------------*/
+ ccommitFailureNr = TfailureNr;
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ nodeFail->failNo = ccommitFailureNr;
+ nodeFail->noOfNodes = cnoCommitFailedNodes;
+ nodeFail->masterNodeId = cpresident;
+ NodeBitmask::clear(nodeFail->theNodes);
+ for(unsigned i = 0; i < cnoCommitFailedNodes; i++) {
+ jam();
+ NodeBitmask::set(nodeFail->theNodes, ccommitFailedNodes[i]);
+ }//for
+ sendSignal(NDBCNTR_REF, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+
+ guard0 = cnoCommitFailedNodes - 1;
+ arrGuard(guard0, MAX_NDB_NODES);
+ /**--------------------------------------------------------------------
+ * WE MUST PREPARE TO ACCEPT THE CRASHED NODE INTO THE CLUSTER AGAIN BY
+ * SETTING UP CONNECTIONS AGAIN AFTER THREE SECONDS OF DELAY.
+ *--------------------------------------------------------------------*/
+ for (Tj = 0; Tj <= guard0; Tj++) {
+ jam();
+ nodePtr.i = ccommitFailedNodes[Tj];
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+ nodePtr.p->phase = ZFAIL_CLOSING;
+ nodePtr.p->failState = WAITING_FOR_NDB_FAILCONF;
+ nodePtr.p->alarmCount = 0;
+ c_clusterNodes.clear(nodePtr.i);
+ }//for
+ /*----------------------------------------------------------------------*/
+ /* WE INFORM THE CONNECTED API NODES ABOUT THE FAILED NODES. */
+ /*----------------------------------------------------------------------*/
+ for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZAPI_ACTIVE) {
+ jam();
+
+ NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
+
+ nodeFail->failNo = ccommitFailureNr;
+ nodeFail->noOfNodes = cnoCommitFailedNodes;
+ NodeBitmask::clear(nodeFail->theNodes);
+ for(unsigned i = 0; i < cnoCommitFailedNodes; i++) {
+ jam();
+ NodeBitmask::set(nodeFail->theNodes, ccommitFailedNodes[i]);
+ }//for
+ sendSignal(nodePtr.p->blockRef, GSN_NODE_FAILREP, signal,
+ NodeFailRep::SignalLength, JBB);
+ }//if
+ }//for
+ if (cpresident != getOwnNodeId()) {
+ jam();
+ cnoFailedNodes = cnoCommitFailedNodes - cnoFailedNodes;
+ if (cnoFailedNodes > 0) {
+ jam();
+ guard0 = cnoFailedNodes - 1;
+ arrGuard(guard0 + cnoCommitFailedNodes, MAX_NDB_NODES);
+ for (Tj = 0; Tj <= guard0; Tj++) {
+ jam();
+ cfailedNodes[Tj] = cfailedNodes[Tj + cnoCommitFailedNodes];
+ }//for
+ }//if
+ }//if
+ cnoCommitFailedNodes = 0;
+ }//if
+ /**-----------------------------------------------------------------------
+ * WE WILL ALWAYS ACKNOWLEDGE THE COMMIT EVEN WHEN RECEIVING IT MULTIPLE
+ * TIMES SINCE IT WILL ALWAYS COME FROM A NEW PRESIDENT.
+ *------------------------------------------------------------------------*/
+ signal->theData[0] = getOwnNodeId();
+ sendSignal(Tblockref, GSN_COMMIT_FAILCONF, signal, 1, JBA);
+ return;
+}//Qmgr::execCOMMIT_FAILREQ()
+
+/*--------------------------------------------------------------------------*/
+/* WE HAVE RECEIVED A CONFIRMATION THAT THIS NODE HAS COMMITTED THE FAILURES. */
+/*--------------------------------------------------------------------------*/
+/*******************************/
+/* COMMIT_FAILCONF */
+/*******************************/
+void Qmgr::execCOMMIT_FAILCONF(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ NodeRecPtr replyNodePtr;
+ jamEntry();
+ replyNodePtr.i = signal->theData[0];
+
+ ptrCheckGuard(replyNodePtr, MAX_NDB_NODES, nodeRec);
+ replyNodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ if (nodePtr.p->sendCommitFailReqStatus == Q_ACTIVE) {
+ jam();
+ return;
+ }//if
+ }//if
+ }//for
+ /*-----------------------------------------------------------------------*/
+ /* WE HAVE SUCCESSFULLY COMMITTED A SET OF NODE FAILURES. */
+ /*-----------------------------------------------------------------------*/
+ ctoStatus = Q_NOT_ACTIVE;
+ if (cnoFailedNodes != 0) {
+ jam();
+ /**----------------------------------------------------------------------
+ * A FAILURE OCCURRED IN THE MIDDLE OF THE COMMIT PROCESS. WE ARE NOW
+ * READY TO START THE FAILED NODE PROCESS FOR THIS NODE.
+ *----------------------------------------------------------------------*/
+ cfailureNr = cfailureNr + 1;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ sendPrepFailReq(signal, nodePtr.i);
+ }//if
+ }//for
+ }//if
+ return;
+}//Qmgr::execCOMMIT_FAILCONF()
+
+/**--------------------------------------------------------------------------
+ * IF THE PRESIDENT FAILS IN THE MIDDLE OF THE COMMIT OF A FAILED NODE THEN
+ * THE NEW PRESIDENT NEEDS TO QUERY THE COMMIT STATUS IN THE RUNNING NODES.
+ *---------------------------------------------------------------------------*/
+/*******************************/
+/* PRES_TOCONF */
+/*******************************/
+void Qmgr::execPRES_TOCONF(Signal* signal)
+{
+ NodeRecPtr nodePtr;
+ NodeRecPtr replyNodePtr;
+ jamEntry();
+ replyNodePtr.i = signal->theData[0];
+ UintR TfailureNr = signal->theData[1];
+ if (ctoFailureNr < TfailureNr) {
+ jam();
+ ctoFailureNr = TfailureNr;
+ }//if
+ ptrCheckGuard(replyNodePtr, MAX_NDB_NODES, nodeRec);
+ replyNodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->sendPresToStatus == Q_ACTIVE) {
+ jam();
+ return;
+ }//if
+ }//for
+ /*-------------------------------------------------------------------------*/
+ /* WE ARE NOW READY TO DISCOVER WHETHER THE FAILURE WAS COMMITTED OR NOT. */
+ /*-------------------------------------------------------------------------*/
+ if (ctoFailureNr > ccommitFailureNr) {
+ jam();
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE;
+ signal->theData[0] = cpdistref;
+ signal->theData[1] = ctoFailureNr;
+ sendSignal(nodePtr.p->blockRef, GSN_COMMIT_FAILREQ, signal, 2, JBA);
+ }//if
+ }//for
+ return;
+ }//if
+ /*-------------------------------------------------------------------------*/
+ /* WE ARE NOW READY TO START THE NEW NODE FAILURE PROCESS. */
+ /*-------------------------------------------------------------------------*/
+ ctoStatus = Q_NOT_ACTIVE;
+ cfailureNr = cfailureNr + 1;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ sendPrepFailReq(signal, nodePtr.i);
+ }//if
+ }//for
+ return;
+}//Qmgr::execPRES_TOCONF()
+
+/*--------------------------------------------------------------------------*/
+// Provide information about the configured NDB nodes in the system.
+/*--------------------------------------------------------------------------*/
+void Qmgr::execREAD_NODESREQ(Signal* signal)
+{
+ jamEntry();
+
+ BlockReference TBref = signal->theData[0];
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0];
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+
+ NdbNodeBitmask tmp = c_definedNodes;
+ tmp.bitANDC(c_clusterNodes);
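+
+ /* Nodes that are defined in the configuration but not currently part of
+ * the running cluster are reported back as inactive. */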
+
+ readNodes->noOfNodes = c_definedNodes.count();
+ readNodes->masterNodeId = cpresident;
+ readNodes->ndynamicId = nodePtr.p->ndynamicId;
+ c_definedNodes.copyto(NdbNodeBitmask::Size, readNodes->definedNodes);
+ c_clusterNodes.copyto(NdbNodeBitmask::Size, readNodes->clusterNodes);
+ tmp.copyto(NdbNodeBitmask::Size, readNodes->inactiveNodes);
+ NdbNodeBitmask::clear(readNodes->startingNodes);
+ NdbNodeBitmask::clear(readNodes->startedNodes);
+
+ sendSignal(TBref, GSN_READ_NODESCONF, signal,
+ ReadNodesConf::SignalLength, JBB);
+}//Qmgr::execREAD_NODESREQ()
+
+void Qmgr::systemErrorBecauseOtherNodeFailed(Signal* signal,
+ NodeId failedNodeId) {
+ jam();
+
+ // Broadcast that this node is failing to other nodes
+ failReport(signal, getOwnNodeId(), (UintR)ZTRUE, FailRep::ZOWN_FAILURE);
+
+ char buf[100];
+ BaseString::snprintf(buf, 100,
+ "Node was shutdown during startup because node %d failed",
+ failedNodeId);
+
+ progError(__LINE__, ERR_SR_OTHERNODEFAILED, buf);
+}
+
+
+void Qmgr::systemErrorLab(Signal* signal, const char * message)
+{
+ jam();
+ // Broadcast that this node is failing to other nodes
+ failReport(signal, getOwnNodeId(), (UintR)ZTRUE, FailRep::ZOWN_FAILURE);
+
+ // If it is known why the shutdown occurred,
+ // an error message has been passed to this function
+ progError(__LINE__, 0, message);
+
+ return;
+}//Qmgr::systemErrorLab()
+
+
+/**---------------------------------------------------------------------------
+ * A FAILURE HAS BEEN DISCOVERED ON A NODE. WE NEED TO CLEAR A
+ * NUMBER OF VARIABLES.
+ *---------------------------------------------------------------------------*/
+void Qmgr::failReport(Signal* signal,
+ Uint16 aFailedNode,
+ UintR aSendFailRep,
+ FailRep::FailCause aFailCause)
+{
+ UintR tfrMinDynamicId;
+ NodeRecPtr failedNodePtr;
+ NodeRecPtr nodePtr;
+ NodeRecPtr presidentNodePtr;
+
+
+ failedNodePtr.i = aFailedNode;
+ ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRec);
+ if (failedNodePtr.p->phase == ZRUNNING) {
+ jam();
+/* WE ALSO NEED TO ADD HERE SOME CODE THAT GETS OUR NEW NEIGHBOURS. */
+ if (cpresident == getOwnNodeId()) {
+ jam();
+ if (failedNodePtr.p->sendCommitFailReqStatus == Q_ACTIVE) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ sendSignal(QMGR_REF, GSN_COMMIT_FAILCONF, signal, 1, JBA);
+ }//if
+ if (failedNodePtr.p->sendPresToStatus == Q_ACTIVE) {
+ jam();
+ signal->theData[0] = failedNodePtr.i;
+ signal->theData[1] = ccommitFailureNr;
+ sendSignal(QMGR_REF, GSN_PRES_TOCONF, signal, 2, JBA);
+ }//if
+ }//if
+ failedNodePtr.p->phase = ZPREPARE_FAIL;
+ failedNodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
+ failedNodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
+ failedNodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
+ failedNodePtr.p->alarmCount = 0;
+ if (aSendFailRep == ZTRUE) {
+ jam();
+ if (failedNodePtr.i != getOwnNodeId()) {
+ jam();
+ FailRep * const failRep = (FailRep *)&signal->theData[0];
+ failRep->failNodeId = failedNodePtr.i;
+ failRep->failCause = aFailCause;
+ sendSignal(failedNodePtr.p->blockRef, GSN_FAIL_REP, signal,
+ FailRep::SignalLength, JBA);
+ }//if
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ FailRep * const failRep = (FailRep *)&signal->theData[0];
+ failRep->failNodeId = failedNodePtr.i;
+ failRep->failCause = aFailCause;
+ sendSignal(nodePtr.p->blockRef, GSN_FAIL_REP, signal,
+ FailRep::SignalLength, JBA);
+ }//if
+ }//for
+ }//if
+ if (failedNodePtr.i == getOwnNodeId()) {
+ jam();
+ return;
+ }//if
+ failedNodePtr.p->ndynamicId = 0;
+ findNeighbours(signal);
+ if (failedNodePtr.i == cpresident) {
+ jam();
+ /**--------------------------------------------------------------------
+ * IF THE PRESIDENT HAS FAILED WE MUST CALCULATE THE NEW PRESIDENT BY
+ * FINDING THE NODE WITH THE MINIMUM DYNAMIC IDENTITY.
+ *---------------------------------------------------------------------*/
+ tfrMinDynamicId = (UintR)-1;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ if (nodePtr.p->ndynamicId < tfrMinDynamicId) {
+ jam();
+ tfrMinDynamicId = nodePtr.p->ndynamicId;
+ cpresident = nodePtr.i;
+ }//if
+ }//if
+ }//for
+ presidentNodePtr.i = cpresident;
+ ptrCheckGuard(presidentNodePtr, MAX_NDB_NODES, nodeRec);
+ cpdistref = presidentNodePtr.p->blockRef;
+ if (cpresident == getOwnNodeId()) {
+ CRASH_INSERTION(920);
+ cfailureNr = cprepareFailureNr;
+ ctoFailureNr = 0;
+ ctoStatus = Q_ACTIVE;
+ if (cnoCommitFailedNodes > 0) {
+ jam();
+ /**-----------------------------------------------------------------
+ * IN THIS SITUATION WE ARE UNCERTAIN OF WHETHER THE NODE FAILURE
+ * PROCESS WAS COMMITTED. WE NEED TO QUERY THE OTHER NODES ABOUT
+ * THEIR STATUS.
+ *-----------------------------------------------------------------*/
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES;
+ nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ nodePtr.p->sendPresToStatus = Q_ACTIVE;
+ signal->theData[0] = cpdistref;
+ signal->theData[1] = cprepareFailureNr;
+ sendSignal(nodePtr.p->blockRef, GSN_PRES_TOREQ,
+ signal, 1, JBA);
+ }//if
+ }//for
+ } else {
+ jam();
+ /*-----------------------------------------------------------------*/
+ // In this case it could be that a commit process is still ongoing.
+ // If so we must, as the new master, bring it to completion.
+ /*-----------------------------------------------------------------*/
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES;
+ nodePtr.i++) {
+ jam();
+ ptrAss(nodePtr, nodeRec);
+ if (nodePtr.p->phase == ZRUNNING) {
+ jam();
+ nodePtr.p->sendCommitFailReqStatus = Q_ACTIVE;
+ signal->theData[0] = cpdistref;
+ signal->theData[1] = ccommitFailureNr;
+ sendSignal(nodePtr.p->blockRef, GSN_COMMIT_FAILREQ, signal,
+ 2, JBA);
+ }//if
+ }//for
+ }//if
+ }//if
+ }//if
+ arrGuard(cnoFailedNodes, MAX_NDB_NODES);
+ cfailedNodes[cnoFailedNodes] = failedNodePtr.i;
+ cnoFailedNodes = cnoFailedNodes + 1;
+ }//if
+}//Qmgr::failReport()
+
+/*---------------------------------------------------------------------------*/
+/* INPUT: TTDI_DYN_ID */
+/* OUTPUT: TTDI_NODE_ID */
+/*---------------------------------------------------------------------------*/
+Uint16 Qmgr::translateDynamicIdToNodeId(Signal* signal, UintR TdynamicId)
+{
+ NodeRecPtr tdiNodePtr;
+ Uint16 TtdiNodeId = ZNIL;
+
+ for (tdiNodePtr.i = 1; tdiNodePtr.i < MAX_NDB_NODES; tdiNodePtr.i++) {
+ jam();
+ ptrAss(tdiNodePtr, nodeRec);
+ if (tdiNodePtr.p->ndynamicId == TdynamicId) {
+ jam();
+ TtdiNodeId = tdiNodePtr.i;
+ break;
+ }//if
+ }//for
+ if (TtdiNodeId == ZNIL) {
+ jam();
+ systemErrorLab(signal);
+ }//if
+ return TtdiNodeId;
+}//Qmgr::translateDynamicIdToNodeId()
+
+/**--------------------------------------------------------------------------
+ * WHEN RECEIVING PREPARE FAILURE REQUEST WE WILL IMMEDIATELY CLOSE
+ * COMMUNICATION WITH ALL THOSE NODES.
+ *--------------------------------------------------------------------------*/
+void Qmgr::sendCloseComReq(Signal* signal, BlockReference TBRef, Uint16 aFailNo)
+{
+ CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
+
+ closeCom->xxxBlockRef = TBRef;
+ closeCom->failNo = aFailNo;
+ closeCom->noOfNodes = cnoPrepFailedNodes;
+
+ NodeBitmask::clear(closeCom->theNodes);
+
+ for(int i = 0; i < cnoPrepFailedNodes; i++) {
+ const NodeId nodeId = cprepFailedNodes[i];
+ jam();
+ NodeBitmask::set(closeCom->theNodes, nodeId);
+ }
+
+ sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
+ CloseComReqConf::SignalLength, JBA);
+
+}//Qmgr::sendCloseComReq()
+
+void
+Qmgr::sendPrepFailReqRef(Signal* signal,
+ Uint32 dstBlockRef,
+ GlobalSignalNumber gsn,
+ Uint32 blockRef,
+ Uint32 failNo,
+ Uint32 noOfNodes,
+ const NodeId theNodes[]){
+
+ PrepFailReqRef * const prepFail = (PrepFailReqRef *)&signal->theData[0];
+ prepFail->xxxBlockRef = blockRef;
+ prepFail->failNo = failNo;
+ prepFail->noOfNodes = noOfNodes;
+
+ NodeBitmask::clear(prepFail->theNodes);
+
+ for(Uint32 i = 0; i<noOfNodes; i++){
+ const NodeId nodeId = theNodes[i];
+ NodeBitmask::set(prepFail->theNodes, nodeId);
+ }
+
+ sendSignal(dstBlockRef, gsn, signal, PrepFailReqRef::SignalLength, JBA);
+}
+
+
+/**--------------------------------------------------------------------------
+ * SEND PREPARE FAIL REQUEST FROM PRESIDENT.
+ *---------------------------------------------------------------------------*/
+void Qmgr::sendPrepFailReq(Signal* signal, Uint16 aNode)
+{
+ NodeRecPtr sendNodePtr;
+ sendNodePtr.i = aNode;
+ ptrCheckGuard(sendNodePtr, MAX_NDB_NODES, nodeRec);
+ sendNodePtr.p->sendPrepFailReqStatus = Q_ACTIVE;
+
+ sendPrepFailReqRef(signal,
+ sendNodePtr.p->blockRef,
+ GSN_PREP_FAILREQ,
+ reference(),
+ cfailureNr,
+ cnoFailedNodes,
+ cfailedNodes);
+}//Qmgr::sendPrepFailReq()
+
+/**
+ * Arbitration module. Rest of QMGR calls us only via
+ * the "handle" routines.
+ */
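+
+/**
+ * Roughly, the arbitration state machine below moves through
+ * INIT (generate a new ticket) -> FIND (pick an alive arbitrator
+ * candidate) -> PREP1/PREP2 (clear any old ticket, then distribute the
+ * new one) -> START (hand the ticket to the chosen arbitrator) ->
+ * RUN (steady state). On a possible partitioning RUN moves to CHOOSE
+ * (ask the arbitrator which side survives), and a lost arbitration ends
+ * in CRASH.
+ */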
+
+/**
+ * Whether a surviving set of fewer than half the nodes should die
+ * unconditionally. Affects only >= 3-way replication.
+ */
+static const bool g_ndb_arbit_one_half_rule = false;
+
+/**
+ * Config signals are logically part of CM_INIT.
+ */
+void
+Qmgr::execARBIT_CFG(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ unsigned rank = sd->code;
+ ndbrequire(1 <= rank && rank <= 2);
+ arbitRec.apiMask[0].bitOR(sd->mask);
+ arbitRec.apiMask[rank] = sd->mask;
+}
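+
+/**
+ * Note: apiMask[0] accumulates all configured arbitrator candidates,
+ * while apiMask[1] and apiMask[2] hold the candidates of arbitration
+ * rank 1 and 2 respectively (searched in that order by stateArbitFind).
+ */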
+
+/**
+ * ContinueB delay in milliseconds (0 = resend at once on JBA, 1 = on JBB)
+ */
+Uint32 Qmgr::getArbitDelay()
+{
+ switch (arbitRec.state) {
+ case ARBIT_NULL:
+ jam();
+ break;
+ case ARBIT_INIT:
+ jam();
+ case ARBIT_FIND:
+ jam();
+ case ARBIT_PREP1:
+ jam();
+ case ARBIT_PREP2:
+ jam();
+ case ARBIT_START:
+ jam();
+ return 100;
+ case ARBIT_RUN:
+ jam();
+ return 1000;
+ case ARBIT_CHOOSE:
+ jam();
+ return 10;
+ case ARBIT_CRASH: // if we could wait
+ jam();
+ return 100;
+ }
+ ndbrequire(false);
+ return (Uint32)-1;
+}
+
+/**
+ * Time to wait for reply. There is only 1 config parameter
+ * (timeout for CHOOSE). XXX The rest are guesses.
+ */
+Uint32 Qmgr::getArbitTimeout()
+{
+ switch (arbitRec.state) {
+ case ARBIT_NULL:
+ jam();
+ break;
+ case ARBIT_INIT: // not used
+ jam();
+ case ARBIT_FIND: // not used
+ jam();
+ return 1000;
+ case ARBIT_PREP1:
+ jam();
+ case ARBIT_PREP2:
+ jam();
+ return 1000 + cnoOfNodes * hb_send_timer.getDelay();
+ case ARBIT_START:
+ jam();
+ return 1000 + arbitRec.timeout;
+ case ARBIT_RUN: // not used (yet)
+ jam();
+ return 1000;
+ case ARBIT_CHOOSE:
+ jam();
+ return arbitRec.timeout;
+ case ARBIT_CRASH: // if we could wait
+ jam();
+ return 100;
+ }
+ ndbrequire(false);
+ return (Uint32)-1;
+}
+
+/**
+ * Start arbitration thread when we are president and database
+ * is opened for the first time.
+ *
+ * XXX Do arbitration check just like on node failure. Since
+ * there is no arbitrator yet, must win on counts alone.
+ */
+void
+Qmgr::handleArbitStart(Signal* signal)
+{
+ jam();
+ ndbrequire(cpresident == getOwnNodeId());
+ ndbrequire(arbitRec.state == ARBIT_NULL);
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ startArbitThread(signal);
+}
+
+/**
+ * Handle API node failure. Called also by non-president nodes.
+ * If we are president go back to INIT state, otherwise to NULL.
+ * Start new thread to save time.
+ */
+void
+Qmgr::handleArbitApiFail(Signal* signal, Uint16 nodeId)
+{
+ if (arbitRec.node != nodeId) {
+ jam();
+ return;
+ }
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ arbitRec.node = 0;
+ switch (arbitRec.state) {
+ case ARBIT_NULL: // should not happen
+ jam();
+ case ARBIT_INIT:
+ jam();
+ case ARBIT_FIND:
+ jam();
+ break;
+ case ARBIT_PREP1: // start from beginning
+ jam();
+ case ARBIT_PREP2:
+ jam();
+ case ARBIT_START:
+ jam();
+ case ARBIT_RUN:
+ if (cpresident == getOwnNodeId()) {
+ jam();
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ startArbitThread(signal);
+ } else {
+ jam();
+ arbitRec.state = ARBIT_NULL;
+ }
+ break;
+ case ARBIT_CHOOSE: // XXX too late
+ jam();
+ case ARBIT_CRASH:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}
+
+/**
+ * Handle NDB node add. Ignore if arbitration thread not yet
+ * started. If PREP is not ready, go back to INIT. Otherwise
+ * the new node gets arbitrator and ticket once we reach RUN state.
+ * Start new thread to save time.
+ */
+void
+Qmgr::handleArbitNdbAdd(Signal* signal, Uint16 nodeId)
+{
+ jam();
+ ndbrequire(cpresident == getOwnNodeId());
+ switch (arbitRec.state) {
+ case ARBIT_NULL: // before db opened
+ jam();
+ break;
+ case ARBIT_INIT: // start from beginning
+ jam();
+ case ARBIT_FIND:
+ jam();
+ case ARBIT_PREP1:
+ jam();
+ case ARBIT_PREP2:
+ jam();
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ startArbitThread(signal);
+ break;
+ case ARBIT_START: // process in RUN state
+ jam();
+ case ARBIT_RUN:
+ jam();
+ arbitRec.newMask.set(nodeId);
+ break;
+ case ARBIT_CHOOSE: // XXX too late
+ jam();
+ case ARBIT_CRASH:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+}
+
+/**
+ * Check if current nodeset can survive. The decision is
+ * based on node count, node groups, and on external arbitrator
+ * (if we have one). Always starts a new thread because
+ * 1) CHOOSE cannot wait 2) if we are new president we need
+ * a thread 3) if we are old president it does no harm.
+ */
+void
+Qmgr::handleArbitCheck(Signal* signal)
+{
+ jam();
+ ndbrequire(cpresident == getOwnNodeId());
+ NodeBitmask ndbMask;
+ computeArbitNdbMask(ndbMask);
+ if (g_ndb_arbit_one_half_rule &&
+ 2 * ndbMask.count() < cnoOfNodes) {
+ jam();
+ arbitRec.code = ArbitCode::LoseNodes;
+ } else {
+ jam();
+ CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0];
+ sd->blockRef = reference();
+ sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck;
+ sd->mask = ndbMask;
+ EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+ switch (sd->output) {
+ case CheckNodeGroups::Win:
+ jam();
+ arbitRec.code = ArbitCode::WinGroups;
+ break;
+ case CheckNodeGroups::Lose:
+ jam();
+ arbitRec.code = ArbitCode::LoseGroups;
+ break;
+ case CheckNodeGroups::Partitioning:
+ jam();
+ arbitRec.code = ArbitCode::Partitioning;
+ if (g_ndb_arbit_one_half_rule &&
+ 2 * ndbMask.count() > cnoOfNodes) {
+ jam();
+ arbitRec.code = ArbitCode::WinNodes;
+ }
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ }
+ switch (arbitRec.code) {
+ case ArbitCode::LoseNodes:
+ jam();
+ case ArbitCode::LoseGroups:
+ jam();
+ goto crashme;
+ case ArbitCode::WinNodes:
+ jam();
+ case ArbitCode::WinGroups:
+ jam();
+ if (arbitRec.state == ARBIT_RUN) {
+ jam();
+ break;
+ }
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ break;
+ case ArbitCode::Partitioning:
+ if (arbitRec.state == ARBIT_RUN) {
+ jam();
+ arbitRec.state = ARBIT_CHOOSE;
+ arbitRec.newstate = true;
+ break;
+ }
+ if (arbitRec.apiMask[0].count() != 0) {
+ jam();
+ arbitRec.code = ArbitCode::LoseNorun;
+ } else {
+ jam();
+ arbitRec.code = ArbitCode::LoseNocfg;
+ }
+ goto crashme;
+ default:
+ crashme:
+ jam();
+ arbitRec.state = ARBIT_CRASH;
+ arbitRec.newstate = true;
+ break;
+ }
+ reportArbitEvent(signal, NDB_LE_ArbitResult);
+ switch (arbitRec.state) {
+ default:
+ jam();
+ arbitRec.newMask.bitAND(ndbMask); // delete failed nodes
+ arbitRec.recvMask.bitAND(ndbMask);
+ sendCommitFailReq(signal); // start commit of failed nodes
+ break;
+ case ARBIT_CHOOSE:
+ jam();
+ case ARBIT_CRASH:
+ jam();
+ break;
+ }
+ startArbitThread(signal);
+}
+
+/**
+ * Start a new continueB thread. The thread id is incremented
+ * so that any old thread will exit.
+ */
+void
+Qmgr::startArbitThread(Signal* signal)
+{
+ jam();
+ ndbrequire(cpresident == getOwnNodeId());
+ arbitRec.code = ArbitCode::ThreadStart;
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ signal->theData[1] = ++arbitRec.thread;
+ runArbitThread(signal);
+}
+
+/**
+ * Handle arbitration thread. The initial thread normally ends
+ * up in RUN state. New thread can be started to save time.
+ */
+void
+Qmgr::runArbitThread(Signal* signal)
+{
+#ifdef DEBUG_ARBIT
+ NodeBitmask ndbMask;
+ computeArbitNdbMask(ndbMask);
+ ndbout << "arbit thread:";
+ ndbout << " state=" << arbitRec.state;
+ ndbout << " newstate=" << arbitRec.newstate;
+ ndbout << " thread=" << arbitRec.thread;
+ ndbout << " node=" << arbitRec.node;
+ ndbout << " ticket=" << arbitRec.ticket.getText();
+ ndbout << " ndbmask=" << ndbMask.getText();
+ ndbout << " sendcount=" << arbitRec.sendCount;
+ ndbout << " recvcount=" << arbitRec.recvCount;
+ ndbout << " recvmask=" << arbitRec.recvMask.getText();
+ ndbout << " code=" << arbitRec.code;
+ ndbout << endl;
+#endif
+ if (signal->theData[1] != arbitRec.thread) {
+ jam();
+ return; // old thread dies
+ }
+ switch (arbitRec.state) {
+ case ARBIT_INIT: // main thread
+ jam();
+ stateArbitInit(signal);
+ break;
+ case ARBIT_FIND:
+ jam();
+ stateArbitFind(signal);
+ break;
+ case ARBIT_PREP1:
+ jam();
+ case ARBIT_PREP2:
+ jam();
+ stateArbitPrep(signal);
+ break;
+ case ARBIT_START:
+ jam();
+ stateArbitStart(signal);
+ break;
+ case ARBIT_RUN:
+ jam();
+ stateArbitRun(signal);
+ break;
+ case ARBIT_CHOOSE: // partition thread
+ jam();
+ stateArbitChoose(signal);
+ break;
+ case ARBIT_CRASH:
+ jam();
+ stateArbitCrash(signal);
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ signal->theData[0] = ZARBIT_HANDLING;
+ signal->theData[1] = arbitRec.thread;
+ signal->theData[2] = arbitRec.state; // just for signal log
+ Uint32 delay = getArbitDelay();
+ if (delay == 0) {
+ jam();
+ sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 3, JBA);
+ } else if (delay == 1) {
+ jam();
+ sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 3, JBB);
+ } else {
+ jam();
+ sendSignalWithDelay(QMGR_REF, GSN_CONTINUEB, signal, delay, 3);
+ }//if
+}
+
+/**
+ * Handle INIT state. Generate next ticket. Switch to FIND
+ * state without delay.
+ */
+void
+Qmgr::stateArbitInit(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.node = 0;
+ arbitRec.ticket.update();
+ arbitRec.newMask.clear();
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ arbitRec.state = ARBIT_FIND;
+ arbitRec.newstate = true;
+ stateArbitFind(signal);
+}
+
+/**
+ * Handle FIND state. Find first arbitrator which is alive
+ * and invoke PREP state without delay. If none are found,
+ * loop in FIND state. This is forever if no arbitrators
+ * are configured (not the normal case).
+ *
+ * XXX Add adaptive behaviour to avoid getting stuck on API
+ * nodes which are alive but do not respond or die too soon.
+ */
+void
+Qmgr::stateArbitFind(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ NodeRecPtr aPtr;
+ for (unsigned rank = 1; rank <= 2; rank++) {
+ jam();
+ aPtr.i = 0;
+ const unsigned stop = NodeBitmask::NotFound;
+ while ((aPtr.i = arbitRec.apiMask[rank].find(aPtr.i + 1)) != stop) {
+ jam();
+ ptrAss(aPtr, nodeRec);
+ if (aPtr.p->phase != ZAPI_ACTIVE)
+ continue;
+ arbitRec.node = aPtr.i;
+ arbitRec.state = ARBIT_PREP1;
+ arbitRec.newstate = true;
+ stateArbitPrep(signal);
+ return;
+ }
+ }
+}
+
+/**
+ * Handle PREP states. First round nulls any existing tickets.
+ * Second round sends new ticket. When all confirms have been
+ * received invoke START state immediately.
+ */
+void
+Qmgr::stateArbitPrep(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.sendCount = 0; // send all at once
+ computeArbitNdbMask(arbitRec.recvMask); // to send and recv
+ arbitRec.recvMask.clear(getOwnNodeId());
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ if (! arbitRec.sendCount) {
+ jam();
+ NodeRecPtr aPtr;
+ aPtr.i = 0;
+ const unsigned stop = NodeBitmask::NotFound;
+ while ((aPtr.i = arbitRec.recvMask.find(aPtr.i + 1)) != stop) {
+ jam();
+ ptrAss(aPtr, nodeRec);
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ sd->sender = getOwnNodeId();
+ if (arbitRec.state == ARBIT_PREP1) {
+ jam();
+ sd->code = ArbitCode::PrepPart1;
+ } else {
+ jam();
+ sd->code = ArbitCode::PrepPart2;
+ }
+ sd->node = arbitRec.node;
+ sd->ticket = arbitRec.ticket;
+ sd->mask.clear();
+ sendSignal(aPtr.p->blockRef, GSN_ARBIT_PREPREQ, signal,
+ ArbitSignalData::SignalLength, JBB);
+ }
+ arbitRec.setTimestamp(); // send time
+ arbitRec.sendCount = 1;
+ return;
+ }
+ if (arbitRec.code != 0) { // error
+ jam();
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ return;
+ }
+ if (arbitRec.recvMask.count() == 0) { // recv all
+ if (arbitRec.state == ARBIT_PREP1) {
+ jam();
+ arbitRec.state = ARBIT_PREP2;
+ arbitRec.newstate = true;
+ } else {
+ jam();
+ arbitRec.state = ARBIT_START;
+ arbitRec.newstate = true;
+ stateArbitStart(signal);
+ }
+ return;
+ }
+ if (arbitRec.getTimediff() > getArbitTimeout()) {
+ jam();
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ return;
+ }
+}
+
+void
+Qmgr::execARBIT_PREPREQ(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (getOwnNodeId() == cpresident) {
+ jam();
+ return; // wrong state
+ }
+ if (sd->sender != cpresident) {
+ jam();
+ return; // wrong state
+ }
+ NodeRecPtr aPtr;
+ aPtr.i = sd->sender;
+ ptrAss(aPtr, nodeRec);
+ switch (sd->code) {
+ case ArbitCode::PrepPart1: // zero them just to be sure
+ jam();
+ arbitRec.node = 0;
+ arbitRec.ticket.clear();
+ break;
+ case ArbitCode::PrepPart2: // non-president enters RUN state
+ jam();
+ case ArbitCode::PrepAtrun:
+ jam();
+ arbitRec.node = sd->node;
+ arbitRec.ticket = sd->ticket;
+ arbitRec.code = sd->code;
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ arbitRec.state = ARBIT_RUN;
+ arbitRec.newstate = true;
+ if (sd->code == ArbitCode::PrepAtrun) {
+ jam();
+ return;
+ }
+ break;
+ default:
+ jam();
+ ndbrequire(false);
+ }
+ sd->sender = getOwnNodeId();
+ sd->code = 0;
+ sendSignal(aPtr.p->blockRef, GSN_ARBIT_PREPCONF, signal,
+ ArbitSignalData::SignalLength, JBB);
+}
+
+void
+Qmgr::execARBIT_PREPCONF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (! arbitRec.match(sd)) {
+ jam();
+ return; // stray signal
+ }
+ if (arbitRec.state != ARBIT_PREP1 && arbitRec.state != ARBIT_PREP2) {
+ jam();
+ return; // wrong state
+ }
+ if (! arbitRec.recvMask.get(sd->sender)) {
+ jam();
+ return; // wrong state
+ }
+ arbitRec.recvMask.clear(sd->sender);
+ if (arbitRec.code == 0 && sd->code != 0) {
+ jam();
+ arbitRec.code = sd->code;
+ }//if
+}
+
+void
+Qmgr::execARBIT_PREPREF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (sd->code == 0) {
+ jam();
+ sd->code = ArbitCode::ErrUnknown;
+ }
+ execARBIT_PREPCONF(signal);
+}
+
+/**
+ * Handle START state. On first call send start request to
+ * the chosen arbitrator. Then wait for a CONF.
+ */
+void
+Qmgr::stateArbitStart(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.sendCount = 0;
+ arbitRec.recvCount = 0;
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ if (! arbitRec.sendCount) {
+ jam();
+ BlockReference blockRef = calcApiClusterMgrBlockRef(arbitRec.node);
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ sd->sender = getOwnNodeId();
+ sd->code = 0;
+ sd->node = arbitRec.node;
+ sd->ticket = arbitRec.ticket;
+ sd->mask.clear();
+ sendSignal(blockRef, GSN_ARBIT_STARTREQ, signal,
+ ArbitSignalData::SignalLength, JBB);
+ arbitRec.sendCount = 1;
+ arbitRec.setTimestamp(); // send time
+ return;
+ }
+ if (arbitRec.recvCount) {
+ jam();
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ if (arbitRec.code == ArbitCode::ApiStart) {
+ jam();
+ arbitRec.state = ARBIT_RUN;
+ arbitRec.newstate = true;
+ return;
+ }
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ return;
+ }
+ if (arbitRec.getTimediff() > getArbitTimeout()) {
+ jam();
+ arbitRec.code = ArbitCode::ErrTimeout;
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ return;
+ }
+}
+
+void
+Qmgr::execARBIT_STARTCONF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (! arbitRec.match(sd)) {
+ jam();
+ return; // stray signal
+ }
+ if (arbitRec.state != ARBIT_START) {
+ jam();
+ return; // wrong state
+ }
+ if (arbitRec.recvCount) {
+ jam();
+ return; // wrong state
+ }
+ arbitRec.code = sd->code;
+ arbitRec.recvCount = 1;
+}
+
+void
+Qmgr::execARBIT_STARTREF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (sd->code == 0) {
+ jam();
+ sd->code = ArbitCode::ErrUnknown;
+ }
+ execARBIT_STARTCONF(signal);
+}
+
+/**
+ * Handle RUN state. Send ticket to any new nodes which have
+ * appeared after PREP state. We don't care about a CONF.
+ */
+void
+Qmgr::stateArbitRun(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ NodeRecPtr aPtr;
+ aPtr.i = 0;
+ const unsigned stop = NodeBitmask::NotFound;
+ while ((aPtr.i = arbitRec.newMask.find(aPtr.i + 1)) != stop) {
+ jam();
+ arbitRec.newMask.clear(aPtr.i);
+ ptrAss(aPtr, nodeRec);
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ sd->sender = getOwnNodeId();
+ sd->code = ArbitCode::PrepAtrun;
+ sd->node = arbitRec.node;
+ sd->ticket = arbitRec.ticket;
+ sd->mask.clear();
+ sendSignal(aPtr.p->blockRef, GSN_ARBIT_PREPREQ, signal,
+ ArbitSignalData::SignalLength, JBB);
+ }
+}
+
+/**
+ * Handle CHOOSE state. Entered only from RUN state when
+ * there is a possible network partitioning. Send CHOOSE to
+ * the arbitrator. On win switch to INIT state because a new
+ * ticket must be created.
+ */
+void
+Qmgr::stateArbitChoose(Signal* signal)
+{
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+
+ arbitRec.sendCount = 0;
+ arbitRec.recvCount = 0;
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+ if (! arbitRec.sendCount) {
+ jam();
+ BlockReference blockRef = calcApiClusterMgrBlockRef(arbitRec.node);
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ sd->sender = getOwnNodeId();
+ sd->code = 0;
+ sd->node = arbitRec.node;
+ sd->ticket = arbitRec.ticket;
+ computeArbitNdbMask(sd->mask);
+ sendSignal(blockRef, GSN_ARBIT_CHOOSEREQ, signal,
+ ArbitSignalData::SignalLength, JBA);
+ arbitRec.sendCount = 1;
+ arbitRec.setTimestamp(); // send time
+ return;
+ }
+ if (arbitRec.recvCount) {
+ jam();
+ reportArbitEvent(signal, NDB_LE_ArbitResult);
+ if (arbitRec.code == ArbitCode::WinChoose) {
+ jam();
+ sendCommitFailReq(signal); // start commit of failed nodes
+ arbitRec.state = ARBIT_INIT;
+ arbitRec.newstate = true;
+ return;
+ }
+ arbitRec.state = ARBIT_CRASH;
+ arbitRec.newstate = true;
+ stateArbitCrash(signal); // do it at once
+ return;
+ }
+ if (arbitRec.getTimediff() > getArbitTimeout()) {
+ jam();
+ arbitRec.code = ArbitCode::ErrTimeout;
+ reportArbitEvent(signal, NDB_LE_ArbitState);
+ arbitRec.state = ARBIT_CRASH;
+ arbitRec.newstate = true;
+ stateArbitCrash(signal); // do it at once
+ return;
+ }
+}
+
+void
+Qmgr::execARBIT_CHOOSECONF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (!arbitRec.match(sd)) {
+ jam();
+ return; // stray signal
+ }
+ if (arbitRec.state != ARBIT_CHOOSE) {
+ jam();
+ return; // wrong state
+ }
+ if (arbitRec.recvCount) {
+ jam();
+ return; // wrong state
+ }
+ arbitRec.recvCount = 1;
+ arbitRec.code = sd->code;
+}
+
+void
+Qmgr::execARBIT_CHOOSEREF(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (sd->code == 0) {
+ jam();
+ sd->code = ArbitCode::ErrUnknown;
+ }
+ execARBIT_CHOOSECONF(signal);
+}
+
+/**
+ * Handle CRASH state. We must crash immediately.
+ * XXX tell other nodes in our party to crash too.
+ */
+void
+Qmgr::stateArbitCrash(Signal* signal)
+{
+ jam();
+ if (arbitRec.newstate) {
+ jam();
+ CRASH_INSERTION((Uint32)910 + arbitRec.state);
+ arbitRec.setTimestamp();
+ arbitRec.code = 0;
+ arbitRec.newstate = false;
+ }
+#ifdef ndb_arbit_crash_wait_for_event_report_to_get_out
+ if (! (arbitRec.getTimediff() > getArbitTimeout()))
+ return;
+#endif
+ progError(__LINE__, ERR_ARBIT_SHUTDOWN, "Arbitrator decided to shutdown this node");
+}
+
+/**
+ * Arbitrator may inform us that it will exit. This lets us
+ * start looking sooner for a new one. Handle it like API node
+ * failure.
+ */
+void
+Qmgr::execARBIT_STOPREP(Signal* signal)
+{
+ jamEntry();
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ if (! arbitRec.match(sd)) {
+ jam();
+ return; // stray signal
+ }
+ arbitRec.code = ArbitCode::ApiExit;
+ handleArbitApiFail(signal, arbitRec.node);
+}
+
+void
+Qmgr::computeArbitNdbMask(NodeBitmask& aMask)
+{
+ NodeRecPtr aPtr;
+ aMask.clear();
+ for (aPtr.i = 1; aPtr.i < MAX_NDB_NODES; aPtr.i++) {
+ jam();
+ ptrAss(aPtr, nodeRec);
+ if (getNodeInfo(aPtr.i).getType() == NodeInfo::DB && aPtr.p->phase == ZRUNNING){
+ jam();
+ aMask.set(aPtr.i);
+ }
+ }
+}
+
+/**
+ * Report arbitration event. We use arbitration signal format
+ * where sender (word 0) is event type.
+ */
+void
+Qmgr::reportArbitEvent(Signal* signal, Ndb_logevent_type type)
+{
+ ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
+ sd->sender = type;
+ sd->code = arbitRec.code | (arbitRec.state << 16);
+ sd->node = arbitRec.node;
+ sd->ticket = arbitRec.ticket;
+ sd->mask.clear();
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal,
+ ArbitSignalData::SignalLength, JBB);
+}
+
+// end of arbitration module
+
+void
+Qmgr::execDUMP_STATE_ORD(Signal* signal)
+{
+ switch (signal->theData[0]) {
+ case 1:
+ infoEvent("creadyDistCom = %d, cpresident = %d\n",
+ creadyDistCom, cpresident);
+ infoEvent("cpresidentAlive = %d, cpresidentCand = %d\n",
+ cpresidentAlive, cpresidentCandidate);
+ infoEvent("ctoStatus = %d\n", ctoStatus);
+ for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
+ if(getNodeInfo(i).getType() == NodeInfo::DB){
+ NodeRecPtr nodePtr;
+ nodePtr.i = i;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+ char buf[100];
+ switch(nodePtr.p->phase){
+ case ZINIT:
+ sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase);
+ break;
+ case ZSTARTING:
+ sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZRUNNING:
+ sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZPREPARE_FAIL:
+ sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase);
+ break;
+ case ZFAIL_CLOSING:
+ sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZAPI_INACTIVE:
+ sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase);
+ break;
+ case ZAPI_ACTIVE:
+ sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase);
+ break;
+ default:
+ sprintf(buf, "Node %d: <UNKNOWN>(%d)", i, nodePtr.p->phase);
+ break;
+ }
+ infoEvent(buf);
+ }
+ }
+ default:
+ ;
+ }//switch
+}//Qmgr::execDUMP_STATE_ORD()
+
+void Qmgr::execSET_VAR_REQ(Signal* signal)
+{
+#if 0
+ SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
+ ConfigParamId var = setVarReq->variable();
+ UintR val = setVarReq->value();
+
+ switch (var) {
+ case HeartbeatIntervalDbDb:
+ setHbDelay(val/10);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case HeartbeatIntervalDbApi:
+ setHbApiDelay(val/10);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ case ArbitTimeout:
+ setArbitTimeout(val);
+ sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
+ break;
+
+ default:
+ sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
+ }// switch
+#endif
+}//execSET_VAR_REQ()
diff --git a/storage/ndb/src/kernel/blocks/qmgr/timer.hpp b/storage/ndb/src/kernel/blocks/qmgr/timer.hpp
new file mode 100644
index 00000000000..9c35a23766c
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/qmgr/timer.hpp
@@ -0,0 +1,72 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ * @class Timer
+ * @brief A timer class that cannot be fooled by NTP stepping the system clock back to an earlier time
+ */
+class Timer {
+public:
+ Timer() {
+ m_delay = 10;
+ }
+
+ Timer(NDB_TICKS delay_time) {
+ m_delay = delay_time;
+ }
+
+ /**
+ * Set/Get alarm time of timer
+ */
+ inline void setDelay(NDB_TICKS delay_time) { m_delay = delay_time; }
+ inline NDB_TICKS getDelay() { return m_delay; }
+
+ /**
+ * Start timer
+ */
+ inline void reset() {
+ m_current_time = NdbTick_CurrentMillisecond();
+ m_alarm_time = m_current_time + m_delay;
+ }
+
+ /**
+ * Check for alarm
+ */
+ inline bool check() { return check(NdbTick_CurrentMillisecond()); }
+
+ inline bool check(NDB_TICKS check_time) {
+ /**
+ * Standard alarm check
+ */
+ if (check_time > m_alarm_time) return true;
+
+ /**
+ * Time progressing, but it is not alarm time yet
+ */
+ if (check_time >= m_current_time) return false;
+
+ /**
+ * Time has moved backwards
+ */
+ reset();
+ return false;
+ }
+
+private:
+ NDB_TICKS m_current_time;
+ NDB_TICKS m_alarm_time;
+ NDB_TICKS m_delay;
+};
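+
+/* A minimal usage sketch (hypothetical identifiers, for illustration only):
+ * the owner picks a delay, arms the timer with reset() and then polls
+ * check() periodically; a backwards step of the system clock simply
+ * re-arms the interval instead of firing the alarm early.
+ *
+ * Timer hb_timer(1500); // alarm 1500 ms after reset()
+ * hb_timer.reset(); // start (or restart) the interval
+ * ...
+ * if (hb_timer.check()) { // true once the delay has passed
+ * // do the periodic work, then re-arm
+ * hb_timer.reset();
+ * }
+ */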
diff --git a/storage/ndb/src/kernel/blocks/suma/Makefile.am b/storage/ndb/src/kernel/blocks/suma/Makefile.am
new file mode 100644
index 00000000000..5a74dbb74eb
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libsuma.a
+
+libsuma_a_SOURCES = Suma.cpp SumaInit.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libsuma.dsp
+
+libsuma.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libsuma_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
new file mode 100644
index 00000000000..ed54505b729
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -0,0 +1,4073 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Suma.hpp"
+
+#include <ndb_version.h>
+
+#include <NdbTCP.h>
+#include <Bitmask.hpp>
+#include <SimpleProperties.hpp>
+
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/ReadNodesConf.hpp>
+
+#include <signaldata/ListTables.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/GetTableId.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <signaldata/ScanFrag.hpp>
+#include <signaldata/TransIdAI.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/AlterTrig.hpp>
+#include <signaldata/DropTrig.hpp>
+#include <signaldata/FireTrigOrd.hpp>
+#include <signaldata/TrigAttrInfo.hpp>
+#include <signaldata/CheckNodeGroups.hpp>
+#include <signaldata/GCPSave.hpp>
+#include <GrepError.hpp>
+
+#include <DebuggerNames.hpp>
+
+//#define HANDOVER_DEBUG
+//#define NODEFAIL_DEBUG
+//#define NODEFAIL_DEBUG2
+//#define DEBUG_SUMA_SEQUENCE
+//#define EVENT_DEBUG
+//#define EVENT_PH3_DEBUG
+//#define EVENT_DEBUG2
+#if 0
+#undef DBUG_ENTER
+#undef DBUG_PRINT
+#undef DBUG_RETURN
+#undef DBUG_VOID_RETURN
+
+#define DBUG_ENTER(a) {ndbout_c("%s:%d >%s", __FILE__, __LINE__, a);}
+#define DBUG_PRINT(a,b) {ndbout << __FILE__ << ":" << __LINE__ << " " << a << ": "; ndbout_c b ;}
+#define DBUG_RETURN(a) { ndbout_c("%s:%d <", __FILE__, __LINE__); return(a); }
+#define DBUG_VOID_RETURN { ndbout_c("%s:%d <", __FILE__, __LINE__); return; }
+#endif
+
+/**
+ * @todo:
+ * SUMA crashes if an index is created at the same time as
+ * global replication. Very easy to reproduce using testIndex.
+ * Note: This only happens occasionally, but is quite easy to reproduce.
+ */
+
+Uint32 g_subPtrI = RNIL;
+static const Uint32 SUMA_SEQUENCE = 0xBABEBABE;
+
+
+/**************************************************************
+ *
+ * Start of suma
+ *
+ */
+
+#define PRINT_ONLY 0
+static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE;
+
+void
+Suma::getNodeGroupMembers(Signal* signal) {
+ jam();
+ /**
+ * Ask DIH for nodeGroupMembers
+ */
+ CheckNodeGroups * sd = (CheckNodeGroups*)signal->getDataPtrSend();
+ sd->blockRef = reference();
+ sd->requestType =
+ CheckNodeGroups::Direct |
+ CheckNodeGroups::GetNodeGroupMembers;
+ sd->nodeId = getOwnNodeId();
+ EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal,
+ CheckNodeGroups::SignalLength);
+ jamEntry();
+
+ c_nodeGroup = sd->output;
+ c_noNodesInGroup = 0;
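+ /* Walk the node mask returned by DIH and record the members of our own
+ * node group, remembering our position (c_idInNodeGroup) within it. */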
+ for (int i = 0; i < MAX_NDB_NODES; i++) {
+ if (sd->mask.get(i)) {
+ if (i == getOwnNodeId()) c_idInNodeGroup = c_noNodesInGroup;
+ c_nodesInGroup[c_noNodesInGroup] = i;
+ c_noNodesInGroup++;
+ }
+ }
+
+ // ndbout_c("c_noNodesInGroup=%d", c_noNodesInGroup);
+ ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup
+
+#ifdef NODEFAIL_DEBUG
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ ndbout_c ("Suma: NodeGroup %u, me %u, me in group %u, member[%u] %u",
+ c_nodeGroup, getOwnNodeId(), c_idInNodeGroup,
+ i, c_nodesInGroup[i]);
+ }
+#endif
+}
+
+void
+Suma::execSTTOR(Signal* signal) {
+ jamEntry();
+
+ DBUG_ENTER("Suma::execSTTOR");
+ const Uint32 startphase = signal->theData[1];
+ const Uint32 typeOfStart = signal->theData[7];
+
+ DBUG_PRINT("info",("startphase = %u, typeOfStart = %u", startphase, typeOfStart));
+
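+ /* In outline: phase 1 only sets the restart lock, phase 3 asks NDBCNTR
+ * for the node list, phase 5 fetches the node group members (and on a
+ * node restart asks the other SUMAs in the group to start us), and
+ * phase 7 releases the lock, activates the buckets we are responsible
+ * for and, on an initial start, lets the master create the SUMA
+ * sequence. */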
+ if(startphase == 1){
+ jam();
+ c_restartLock = true;
+ }
+
+ if(startphase == 3){
+ jam();
+ g_TypeOfStart = typeOfStart;
+ signal->theData[0] = reference();
+ sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
+
+#if 0
+
+ /**
+ * Debug
+ */
+
+
+ SubscriptionPtr subPtr;
+ Ptr<SyncRecord> syncPtr;
+ ndbrequire(c_subscriptions.seize(subPtr));
+ ndbrequire(c_syncPool.seize(syncPtr));
+
+
+ ndbout_c("Suma: subPtr.i = %d syncPtr.i = %d", subPtr.i, syncPtr.i);
+
+ subPtr.p->m_syncPtrI = syncPtr.i;
+ subPtr.p->m_subscriptionType = SubCreateReq::DatabaseSnapshot;
+ syncPtr.p->m_subscriptionPtrI = subPtr.i;
+ syncPtr.p->ptrI = syncPtr.i;
+ g_subPtrI = subPtr.i;
+ // sendSTTORRY(signal);
+#endif
+ DBUG_VOID_RETURN;
+ }
+
+ if(startphase == 5) {
+ getNodeGroupMembers(signal);
+ if (g_TypeOfStart == NodeState::ST_NODE_RESTART) {
+ jam();
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
+ if (ref != reference())
+ sendSignal(ref, GSN_SUMA_START_ME, signal,
+ 1 /*SumaStartMe::SignalLength*/, JBB);
+ }
+ }
+ }
+
+ if(startphase == 7) {
+ c_restartLock = false; // may be set false earlier with HANDOVER_REQ
+
+ if (g_TypeOfStart != NodeState::ST_NODE_RESTART) {
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
+ // I'm running this bucket
+ DBUG_PRINT("info",("bucket %u set to true", i));
+ c_buckets[i].active = true;
+ }
+ }
+ }
+
+ if(g_TypeOfStart == NodeState::ST_INITIAL_START &&
+ c_masterNodeId == getOwnNodeId()) {
+ jam();
+ createSequence(signal);
+ DBUG_VOID_RETURN;
+ }//if
+ }//if
+
+
+ sendSTTORRY(signal);
+
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::createSequence(Signal* signal)
+{
+ jam();
+ DBUG_ENTER("Suma::createSequence");
+
+ UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend();
+
+ req->senderData = RNIL;
+ req->sequenceId = SUMA_SEQUENCE;
+ req->requestType = UtilSequenceReq::Create;
+ sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+ signal, UtilSequenceReq::SignalLength, JBB);
+ // execUTIL_SEQUENCE_CONF will call createSequenceReply()
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::createSequenceReply(Signal* signal,
+ UtilSequenceConf * conf,
+ UtilSequenceRef * ref)
+{
+ jam();
+
+ if (ref != NULL)
+ ndbrequire(false);
+
+ sendSTTORRY(signal);
+}
+
+void
+Suma::execREAD_NODESCONF(Signal* signal){
+ jamEntry();
+ ReadNodesConf * const conf = (ReadNodesConf *)signal->getDataPtr();
+
+ c_aliveNodes.clear();
+ c_preparingNodes.clear();
+
+ Uint32 count = 0;
+ for(Uint32 i = 0; i < MAX_NDB_NODES; i++){
+ if(NodeBitmask::get(conf->allNodes, i)){
+ jam();
+
+ count++;
+
+ NodePtr node;
+ ndbrequire(c_nodes.seize(node));
+
+ node.p->nodeId = i;
+ if(NodeBitmask::get(conf->inactiveNodes, i)){
+ jam();
+ node.p->alive = 0;
+ } else {
+ jam();
+ node.p->alive = 1;
+ c_aliveNodes.set(i);
+ }
+ } else
+ jam();
+ }
+ c_masterNodeId = conf->masterNodeId;
+ ndbrequire(count == conf->noOfNodes);
+
+ sendSTTORRY(signal);
+}
+
+#if 0
+void
+Suma::execREAD_CONFIG_REQ(Signal* signal)
+{
+ const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
+ Uint32 ref = req->senderRef;
+ Uint32 senderData = req->senderData;
+ ndbrequire(req->noOfParameters == 0);
+
+ jamEntry();
+
+ const ndb_mgm_configuration_iterator * p =
+ theConfiguration.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES,
+ &cnoLogFiles));
+ ndbrequire(cnoLogFiles > 0);
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &cfragrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
+ &ctcConnectrecFileSize));
+ clogFileFileSize = 4 * cnoLogFiles;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
+ cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_SCANS_PER_FRAG;
+
+ initRecords();
+ initialiseRecordsLab(signal, 0, ref, senderData);
+
+ return;
+}//Dblqh::execSIZEALT_REP()
+#endif
+
+void
+Suma::sendSTTORRY(Signal* signal){
+ signal->theData[0] = 0;
+ signal->theData[3] = 1;
+ signal->theData[4] = 3;
+ signal->theData[5] = 5;
+ signal->theData[6] = 7;
+ signal->theData[7] = 255; // No more start phases from missra
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 8, JBB);
+}
+
+void
+Suma::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+}
+
+void
+Suma::execCONTINUEB(Signal* signal){
+ jamEntry();
+}
+
+void
+SumaParticipant::execCONTINUEB(Signal* signal)
+{
+ jamEntry();
+}
+
+/*****************************************************************************
+ *
+ * Node state handling
+ *
+ *****************************************************************************/
+
+void Suma::execAPI_FAILREQ(Signal* signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execAPI_FAILREQ");
+ Uint32 failedApiNode = signal->theData[0];
+ //BlockReference retRef = signal->theData[1];
+
+ c_failedApiNodes.set(failedApiNode);
+ bool found = removeSubscribersOnNode(signal, failedApiNode);
+
+ if(!found){
+ jam();
+ c_failedApiNodes.clear(failedApiNode);
+ }
+ DBUG_VOID_RETURN;
+}//execAPI_FAILREQ()
+
+bool
+SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
+{
+ DBUG_ENTER("SumaParticipant::removeSubscribersOnNode");
+ bool found = false;
+
+ SubscriberPtr i_subbPtr;
+ c_dataSubscribers.first(i_subbPtr);
+ while(!i_subbPtr.isNull()){
+ SubscriberPtr subbPtr = i_subbPtr;
+ c_dataSubscribers.next(i_subbPtr);
+ jam();
+ if (refToNode(subbPtr.p->m_subscriberRef) == nodeId) {
+ jam();
+ c_dataSubscribers.remove(subbPtr);
+ c_removeDataSubscribers.add(subbPtr);
+ found = true;
+ }
+ }
+ if(found){
+ jam();
+ sendSubStopReq(signal);
+ }
+ DBUG_RETURN(found);
+}
+
+void
+SumaParticipant::sendSubStopReq(Signal *signal, bool unlock){
+ DBUG_ENTER("SumaParticipant::sendSubStopReq");
+ static bool remove_lock = false;
+ jam();
+
+ SubscriberPtr subbPtr;
+ c_removeDataSubscribers.first(subbPtr);
+ if (subbPtr.isNull()){
+ jam();
+#if 0
+ signal->theData[0] = failedApiNode;
+ signal->theData[1] = reference();
+ sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB);
+#endif
+ c_failedApiNodes.clear();
+
+ remove_lock = false;
+ DBUG_VOID_RETURN;
+ }
+
+ if(remove_lock && !unlock) {
+ jam();
+ DBUG_VOID_RETURN;
+ }
+ remove_lock = true;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+
+ SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subbPtr.i;
+ req->subscriberRef = subbPtr.p->m_subscriberRef;
+ req->subscriberData = subbPtr.p->m_subscriberData;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->part = SubscriptionData::TableData;
+
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::execSUB_STOP_CONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_CONF");
+
+ SubStopConf * const conf = (SubStopConf*)signal->getDataPtr();
+
+ // Uint32 subscriberData = conf->subscriberData;
+ // Uint32 subscriberRef = conf->subscriberRef;
+
+ Subscription key;
+ key.m_subscriptionId = conf->subscriptionId;
+ key.m_subscriptionKey = conf->subscriptionKey;
+
+ SubscriptionPtr subPtr;
+ if(c_subscriptions.find(subPtr, key)) {
+ jam();
+ if (subPtr.p->m_markRemove) {
+ jam();
+ ndbrequire(false);
+ ndbrequire(subPtr.p->m_nSubscribers > 0);
+ subPtr.p->m_nSubscribers--;
+ if (subPtr.p->m_nSubscribers == 0){
+ jam();
+ completeSubRemoveReq(signal, subPtr);
+ }
+ }
+ }
+
+ sendSubStopReq(signal,true);
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::execSUB_STOP_REF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_REF");
+
+ SubStopRef * const ref = (SubStopRef*)signal->getDataPtr();
+
+ Uint32 subscriptionId = ref->subscriptionId;
+ Uint32 subscriptionKey = ref->subscriptionKey;
+ Uint32 part = ref->part;
+ Uint32 subscriberData = ref->subscriberData;
+ Uint32 subscriberRef = ref->subscriberRef;
+ // Uint32 err = ref->err;
+
+ if(!ref->isTemporary()){
+ ndbrequire(false);
+ }
+
+ SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend();
+ req->subscriberRef = subscriberRef;
+ req->subscriberData = subscriberData;
+ req->subscriptionId = subscriptionId;
+ req->subscriptionKey = subscriptionKey;
+ req->part = part;
+
+ sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execNODE_FAILREP(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execNODE_FAILREP");
+
+ NodeFailRep * const rep = (NodeFailRep*)signal->getDataPtr();
+
+ bool changed = false;
+
+ NodePtr nodePtr;
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma: nodefailrep");
+#endif
+ c_nodeFailGCI = getFirstGCI(signal);
+
+ for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)){
+ if(NodeBitmask::get(rep->theNodes, nodePtr.p->nodeId)){
+ if(nodePtr.p->alive){
+ ndbassert(c_aliveNodes.get(nodePtr.p->nodeId));
+ changed = true;
+ jam();
+ } else {
+ ndbassert(!c_aliveNodes.get(nodePtr.p->nodeId));
+ jam();
+ }
+
+ if (c_preparingNodes.get(nodePtr.p->nodeId)) {
+ jam();
+ // we are currently preparing this node that died
+ // it's ok just to clear and go back to waiting for it to start up
+ Restart.resetNode(calcSumaBlockRef(nodePtr.p->nodeId));
+ c_preparingNodes.clear(nodePtr.p->nodeId);
+ } else if (c_handoverToDo) {
+ jam();
+ // TODO what if I'm a SUMA that is currently restarting and the SUMA
+ // responsible for restarting me is the one that died?
+
+ // a node has failed whilst handover is going on
+ // let's check if we're in the process of handover with that node
+ c_handoverToDo = false;
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ if (c_buckets[i].handover) {
+ // I'm doing handover, but is it with the dead node?
+ if (getResponsibleSumaNodeId(i) == nodePtr.p->nodeId) {
+ // so it was the dead node, has handover started?
+ if (c_buckets[i].handover_started) {
+ jam();
+ // we're not ok and will have lost data!
+ // set not active to indicate this -
+ // this will generate takeover behaviour
+ c_buckets[i].active = false;
+ c_buckets[i].handover_started = false;
+ } // else we're ok to revert back to state before
+ c_buckets[i].handover = false;
+ } else {
+ jam();
+ // ok, we're doing handover with a different node
+ c_handoverToDo = true;
+ }
+ }
+ }
+ }
+
+ c_failoverBuffer.nodeFailRep();
+
+ nodePtr.p->alive = 0;
+ c_aliveNodes.clear(nodePtr.p->nodeId); // this has to be done after the loop above
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execINCL_NODEREQ(Signal* signal){
+ jamEntry();
+
+ //const Uint32 senderRef = signal->theData[0];
+ const Uint32 inclNode = signal->theData[1];
+
+ NodePtr node;
+ for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){
+ jam();
+ const Uint32 nodeId = node.p->nodeId;
+ if(inclNode == nodeId){
+ jam();
+
+ ndbrequire(node.p->alive == 0);
+ ndbrequire(!c_aliveNodes.get(nodeId));
+
+ for (Uint32 j = 0; j < c_noNodesInGroup; j++) {
+ jam();
+ if (c_nodesInGroup[j] == nodeId) {
+ // the starting node is part of my node group
+ jam();
+ c_preparingNodes.set(nodeId); // set as being prepared
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ jam();
+ if (i == c_idInNodeGroup) {
+ jam();
+ // I'm responsible for restarting this SUMA
+ // ALL dict's should have meta data info so it is ok to start
+ Restart.startNode(signal, calcSumaBlockRef(nodeId));
+ break;
+ }//if
+ if (c_aliveNodes.get(c_nodesInGroup[i])) {
+ jam();
+ break; // another Suma takes care of this
+ }//if
+ }//for
+ break;
+ }//if
+ }//for
+
+ node.p->alive = 1;
+ c_aliveNodes.set(nodeId);
+
+ break;
+ }//if
+ }//for
+
+#if 0 // if we include this DIH's got to be prepared, later if needed...
+ signal->theData[0] = reference();
+
+ sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB);
+#endif
+}
+
+void
+Suma::execSIGNAL_DROPPED_REP(Signal* signal){
+ jamEntry();
+ ndbrequire(false);
+}
+
+/********************************************************************
+ *
+ * Dump state
+ *
+ */
+
+static unsigned
+count_subscribers(const DLList<SumaParticipant::Subscriber> &subs)
+{
+ unsigned n= 0;
+ SumaParticipant::SubscriberPtr i_subbPtr;
+ subs.first(i_subbPtr);
+ while(!i_subbPtr.isNull()){
+ n++;
+ subs.next(i_subbPtr);
+ }
+ return n;
+}
+
+void
+Suma::execDUMP_STATE_ORD(Signal* signal){
+ jamEntry();
+
+ Uint32 tCase = signal->theData[0];
+ if(tCase >= 8000 && tCase <= 8003){
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, g_subPtrI);
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+
+ if(tCase == 8000){
+ syncPtr.p->startMeta(signal);
+ }
+
+ if(tCase == 8001){
+ syncPtr.p->startScan(signal);
+ }
+
+ if(tCase == 8002){
+ syncPtr.p->startTrigger(signal);
+ }
+
+ if(tCase == 8003){
+ subPtr.p->m_subscriptionType = SubCreateReq::SingleTableScan;
+ LocalDataBuffer<15> attrs(c_dataBufferPool, syncPtr.p->m_attributeList);
+ Uint32 tab = 0;
+ Uint32 att[] = { 0, 1, 1 };
+ syncPtr.p->m_tableList.append(&tab, 1);
+ attrs.append(att, 3);
+ }
+ }
+
+ if(tCase == 8004){
+ infoEvent("Suma: c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree());
+
+ infoEvent("Suma: c_tablePool size: %d free: %d",
+ c_tablePool_.getSize(),
+ c_tablePool_.getNoOfFree());
+
+ infoEvent("Suma: c_subscriptionPool size: %d free: %d",
+ c_subscriptionPool.getSize(),
+ c_subscriptionPool.getNoOfFree());
+
+ infoEvent("Suma: c_syncPool size: %d free: %d",
+ c_syncPool.getSize(),
+ c_syncPool.getNoOfFree());
+
+ infoEvent("Suma: c_dataBufferPool size: %d free: %d",
+ c_dataBufferPool.getSize(),
+ c_dataBufferPool.getNoOfFree());
+
+ infoEvent("Suma: c_metaSubscribers count: %d",
+ count_subscribers(c_metaSubscribers));
+ infoEvent("Suma: c_dataSubscribers count: %d",
+ count_subscribers(c_dataSubscribers));
+ infoEvent("Suma: c_prepDataSubscribers count: %d",
+ count_subscribers(c_prepDataSubscribers));
+ infoEvent("Suma: c_removeDataSubscribers count: %d",
+ count_subscribers(c_removeDataSubscribers));
+ }
+}
+
+/********************************************************************
+ *
+ * Convert a table name (db+schema+tablename) to tableId
+ *
+ */
+
+#if 0
+void
+SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
+{
+ jam();
+ if(subPtr.p->m_currentTable < subPtr.p->m_maxTables) {
+ jam();
+
+ GetTableIdReq * req = (GetTableIdReq *)signal->getDataPtrSend();
+ char * tableName = subPtr.p->m_tableNames[subPtr.p->m_currentTable];
+ const Uint32 strLen = strlen(tableName) + 1; // NULL Terminated
+ req->senderRef = reference();
+ req->senderData = subPtr.i;
+ req->len = strLen;
+
+ LinearSectionPtr ptr[1];
+ ptr[0].p = (Uint32*)tableName;
+ ptr[0].sz = strLen;
+
+ sendSignal(DBDICT_REF,
+ GSN_GET_TABLEID_REQ,
+ signal,
+ GetTableIdReq::SignalLength,
+ JBB,
+ ptr,
+ 1);
+ } else {
+ jam();
+ sendSubCreateConf(signal, subPtr.p->m_subscriberRef, subPtr);
+ }
+}
+#endif
+
+
+void
+SumaParticipant::addTableId(Uint32 tableId,
+ SubscriptionPtr subPtr, SyncRecord *psyncRec)
+{
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("SumaParticipant::addTableId(%u,%u,%u), current_table=%u",
+ tableId, subPtr.i, psyncRec, subPtr.p->m_currentTable);
+#endif
+ subPtr.p->m_tables[tableId] = 1;
+ subPtr.p->m_currentTable++;
+ if(psyncRec != NULL)
+ psyncRec->m_tableList.append(&tableId, 1);
+}
+
+#if 0
+void
+SumaParticipant::execGET_TABLEID_CONF(Signal * signal)
+{
+ jamEntry();
+
+ GetTableIdConf* conf = (GetTableIdConf *)signal->getDataPtr();
+ Uint32 tableId = conf->tableId;
+ //Uint32 schemaVersion = conf->schemaVersion;
+ Uint32 senderData = conf->senderData;
+
+ SubscriptionPtr subPtr;
+ Ptr<SyncRecord> syncPtr;
+
+ c_subscriptions.getPtr(subPtr, senderData);
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+
+ /*
+ * add to m_tableList
+ */
+ addTableId(tableId, subPtr, syncPtr.p);
+
+ convertNameToId(subPtr, signal);
+}
+
+void
+SumaParticipant::execGET_TABLEID_REF(Signal * signal)
+{
+ jamEntry();
+ GetTableIdRef const * ref = (GetTableIdRef *)signal->getDataPtr();
+ Uint32 senderData = ref->senderData;
+ // Uint32 err = ref->err;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, senderData);
+ Uint32 subData = subPtr.p->m_subscriberData;
+ SubCreateRef * reff = (SubCreateRef*)ref;
+ /**
+ * @todo: map ref->err to GrepError.
+ */
+ reff->err = GrepError::SELECTED_TABLE_NOT_FOUND;
+ reff->subscriberData = subData;
+ sendSignal(subPtr.p->m_subscriberRef,
+ GSN_SUB_CREATE_REF,
+ signal,
+ SubCreateRef::SignalLength,
+ JBB);
+}
+#endif
+
+
+/*************************************************************
+ *
+ * Creation of subscription ids
+ *
+ ************************************************************/
+
+void
+Suma::execCREATE_SUBID_REQ(Signal* signal)
+{
+ jamEntry();
+
+ CRASH_INSERTION(13001);
+
+ CreateSubscriptionIdReq const * req =
+ (CreateSubscriptionIdReq*)signal->getDataPtr();
+ SubscriberPtr subbPtr;
+ if(!c_subscriberPool.seize(subbPtr)){
+ jam();
+ sendSubIdRef(signal, GrepError::SUBSCRIPTION_ID_NOMEM);
+ return;
+ }
+
+ subbPtr.p->m_subscriberRef = signal->getSendersBlockRef();
+ subbPtr.p->m_senderData = req->senderData;
+ subbPtr.p->m_subscriberData = subbPtr.i;
+
+ UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
+
+ utilReq->senderData = subbPtr.p->m_subscriberData;
+ utilReq->sequenceId = SUMA_SEQUENCE;
+ utilReq->requestType = UtilSequenceReq::NextVal;
+ sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
+ signal, UtilSequenceReq::SignalLength, JBB);
+}
+
+void
+Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
+{
+ jamEntry();
+
+ DBUG_ENTER("Suma::execUTIL_SEQUENCE_CONF");
+ CRASH_INSERTION(13002);
+
+ UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr();
+ if(conf->requestType == UtilSequenceReq::Create) {
+ jam();
+ createSequenceReply(signal, conf, NULL);
+ DBUG_VOID_RETURN;
+ }
+
+ Uint64 subId;
+ memcpy(&subId,conf->sequenceValue,8);
+ Uint32 subData = conf->senderData;
+
+ SubscriberPtr subbPtr;
+ c_subscriberPool.getPtr(subbPtr,subData);
+
+
+ CreateSubscriptionIdConf * subconf = (CreateSubscriptionIdConf*)conf;
+ subconf->subscriptionId = (Uint32)subId;
+ subconf->subscriptionKey =(getOwnNodeId() << 16) | (Uint32)(subId & 0xFFFF);
+ subconf->subscriberData = subbPtr.p->m_senderData;
+
+ sendSignal(subbPtr.p->m_subscriberRef, GSN_CREATE_SUBID_CONF, signal,
+ CreateSubscriptionIdConf::SignalLength, JBB);
+
+ c_subscriberPool.release(subbPtr);
+
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execUTIL_SEQUENCE_REF(Signal* signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execUTIL_SEQUENCE_REF");
+ UtilSequenceRef * ref = (UtilSequenceRef*)signal->getDataPtr();
+
+ if(ref->requestType == UtilSequenceReq::Create) {
+ jam();
+ createSequenceReply(signal, NULL, ref);
+ DBUG_VOID_RETURN;
+ }
+
+ Uint32 subData = ref->senderData;
+
+ SubscriberPtr subbPtr;
+ c_subscriberPool.getPtr(subbPtr,subData);
+ sendSubIdRef(signal, GrepError::SEQUENCE_ERROR);
+ c_subscriberPool.release(subbPtr);
+ DBUG_VOID_RETURN;
+}//execUTIL_SEQUENCE_REF()
+
+
+void
+SumaParticipant::sendSubIdRef(Signal* signal, Uint32 errCode){
+ jam();
+ CreateSubscriptionIdRef * ref =
+ (CreateSubscriptionIdRef *)signal->getDataPtrSend();
+
+ ref->err = errCode;
+ sendSignal(signal->getSendersBlockRef(),
+ GSN_CREATE_SUBID_REF,
+ signal,
+ CreateSubscriptionIdRef::SignalLength,
+ JBB);
+
+ releaseSections(signal);
+ return;
+}
+
+/**********************************************************
+ * Suma participant interface
+ *
+ * Creation of subscriptions
+ */
+
+void
+SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("SumaParticipant::execSUB_CREATE_REQ");
+#endif
+ jamEntry();
+
+ CRASH_INSERTION(13003);
+
+ const SubCreateReq req = *(SubCreateReq*)signal->getDataPtr();
+
+ const Uint32 subId = req.subscriptionId;
+ const Uint32 subKey = req.subscriptionKey;
+ const Uint32 subRef = req.subscriberRef;
+ const Uint32 subData = req.subscriberData;
+ const Uint32 type = req.subscriptionType & SubCreateReq::RemoveFlags;
+ const Uint32 flags = req.subscriptionType & SubCreateReq::GetFlags;
+ const bool addTableFlag = (flags & SubCreateReq::AddTableFlag) != 0;
+ const bool restartFlag = (flags & SubCreateReq::RestartFlag) != 0;
+
+ const Uint32 sender = signal->getSendersBlockRef();
+
+ Subscription key;
+ key.m_subscriptionId = subId;
+ key.m_subscriptionKey = subKey;
+
+ SubscriptionPtr subPtr;
+ Ptr<SyncRecord> syncPtr;
+
+ if (addTableFlag) {
+ ndbrequire(restartFlag); //TODO remove this
+
+ if(!c_subscriptions.find(subPtr, key)) {
+ jam();
+ sendSubCreateRef(signal, req, GrepError::SUBSCRIPTION_NOT_FOUND);
+ return;
+ }
+ jam();
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ } else {
+ // Check that id/key is unique
+ if(c_subscriptions.find(subPtr, key)) {
+ jam();
+ sendSubCreateRef(signal, req, GrepError::SUBSCRIPTION_ID_NOT_UNIQUE);
+ return;
+ }
+ if(!c_subscriptions.seize(subPtr)) {
+ jam();
+ sendSubCreateRef(signal, req, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+ if(!c_syncPool.seize(syncPtr)) {
+ jam();
+ sendSubCreateRef(signal, req, GrepError::NOSPACE_IN_POOL);
+ return;
+ }
+ jam();
+ subPtr.p->m_subscriberRef = subRef;
+ subPtr.p->m_subscriberData = subData;
+ subPtr.p->m_subscriptionId = subId;
+ subPtr.p->m_subscriptionKey = subKey;
+ subPtr.p->m_subscriptionType = type;
+
+ /**
+     * Is it ok to memset here?
+     * @todo find out if memset is supported by all compilers
+ */
+ memset(subPtr.p->m_tables,0,MAX_TABLES);
+ subPtr.p->m_maxTables = 0;
+ subPtr.p->m_currentTable = 0;
+ subPtr.p->m_syncPtrI = syncPtr.i;
+ subPtr.p->m_markRemove = false;
+ subPtr.p->m_nSubscribers = 0;
+
+ c_subscriptions.add(subPtr);
+
+ syncPtr.p->m_subscriptionPtrI = subPtr.i;
+ syncPtr.p->m_doSendSyncData = true;
+ syncPtr.p->ptrI = syncPtr.i;
+ syncPtr.p->m_locked = false;
+ syncPtr.p->m_error = false;
+ }
+
+ if (restartFlag ||
+ type == SubCreateReq::TableEvent) {
+
+ syncPtr.p->m_doSendSyncData = false;
+
+ ndbrequire(type != SubCreateReq::SingleTableScan);
+ jam();
+
+ if (subPtr.p->m_tables[req.tableId] != 0) {
+ ndbrequire(false); //TODO remove
+ jam();
+ sendSubCreateRef(signal, req, GrepError::SELECTED_TABLE_ALREADY_ADDED);
+ return;
+ }
+ if (addTableFlag) {
+ ndbrequire(type != SubCreateReq::TableEvent);
+ jam();
+ }
+ subPtr.p->m_maxTables++;
+ addTableId(req.tableId, subPtr, syncPtr.p);
+ } else {
+ switch(type){
+ case SubCreateReq::SingleTableScan:
+ {
+ jam();
+ syncPtr.p->m_tableList.append(&req.tableId, 1);
+ if(signal->getNoOfSections() > 0){
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, SubCreateReq::ATTRIBUTE_LIST);
+ LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList);
+ append(attrBuf, ptr, getSectionSegmentPool());
+ }
+ }
+ break;
+#if 0
+ case SubCreateReq::SelectiveTableSnapshot:
+ /**
+      * Tables specified by the user that do not exist
+      * in the database are just ignored. No error message
+      * is given, nor do the db nodes crash.
+      * @todo: Memory is not released here (used tableBuf)
+ */
+ {
+ if(signal->getNoOfSections() == 0 ){
+ jam();
+ sendSubCreateRef(signal, req, GrepError::WRONG_NO_OF_SECTIONS);
+ return;
+ }
+
+ jam();
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr,0);// SubCreateReq::TABLE_LIST);
+ SimplePropertiesSectionReader r0(ptr, getSectionSegmentPool());
+ Uint32 i=0;
+ char table[MAX_TAB_NAME_SIZE];
+ r0.reset();
+ r0.first();
+ while(true){
+ if ((r0.getValueType() != SimpleProperties::StringValue) ||
+ (r0.getValueLen() <= 0)) {
+ releaseSections(signal);
+ ndbrequire(false);
+ }
+ r0.getString(table);
+ strcpy(subPtr.p->m_tableNames[i],table);
+ i++;
+ if(!r0.next())
+ break;
+ }
+ releaseSections(signal);
+ subPtr.p->m_maxTables = i;
+ subPtr.p->m_currentTable = 0;
+ releaseSections(signal);
+ convertNameToId(subPtr, signal);
+ return;
+ }
+ break;
+#endif
+ case SubCreateReq::DatabaseSnapshot:
+ {
+ jam();
+ }
+ break;
+ default:
+ ndbrequire(false);
+ }
+ }
+
+ sendSubCreateConf(signal, sender, subPtr);
+
+ return;
+}
+
+void
+SumaParticipant::sendSubCreateConf(Signal* signal, Uint32 sender,
+ SubscriptionPtr subPtr)
+{
+ SubCreateConf * const conf = (SubCreateConf*)signal->getDataPtrSend();
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ conf->subscriberData = subPtr.p->m_subscriberData;
+ sendSignal(sender, GSN_SUB_CREATE_CONF, signal,
+ SubCreateConf::SignalLength, JBB);
+}
+
+void
+SumaParticipant::sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint32 errCode){
+ jam();
+ SubCreateRef * ref = (SubCreateRef *)signal->getDataPtrSend();
+ ref->subscriberRef = reference();
+ ref->subscriberData = req.subscriberData;
+ ref->err = errCode;
+ releaseSections(signal);
+ sendSignal(signal->getSendersBlockRef(), GSN_SUB_CREATE_REF, signal,
+ SubCreateRef::SignalLength, JBB);
+ return;
+}
+
+Uint32
+SumaParticipant::getFirstGCI(Signal* signal) {
+ if (c_lastCompleteGCI == RNIL) {
+ ndbout_c("WARNING: c_lastCompleteGCI == RNIL");
+ return 0;
+ }
+ return c_lastCompleteGCI+3;
+}
+
+/**********************************************************
+ *
+ * Setting up triggers for a subscription
+ *
+ */
+
+void
+SumaParticipant::execSUB_SYNC_REQ(Signal* signal) {
+ jamEntry();
+
+ CRASH_INSERTION(13004);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("SumaParticipant::execSUB_SYNC_REQ");
+#endif
+
+ SubSyncReq * const req = (SubSyncReq*)signal->getDataPtr();
+
+ SubscriptionPtr subPtr;
+ Subscription key;
+ key.m_subscriptionId = req->subscriptionId;
+ key.m_subscriptionKey = req->subscriptionKey;
+
+ if(!c_subscriptions.find(subPtr, key)){
+ jam();
+ sendSubSyncRef(signal, GrepError::SUBSCRIPTION_ID_NOT_FOUND);
+ return;
+ }
+
+ /**
+ * @todo Tomas, do you really need to do this?
+ */
+ if(subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) {
+ jam();
+ subPtr.p->m_subscriberData = req->subscriberData;
+ }
+
+ bool ok = false;
+ SubscriptionData::Part part = (SubscriptionData::Part)req->part;
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ switch(part){
+ case SubscriptionData::MetaData:
+ ok = true;
+ jam();
+ if (subPtr.p->m_subscriptionType == SubCreateReq::DatabaseSnapshot) {
+ TableList::DataBufferIterator it;
+ syncPtr.p->m_tableList.first(it);
+ if(it.isNull()) {
+ /**
+ * Get all tables from dict
+ */
+ ListTablesReq * req = (ListTablesReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = syncPtr.i;
+ req->requestData = 0;
+ /**
+	 * @todo: accommodate scan of index tables?
+ */
+ req->setTableType(DictTabInfo::UserTable);
+
+ sendSignal(DBDICT_REF, GSN_LIST_TABLES_REQ, signal,
+ ListTablesReq::SignalLength, JBB);
+ break;
+ }
+ }
+
+ syncPtr.p->startMeta(signal);
+ break;
+ case SubscriptionData::TableData: {
+ ok = true;
+ jam();
+ syncPtr.p->startScan(signal);
+ break;
+ }
+ }
+ ndbrequire(ok);
+}
+
+void
+SumaParticipant::sendSubSyncRef(Signal* signal, Uint32 errCode){
+ jam();
+ SubSyncRef * ref =
+ (SubSyncRef *)signal->getDataPtrSend();
+ ref->err = errCode;
+ sendSignal(signal->getSendersBlockRef(),
+ GSN_SUB_SYNC_REF,
+ signal,
+ SubSyncRef::SignalLength,
+ JBB);
+
+ releaseSections(signal);
+ return;
+}
+
+/**********************************************************
+ * Dict interface
+ */
+
+void
+SumaParticipant::execLIST_TABLES_CONF(Signal* signal){
+ jamEntry();
+ CRASH_INSERTION(13005);
+ ListTablesConf* const conf = (ListTablesConf*)signal->getDataPtr();
+ SyncRecord* tmp = c_syncPool.getPtr(conf->senderData);
+ tmp->runLIST_TABLES_CONF(signal);
+}
+
+
+void
+SumaParticipant::execGET_TABINFOREF(Signal* signal){
+ jamEntry();
+ GetTabInfoRef* const ref = (GetTabInfoRef*)signal->getDataPtr();
+ SyncRecord* tmp = c_syncPool.getPtr(ref->senderData);
+ tmp->runGET_TABINFOREF(signal);
+}
+
+void
+SumaParticipant::execGET_TABINFO_CONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13006);
+
+ if(!assembleFragments(signal)){
+ return;
+ }
+
+ GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
+
+ Uint32 tableId = conf->tableId;
+ Uint32 senderData = conf->senderData;
+
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ ndbrequire(parseTable(signal, conf, tableId, tmp));
+ tmp->runGET_TABINFO_CONF(signal);
+}
+
+bool
+SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId,
+ SyncRecord* syncPtr_p){
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
+
+ SimplePropertiesSectionReader it(ptr, getSectionSegmentPool());
+
+ SimpleProperties::UnpackStatus s;
+ DictTabInfo::Table tableDesc; tableDesc.init();
+ s = SimpleProperties::unpack(it, &tableDesc,
+ DictTabInfo::TableMapping,
+ DictTabInfo::TableMappingSize,
+ true, true);
+
+ ndbrequire(s == SimpleProperties::Break);
+
+ TablePtr tabPtr;
+ c_tables.find(tabPtr, tableId);
+
+ if(!tabPtr.isNull() &&
+ tabPtr.p->m_schemaVersion != tableDesc.TableVersion){
+ jam();
+
+ tabPtr.p->release(* this);
+
+ // oops wrong schema version in stored tabledesc
+ // we need to find all subscriptions with old table desc
+ // and all subscribers to this
+ // hopefully none
+ c_tables.release(tabPtr);
+ tabPtr.setNull();
+ DLHashTable<SumaParticipant::Subscription>::Iterator i_subPtr;
+ c_subscriptions.first(i_subPtr);
+ SubscriptionPtr subPtr;
+ for(;!i_subPtr.isNull();c_subscriptions.next(i_subPtr)){
+ jam();
+ c_subscriptions.getPtr(subPtr, i_subPtr.curr.i);
+ SyncRecord* tmp = c_syncPool.getPtr(subPtr.p->m_syncPtrI);
+ if (tmp == syncPtr_p) {
+ jam();
+ continue;
+ }
+ if (subPtr.p->m_tables[tableId]) {
+ jam();
+ subPtr.p->m_tables[tableId] = 0; // remove this old table reference
+ TableList::DataBufferIterator it;
+ for(tmp->m_tableList.first(it);!it.isNull();tmp->m_tableList.next(it)) {
+ jam();
+ if (*it.data == tableId){
+ jam();
+ Uint32 *pdata = it.data;
+ tmp->m_tableList.next(it);
+ for(;!it.isNull();tmp->m_tableList.next(it)) {
+ jam();
+ *pdata = *it.data;
+ pdata = it.data;
+ }
+ *pdata = RNIL; // todo remove this last item...
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (tabPtr.isNull()) {
+ jam();
+ /**
+ * Uninitialized table record
+ */
+ ndbrequire(c_tables.seize(tabPtr));
+ new (tabPtr.p) Table;
+ tabPtr.p->m_schemaVersion = RNIL;
+ tabPtr.p->m_tableId = tableId;
+ tabPtr.p->m_hasTriggerDefined[0] = 0;
+ tabPtr.p->m_hasTriggerDefined[1] = 0;
+ tabPtr.p->m_hasTriggerDefined[2] = 0;
+ tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
+#if 0
+ ndbout_c("Get tab info conf %d", tableId);
+#endif
+ c_tables.add(tabPtr);
+ }
+
+ if(tabPtr.p->m_attributes.getSize() != 0){
+ jam();
+ return true;
+ }
+
+ /**
+ * Initialize table object
+ */
+ Uint32 noAttribs = tableDesc.NoOfAttributes;
+ Uint32 notFixed = (tableDesc.NoOfNullable+tableDesc.NoOfVariable);
+ tabPtr.p->m_schemaVersion = tableDesc.TableVersion;
+
+ // The attribute buffer
+ LocalDataBuffer<15> attrBuf(c_dataBufferPool, tabPtr.p->m_attributes);
+
+ // Temporary buffer
+ DataBuffer<15> theRest(c_dataBufferPool);
+
+ if(!attrBuf.seize(noAttribs)){
+ ndbrequire(false);
+ return false;
+ }
+
+ if(!theRest.seize(notFixed)){
+ ndbrequire(false);
+ return false;
+ }
+
+ DataBuffer<15>::DataBufferIterator attrIt; // Fixed not nullable
+ DataBuffer<15>::DataBufferIterator restIt; // variable + nullable
+ attrBuf.first(attrIt);
+ theRest.first(restIt);
+
+ for(Uint32 i = 0; i < noAttribs; i++) {
+ DictTabInfo::Attribute attrDesc; attrDesc.init();
+ s = SimpleProperties::unpack(it, &attrDesc,
+ DictTabInfo::AttributeMapping,
+ DictTabInfo::AttributeMappingSize,
+ true, true);
+ ndbrequire(s == SimpleProperties::Break);
+
+ if (!attrDesc.AttributeNullableFlag
+ /* && !attrDesc.AttributeVariableFlag */) {
+ jam();
+ * attrIt.data = attrDesc.AttributeId;
+ attrBuf.next(attrIt);
+ } else {
+ jam();
+ * restIt.data = attrDesc.AttributeId;
+ theRest.next(restIt);
+ }
+
+ // Move to next attribute
+ it.next();
+ }
+
+ /**
+ * Put the rest in end of attrBuf
+ */
+ theRest.first(restIt);
+ for(; !restIt.isNull(); theRest.next(restIt)){
+ * attrIt.data = * restIt.data;
+ attrBuf.next(attrIt);
+ }
+
+ theRest.release();
+
+ return true;
+}
+
+void
+SumaParticipant::execDI_FCOUNTCONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13007);
+
+ const Uint32 senderData = signal->theData[3];
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ tmp->runDI_FCOUNTCONF(signal);
+}
+
+void
+SumaParticipant::execDIGETPRIMCONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13008);
+
+ const Uint32 senderData = signal->theData[1];
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ tmp->runDIGETPRIMCONF(signal);
+}
+
+void
+SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execCREATE_TRIG_CONF");
+ CRASH_INSERTION(13009);
+
+ CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr();
+
+ const Uint32 senderData = conf->getConnectionPtr();
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ tmp->runCREATE_TRIG_CONF(signal);
+
+ /**
+ * dodido
+   * @todo: I (Johan) don't know what to do here. Jonas, what do you mean?
+ */
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::execCREATE_TRIG_REF(Signal* signal){
+ jamEntry();
+ ndbrequire(false);
+}
+
+void
+SumaParticipant::execDROP_TRIG_CONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF");
+ CRASH_INSERTION(13010);
+
+ DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr();
+
+ const Uint32 senderData = conf->getConnectionPtr();
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ tmp->runDROP_TRIG_CONF(signal);
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::execDROP_TRIG_REF(Signal* signal){
+ jamEntry();
+  DBUG_ENTER("SumaParticipant::execDROP_TRIG_REF");
+ DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr();
+
+ const Uint32 senderData = ref->getConnectionPtr();
+ SyncRecord* tmp = c_syncPool.getPtr(senderData);
+ tmp->runDROP_TRIG_CONF(signal);
+ DBUG_VOID_RETURN;
+}
+
+/*************************************************************************
+ *
+ *
+ */
+
+void
+SumaParticipant::SyncRecord::runLIST_TABLES_CONF(Signal* signal){
+ jam();
+
+ ListTablesConf * const conf = (ListTablesConf*)signal->getDataPtr();
+ const Uint32 len = signal->length() - ListTablesConf::HeaderLength;
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+
+ for (unsigned i = 0; i < len; i++) {
+ subPtr.p->m_maxTables++;
+ suma.addTableId(ListTablesConf::getTableId(conf->tableData[i]), subPtr, this);
+ }
+
+ // for (unsigned i = 0; i < len; i++)
+ // conf->tableData[i] = ListTablesConf::getTableId(conf->tableData[i]);
+ // m_tableList.append(&conf->tableData[0], len);
+
+#if 0
+ TableList::DataBufferIterator it;
+ int i = 0;
+ for(m_tableList.first(it);!it.isNull();m_tableList.next(it)) {
+ ndbout_c("%u listtableconf tableid %d", i++, *it.data);
+ }
+#endif
+
+ if(len == ListTablesConf::DataLength){
+ jam();
+    // we expect more LIST_TABLES_CONF
+ return;
+ }
+
+#if 0
+ subPtr.p->m_currentTable = 0;
+ subPtr.p->m_maxTables = 0;
+
+ TableList::DataBufferIterator it;
+ for(m_tableList.first(it); !it.isNull(); m_tableList.next(it)) {
+ subPtr.p->m_maxTables++;
+ suma.addTableId(*it.data, subPtr, NULL);
+#ifdef NODEFAIL_DEBUG
+ ndbout_c(" listtableconf tableid %d",*it.data);
+#endif
+ }
+#endif
+
+ startMeta(signal);
+}
+
+void
+SumaParticipant::SyncRecord::startMeta(Signal* signal){
+ jam();
+ m_currentTable = 0;
+ nextMeta(signal);
+}
+
+/**
+ * m_tableList only contains UserTables
+ */
+void
+SumaParticipant::SyncRecord::nextMeta(Signal* signal){
+ jam();
+
+ TableList::DataBufferIterator it;
+ if(!m_tableList.position(it, m_currentTable)){
+ completeMeta(signal);
+ return;
+ }
+
+ GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
+ req->senderRef = suma.reference();
+ req->senderData = ptrI;
+ req->requestType =
+ GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
+ req->tableId = * it.data;
+
+#if 0
+ ndbout_c("GET_TABINFOREQ id %d", req->tableId);
+#endif
+ suma.sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+}
+
+void
+SumaParticipant::SyncRecord::runGET_TABINFOREF(Signal* signal)
+{
+ jam();
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ Uint32 type = subPtr.p->m_subscriptionType;
+
+ bool do_continue = false;
+ switch (type) {
+ case SubCreateReq::TableEvent:
+ jam();
+ break;
+ case SubCreateReq::DatabaseSnapshot:
+ jam();
+ do_continue = true;
+ break;
+ case SubCreateReq::SelectiveTableSnapshot:
+ jam();
+ do_continue = true;
+ break;
+ case SubCreateReq::SingleTableScan:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+
+ if (! do_continue) {
+ m_error = true;
+ completeMeta(signal);
+ return;
+ }
+
+ m_currentTable++;
+ nextMeta(signal);
+ return;
+
+ // now we need to clean-up
+}
+
+
+void
+SumaParticipant::SyncRecord::runGET_TABINFO_CONF(Signal* signal){
+ jam();
+
+ GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
+ // const Uint32 gci = conf->gci;
+ const Uint32 tableId = conf->tableId;
+ TableList::DataBufferIterator it;
+
+ ndbrequire(m_tableList.position(it, m_currentTable));
+ ndbrequire(* it.data == tableId);
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
+
+ SubMetaData * data = (SubMetaData*)signal->getDataPtrSend();
+ /**
+   * Sending lastCompleteGCI. Used by Lars in interval calculations.
+   * Incremented by one, since lastCompleteGCI is not the current GCI.
+ */
+ data->gci = suma.c_lastCompleteGCI + 1;
+ data->tableId = tableId;
+ data->senderData = subPtr.p->m_subscriberData;
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_META_DATA Table %d", tableId);
+#else
+
+ bool okToSend = m_doSendSyncData;
+
+ /*
+   * If it is a SelectiveTableSnapshot and the table is not part of the
+   * subscription, then do not send anything, just continue.
+   * If it is a TableEvent, don't send anything either, since the APIs are
+   * not interested in meta data.
+ */
+ if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot)
+ if(!subPtr.p->m_tables[tableId])
+ okToSend = false;
+
+ if(okToSend) {
+ if(refToNode(subPtr.p->m_subscriberRef) == 0){
+ jam();
+ suma.EXECUTE_DIRECT(refToBlock(subPtr.p->m_subscriberRef),
+ GSN_SUB_META_DATA,
+ signal,
+ SubMetaData::SignalLength);
+ jamEntry();
+ suma.releaseSections(signal);
+ } else {
+ jam();
+ suma.sendSignal(subPtr.p->m_subscriberRef,
+ GSN_SUB_META_DATA,
+ signal,
+ SubMetaData::SignalLength, JBB);
+ }
+ }
+#endif
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, tableId));
+
+ LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+ if(fragBuf.getSize() == 0){
+ /**
+ * We need to gather fragment info
+ */
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tableId;
+ signal->theData[2] = ptrI;
+ suma.sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB);
+ return;
+ }
+
+ m_currentTable++;
+ nextMeta(signal);
+}
+
+void
+SumaParticipant::SyncRecord::runDI_FCOUNTCONF(Signal* signal){
+ jam();
+
+ const Uint32 userPtr = signal->theData[0];
+ const Uint32 fragCount = signal->theData[1];
+ const Uint32 tableId = signal->theData[2];
+
+ ndbrequire(userPtr == RNIL && signal->length() == 5);
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, tableId));
+
+ LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+ ndbrequire(fragBuf.getSize() == 0);
+
+ m_currentFragment = fragCount;
+ signal->theData[0] = RNIL;
+ signal->theData[1] = ptrI;
+ signal->theData[2] = tableId;
+ signal->theData[3] = 0; // Frag no
+ suma.sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+}
+
+void
+SumaParticipant::SyncRecord::runDIGETPRIMCONF(Signal* signal){
+ jam();
+
+ const Uint32 userPtr = signal->theData[0];
+ //const Uint32 senderData = signal->theData[1];
+ const Uint32 nodeCount = signal->theData[6];
+ const Uint32 tableId = signal->theData[7];
+ const Uint32 fragNo = signal->theData[8];
+
+ ndbrequire(userPtr == RNIL && signal->length() == 9);
+ ndbrequire(nodeCount > 0 && nodeCount <= MAX_REPLICAS);
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, tableId));
+ LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+
+ /**
+ * Add primary node for fragment to list
+ */
+ FragmentDescriptor fd;
+ fd.m_fragDesc.m_nodeId = signal->theData[2];
+ fd.m_fragDesc.m_fragmentNo = fragNo;
+ signal->theData[2] = fd.m_dummy;
+ fragBuf.append(&signal->theData[2], 1);
+
+ const Uint32 nextFrag = fragNo + 1;
+ if(nextFrag == m_currentFragment){
+ /**
+ * Complete frag info for table
+ */
+ m_currentTable++;
+ nextMeta(signal);
+ return;
+ }
+ signal->theData[0] = RNIL;
+ signal->theData[1] = ptrI;
+ signal->theData[2] = tableId;
+ signal->theData[3] = nextFrag; // Frag no
+ suma.sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+}
+
+void
+SumaParticipant::SyncRecord::completeMeta(Signal* signal){
+ jam();
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_SYNC_CONF (meta)");
+#else
+
+ suma.releaseSections(signal);
+
+ if (m_error) {
+ SubSyncRef * const ref = (SubSyncRef*)signal->getDataPtrSend();
+ ref->subscriptionId = subPtr.p->m_subscriptionId;
+ ref->subscriptionKey = subPtr.p->m_subscriptionKey;
+ ref->part = SubscriptionData::MetaData;
+ ref->subscriberData = subPtr.p->m_subscriberData;
+ ref->errorCode = SubSyncRef::Undefined;
+ suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_REF, signal,
+ SubSyncRef::SignalLength, JBB);
+ } else {
+ SubSyncConf * const conf = (SubSyncConf*)signal->getDataPtrSend();
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ conf->part = SubscriptionData::MetaData;
+ conf->subscriberData = subPtr.p->m_subscriberData;
+ suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONF, signal,
+ SubSyncConf::SignalLength, JBB);
+ }
+#endif
+}
+
+/**********************************************************
+ *
+ * Scan interface
+ *
+ */
+
+void
+SumaParticipant::SyncRecord::startScan(Signal* signal){
+ jam();
+
+ /**
+ * Get fraginfo
+ */
+ m_currentTable = 0;
+ m_currentFragment = 0;
+
+ nextScan(signal);
+}
+
+bool
+SumaParticipant::SyncRecord::getNextFragment(TablePtr * tab,
+ FragmentDescriptor * fd){
+ jam();
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ TableList::DataBufferIterator tabIt;
+ DataBuffer<15>::DataBufferIterator fragIt;
+
+ m_tableList.position(tabIt, m_currentTable);
+ for(; !tabIt.curr.isNull(); m_tableList.next(tabIt), m_currentTable++){
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, * tabIt.data));
+ if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot)
+ {
+ if(!subPtr.p->m_tables[tabPtr.p->m_tableId]) {
+ *tab = tabPtr;
+ return true;
+ }
+ }
+ LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+
+ fragBuf.position(fragIt, m_currentFragment);
+ for(; !fragIt.curr.isNull(); fragBuf.next(fragIt), m_currentFragment++){
+ FragmentDescriptor tmp;
+ tmp.m_dummy = * fragIt.data;
+ if(tmp.m_fragDesc.m_nodeId == suma.getOwnNodeId()){
+ * fd = tmp;
+ * tab = tabPtr;
+ return true;
+ }
+ }
+ m_currentFragment = 0;
+ }
+ return false;
+}
+
+void
+SumaParticipant::SyncRecord::nextScan(Signal* signal){
+ jam();
+ TablePtr tabPtr;
+ FragmentDescriptor fd;
+ SubscriptionPtr subPtr;
+ if(!getNextFragment(&tabPtr, &fd)){
+ jam();
+ completeScan(signal);
+ return;
+ }
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot) {
+ jam();
+ if(!subPtr.p->m_tables[tabPtr.p->m_tableId]) {
+ /*
+ * table is not part of the subscription. Check next table
+ */
+ m_currentTable++;
+ nextScan(signal);
+ return;
+ }
+ }
+
+ DataBuffer<15>::Head head = m_attributeList;
+ if(head.getSize() == 0){
+ head = tabPtr.p->m_attributes;
+ }
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, head);
+
+ ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend();
+ const Uint32 parallelism = 16;
+ const Uint32 attrLen = 5 + attrBuf.getSize();
+
+ req->senderData = m_subscriptionPtrI;
+ req->resultRef = suma.reference();
+ req->tableId = tabPtr.p->m_tableId;
+ req->requestInfo = 0;
+ req->savePointId = 0;
+ ScanFragReq::setLockMode(req->requestInfo, 0);
+ ScanFragReq::setHoldLockFlag(req->requestInfo, 1);
+ ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
+ ScanFragReq::setAttrLen(req->requestInfo, attrLen);
+ req->fragmentNoKeyLen = fd.m_fragDesc.m_fragmentNo;
+ req->schemaVersion = tabPtr.p->m_schemaVersion;
+ req->transId1 = 0;
+ req->transId2 = (SUMA << 20) + (suma.getOwnNodeId() << 8);
+ req->clientOpPtr = (ptrI << 16);
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
+ suma.sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
+
+ signal->theData[0] = ptrI;
+ signal->theData[1] = 0;
+ signal->theData[2] = (SUMA << 20) + (suma.getOwnNodeId() << 8);
+
+ // Return all
+ signal->theData[3] = attrBuf.getSize();
+ signal->theData[4] = 0;
+ signal->theData[5] = 0;
+ signal->theData[6] = 0;
+ signal->theData[7] = 0;
+
+ Uint32 dataPos = 8;
+ DataBuffer<15>::DataBufferIterator it;
+ for(attrBuf.first(it); !it.curr.isNull(); attrBuf.next(it)){
+ AttributeHeader::init(&signal->theData[dataPos++], * it.data, 0);
+ if(dataPos == 25){
+ suma.sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, 25, JBB);
+ dataPos = 3;
+ }
+ }
+ if(dataPos != 3){
+ suma.sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, dataPos, JBB);
+ }
+
+ m_currentTableId = tabPtr.p->m_tableId;
+ m_currentNoOfAttributes = attrBuf.getSize();
+}
+
+
+void
+SumaParticipant::execSCAN_FRAGREF(Signal* signal){
+ jamEntry();
+
+// ScanFragRef * const ref = (ScanFragRef*)signal->getDataPtr();
+ ndbrequire(false);
+}
+
+void
+SumaParticipant::execSCAN_FRAGCONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13011);
+
+ ScanFragConf * const conf = (ScanFragConf*)signal->getDataPtr();
+
+ const Uint32 completed = conf->fragmentCompleted;
+ const Uint32 senderData = conf->senderData;
+ const Uint32 completedOps = conf->completedOps;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, senderData);
+
+ if(completed != 2){
+ jam();
+
+#if PRINT_ONLY
+ SubSyncContinueConf * const conf =
+ (SubSyncContinueConf*)signal->getDataPtrSend();
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ execSUB_SYNC_CONTINUE_CONF(signal);
+#else
+ SubSyncContinueReq * const req = (SubSyncContinueReq*)signal->getDataPtrSend();
+ req->subscriberData = subPtr.p->m_subscriberData;
+ req->noOfRowsSent = completedOps;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONTINUE_REQ, signal,
+ SubSyncContinueReq::SignalLength, JBB);
+#endif
+ return;
+ }
+
+ ndbrequire(completedOps == 0);
+
+ SyncRecord* tmp = c_syncPool.getPtr(subPtr.p->m_syncPtrI);
+
+ tmp->m_currentFragment++;
+ tmp->nextScan(signal);
+}
+
+void
+SumaParticipant::execSUB_SYNC_CONTINUE_CONF(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13012);
+
+ SubSyncContinueConf * const conf =
+ (SubSyncContinueConf*)signal->getDataPtr();
+
+ SubscriptionPtr subPtr;
+ Subscription key;
+ key.m_subscriptionId = conf->subscriptionId;
+ key.m_subscriptionKey = conf->subscriptionKey;
+
+ ndbrequire(c_subscriptions.find(subPtr, key));
+
+ ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend();
+ req->senderData = subPtr.i;
+ req->closeFlag = 0;
+ req->transId1 = 0;
+ req->transId2 = (SUMA << 20) + (getOwnNodeId() << 8);
+ req->batch_size_rows = 16;
+ req->batch_size_bytes = 0;
+ sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+}
+
+void
+SumaParticipant::SyncRecord::completeScan(Signal* signal){
+ jam();
+ // m_tableList.release();
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_SYNC_CONF (data)");
+#else
+ SubSyncConf * const conf = (SubSyncConf*)signal->getDataPtrSend();
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ conf->part = SubscriptionData::TableData;
+ conf->subscriberData = subPtr.p->m_subscriberData;
+ suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONF, signal,
+ SubSyncConf::SignalLength, JBB);
+#endif
+}
+
+void
+SumaParticipant::execSCAN_HBREP(Signal* signal){
+ jamEntry();
+#if 0
+ ndbout << "execSCAN_HBREP" << endl << hex;
+ for(int i = 0; i<signal->length(); i++){
+ ndbout << signal->theData[i] << " ";
+ if(((i + 1) % 8) == 0)
+ ndbout << endl << hex;
+ }
+ ndbout << endl;
+#endif
+}
+
+/**********************************************************
+ *
+ * Suma participant interface
+ *
+ * Creation of subscriber
+ *
+ */
+
+void
+SumaParticipant::execSUB_START_REQ(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_START_REQ");
+
+ CRASH_INSERTION(13013);
+
+ if (c_restartLock) {
+ jam();
+ // ndbout_c("c_restartLock");
+ if (RtoI(signal->getSendersBlockRef(), false) == RNIL) {
+ jam();
+ sendSubStartRef(signal, /** Error Code */ 0, true);
+ DBUG_VOID_RETURN;
+ }
+    // only allow other Sumas in the nodegroup to come through for restart purposes
+ }
+
+ Subscription key;
+
+ SubStartReq * const req = (SubStartReq*)signal->getDataPtr();
+
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ Uint32 subscriberData = req->subscriberData;
+ Uint32 subscriberRef = req->subscriberRef;
+ SubscriptionData::Part part = (SubscriptionData::Part)req->part;
+ key.m_subscriptionId = req->subscriptionId;
+ key.m_subscriptionKey = req->subscriptionKey;
+
+ SubscriptionPtr subPtr;
+ if(!c_subscriptions.find(subPtr, key)){
+ jam();
+ sendSubStartRef(signal, /** Error Code */ 0);
+ DBUG_VOID_RETURN;
+ }
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ if (syncPtr.p->m_locked) {
+ jam();
+#if 0
+ ndbout_c("Locked");
+#endif
+ sendSubStartRef(signal, /** Error Code */ 0, true);
+ DBUG_VOID_RETURN;
+ }
+ syncPtr.p->m_locked = true;
+
+ SubscriberPtr subbPtr;
+ if(!c_subscriberPool.seize(subbPtr)){
+ jam();
+ syncPtr.p->m_locked = false;
+ sendSubStartRef(signal, /** Error Code */ 0);
+ DBUG_VOID_RETURN;
+ }
+
+ Uint32 type = subPtr.p->m_subscriptionType;
+
+ subbPtr.p->m_senderRef = senderRef;
+ subbPtr.p->m_senderData = senderData;
+
+ switch (type) {
+ case SubCreateReq::TableEvent:
+ jam();
+ // we want the data to return to the API not DICT
+ subbPtr.p->m_subscriberRef = subscriberRef;
+ // ndbout_c("start ref = %u", signal->getSendersBlockRef());
+ // ndbout_c("ref = %u", subbPtr.p->m_subscriberRef);
+ // we use the subscription id for now, should really be API choice
+ subbPtr.p->m_subscriberData = subscriberData;
+
+#if 0
+ if (RtoI(signal->getSendersBlockRef(), false) == RNIL) {
+ jam();
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
+ if (ref != reference()) {
+ jam();
+ sendSubStartReq(subPtr, subbPtr, signal, ref);
+ } else
+ jam();
+ }
+ }
+#endif
+ break;
+ case SubCreateReq::DatabaseSnapshot:
+ case SubCreateReq::SelectiveTableSnapshot:
+ jam();
+ subbPtr.p->m_subscriberRef = GREP_REF;
+ subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData;
+ break;
+ case SubCreateReq::SingleTableScan:
+ jam();
+ subbPtr.p->m_subscriberRef = subPtr.p->m_subscriberRef;
+ subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData;
+ }
+
+ subbPtr.p->m_subPtrI = subPtr.i;
+ subbPtr.p->m_firstGCI = RNIL;
+ if (type == SubCreateReq::TableEvent)
+ subbPtr.p->m_lastGCI = 0;
+ else
+ subbPtr.p->m_lastGCI = RNIL; // disable usage of m_lastGCI
+ bool ok = false;
+
+ switch(part){
+ case SubscriptionData::MetaData:
+ ok = true;
+ jam();
+ c_metaSubscribers.add(subbPtr);
+ sendSubStartComplete(signal, subbPtr, 0, part);
+ break;
+ case SubscriptionData::TableData:
+ ok = true;
+ jam();
+ c_prepDataSubscribers.add(subbPtr);
+ syncPtr.p->startTrigger(signal);
+ break;
+ }
+ ndbrequire(ok);
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::sendSubStartComplete(Signal* signal,
+ SubscriberPtr subbPtr,
+ Uint32 firstGCI,
+ SubscriptionData::Part part){
+ jam();
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ syncPtr.p->m_locked = false;
+
+ SubStartConf * const conf = (SubStartConf*)signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ conf->firstGCI = firstGCI;
+ conf->part = (Uint32) part;
+
+ conf->subscriberData = subPtr.p->m_subscriberData;
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_CONF, signal,
+ SubStartConf::SignalLength, JBB);
+}
+
+#if 0
+void
+SumaParticipant::sendSubStartRef(SubscriptionPtr subPtr,
+ Signal* signal, Uint32 errCode,
+ bool temporary){
+ jam();
+ SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
+ xxx ref->senderRef = reference();
+ xxx ref->senderData = subPtr.p->m_senderData;
+ ref->subscriptionId = subPtr.p->m_subscriptionId;
+ ref->subscriptionKey = subPtr.p->m_subscriptionKey;
+ ref->part = (Uint32) subPtr.p->m_subscriptionType;
+ ref->subscriberData = subPtr.p->m_subscriberData;
+ ref->err = errCode;
+ if (temporary) {
+ jam();
+ ref->setTemporary();
+ }
+ releaseSections(signal);
+ sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_REF, signal,
+ SubStartRef::SignalLength, JBB);
+}
+#endif
+void
+SumaParticipant::sendSubStartRef(Signal* signal, Uint32 errCode,
+ bool temporary){
+ jam();
+ SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->err = errCode;
+ if (temporary) {
+ jam();
+ ref->setTemporary();
+ }
+ releaseSections(signal);
+ sendSignal(signal->getSendersBlockRef(), GSN_SUB_START_REF, signal,
+ SubStartRef::SignalLength, JBB);
+}
+
+/**********************************************************
+ *
+ * Trigger admin interface
+ *
+ */
+
+void
+SumaParticipant::SyncRecord::startTrigger(Signal* signal){
+ jam();
+ m_currentTable = 0;
+ m_latestTriggerId = RNIL;
+ nextTrigger(signal);
+}
+
+void
+SumaParticipant::SyncRecord::nextTrigger(Signal* signal){
+ jam();
+
+ TableList::DataBufferIterator it;
+
+ if(!m_tableList.position(it, m_currentTable)){
+ completeTrigger(signal);
+ return;
+ }
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+ const Uint32 RT_BREAK = 48;
+ Uint32 latestTriggerId = 0;
+ for(Uint32 i = 0; i<RT_BREAK && !it.isNull(); i++, m_tableList.next(it)){
+ TablePtr tabPtr;
+#if 0
+ ndbout_c("nextTrigger tableid %u", *it.data);
+#endif
+ ndbrequire(suma.c_tables.find(tabPtr, *it.data));
+
+ AttributeMask attrMask;
+ createAttributeMask(attrMask, tabPtr.p);
+
+ for(Uint32 j = 0; j<3; j++){
+ i++;
+ latestTriggerId = (tabPtr.p->m_schemaVersion << 18) |
+ (j << 16) | tabPtr.p->m_tableId;
+ if(tabPtr.p->m_hasTriggerDefined[j] == 0) {
+ ndbrequire(tabPtr.p->m_triggerIds[j] == ILLEGAL_TRIGGER_ID);
+#if 0
+ ndbout_c("DEFINING trigger on table %u[%u]", tabPtr.p->m_tableId, j);
+#endif
+ CreateTrigReq * const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(SUMA_REF);
+ req->setConnectionPtr(ptrI);
+ req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
+ req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
+ req->setMonitorReplicas(true);
+ req->setMonitorAllAttributes(false);
+ req->setReceiverRef(SUMA_REF);
+ req->setTriggerId(latestTriggerId);
+ req->setTriggerEvent((TriggerEvent::Value)j);
+ req->setTableId(tabPtr.p->m_tableId);
+ req->setAttributeMask(attrMask);
+ suma.sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+
+ } else {
+ /**
+ * Faking that a trigger has been created in order to
+ * simulate the proper behaviour.
+ * Perhaps this should be a dummy signal instead of
+ * (ab)using CREATE_TRIG_CONF.
+ */
+ CreateTrigConf * conf = (CreateTrigConf*)signal->getDataPtrSend();
+ conf->setConnectionPtr(ptrI);
+ conf->setTableId(tabPtr.p->m_tableId);
+ conf->setTriggerId(latestTriggerId);
+ suma.sendSignal(SUMA_REF,GSN_CREATE_TRIG_CONF,
+ signal, CreateTrigConf::SignalLength, JBB);
+
+ }
+
+ }
+ m_currentTable++;
+ }
+ m_latestTriggerId = latestTriggerId;
+}
+
+void
+SumaParticipant::SyncRecord::createAttributeMask(AttributeMask& mask,
+ Table * table){
+ jam();
+ mask.clear();
+ DataBuffer<15>::DataBufferIterator it;
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, table->m_attributes);
+ for(attrBuf.first(it); !it.curr.isNull(); attrBuf.next(it)){
+ mask.set(* it.data);
+ }
+}
+
+void
+SumaParticipant::SyncRecord::runCREATE_TRIG_CONF(Signal* signal){
+ jam();
+
+ CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr();
+ const Uint32 triggerId = conf->getTriggerId();
+ Uint32 type = (triggerId >> 16) & 0x3;
+ Uint32 tableId = conf->getTableId();
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, tableId));
+
+ ndbrequire(type < 3);
+ tabPtr.p->m_triggerIds[type] = triggerId;
+ tabPtr.p->m_hasTriggerDefined[type]++;
+
+ if(triggerId == m_latestTriggerId){
+ jam();
+ nextTrigger(signal);
+ }
+}
+
+void
+SumaParticipant::SyncRecord::completeTrigger(Signal* signal){
+ jam();
+ SubscriptionPtr subPtr;
+ CRASH_INSERTION(13013);
+#ifdef EVENT_PH3_DEBUG
+ ndbout_c("SumaParticipant: trigger completed");
+#endif
+ Uint32 gci;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ SubscriberPtr subbPtr;
+ {
+ bool found = false;
+
+ for(suma.c_prepDataSubscribers.first(subbPtr);
+ !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) {
+ jam();
+ if(subbPtr.p->m_subPtrI == subPtr.i) {
+ jam();
+ found = true;
+ break;
+ }
+ }
+ ndbrequire(found);
+ gci = suma.getFirstGCI(signal);
+ subbPtr.p->m_firstGCI = gci;
+ suma.c_prepDataSubscribers.remove(subbPtr);
+ suma.c_dataSubscribers.add(subbPtr);
+ }
+ suma.sendSubStartComplete(signal, subbPtr, gci, SubscriptionData::TableData);
+}
+
+void
+SumaParticipant::SyncRecord::startDropTrigger(Signal* signal){
+ jam();
+ m_currentTable = 0;
+ m_latestTriggerId = RNIL;
+ nextDropTrigger(signal);
+}
+
+void
+SumaParticipant::SyncRecord::nextDropTrigger(Signal* signal){
+ jam();
+
+ TableList::DataBufferIterator it;
+
+ if(!m_tableList.position(it, m_currentTable)){
+ completeDropTrigger(signal);
+ return;
+ }
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ const Uint32 RT_BREAK = 48;
+ Uint32 latestTriggerId = 0;
+ for(Uint32 i = 0; i<RT_BREAK && !it.isNull(); i++, m_tableList.next(it)){
+ jam();
+ TablePtr tabPtr;
+#if 0
+ ndbout_c("nextDropTrigger tableid %u", *it.data);
+#endif
+ ndbrequire(suma.c_tables.find(tabPtr, * it.data));
+
+ for(Uint32 j = 0; j<3; j++){
+ jam();
+ ndbrequire(tabPtr.p->m_triggerIds[j] != ILLEGAL_TRIGGER_ID);
+ i++;
+ latestTriggerId = tabPtr.p->m_triggerIds[j];
+ if(tabPtr.p->m_hasTriggerDefined[j] == 1) {
+ jam();
+
+ DropTrigReq * const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setConnectionPtr(ptrI);
+ req->setUserRef(SUMA_REF); // Sending to myself
+ req->setRequestType(DropTrigReq::RT_USER);
+ req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
+ req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
+ req->setIndexId(RNIL);
+
+ req->setTableId(tabPtr.p->m_tableId);
+ req->setTriggerId(latestTriggerId);
+ req->setTriggerEvent((TriggerEvent::Value)j);
+
+#if 0
+ ndbout_c("DROPPING trigger %u = %u %u %u on table %u[%u]",
+ latestTriggerId,TriggerType::SUBSCRIPTION_BEFORE,
+ TriggerActionTime::TA_DETACHED, j, tabPtr.p->m_tableId, j);
+#endif
+ suma.sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ } else {
+ jam();
+ ndbrequire(tabPtr.p->m_hasTriggerDefined[j] > 1);
+ /**
+ * Faking that a trigger has been dropped in order to
+ * simulate the proper behaviour.
+ * Perhaps this should be a dummy signal instead of
+ * (ab)using DROP_TRIG_CONF.
+ */
+ DropTrigConf * conf = (DropTrigConf*)signal->getDataPtrSend();
+ conf->setConnectionPtr(ptrI);
+ conf->setTableId(tabPtr.p->m_tableId);
+ conf->setTriggerId(latestTriggerId);
+ suma.sendSignal(SUMA_REF,GSN_DROP_TRIG_CONF,
+ signal, DropTrigConf::SignalLength, JBB);
+ }
+ }
+ m_currentTable++;
+ }
+ m_latestTriggerId = latestTriggerId;
+}
+
+void
+SumaParticipant::SyncRecord::runDROP_TRIG_REF(Signal* signal){
+ jam();
+ DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr();
+ if (ref->getErrorCode() != DropTrigRef::TriggerNotFound){
+ ndbrequire(false);
+ }
+ const Uint32 triggerId = ref->getTriggerId();
+ Uint32 tableId = ref->getTableId();
+ runDropTrig(signal, triggerId, tableId);
+}
+
+void
+SumaParticipant::SyncRecord::runDROP_TRIG_CONF(Signal* signal){
+ jam();
+
+ DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr();
+ const Uint32 triggerId = conf->getTriggerId();
+ Uint32 tableId = conf->getTableId();
+ runDropTrig(signal, triggerId, tableId);
+}
+
+void
+SumaParticipant::SyncRecord::runDropTrig(Signal* signal,
+ Uint32 triggerId,
+ Uint32 tableId){
+ Uint32 type = (triggerId >> 16) & 0x3;
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, tableId));
+
+ ndbrequire(type < 3);
+ ndbrequire(tabPtr.p->m_triggerIds[type] == triggerId);
+ tabPtr.p->m_hasTriggerDefined[type]--;
+ if (tabPtr.p->m_hasTriggerDefined[type] == 0) {
+ jam();
+ tabPtr.p->m_triggerIds[type] = ILLEGAL_TRIGGER_ID;
+ }
+ if(triggerId == m_latestTriggerId){
+ jam();
+ nextDropTrigger(signal);
+ }
+}
+
+void
+SumaParticipant::SyncRecord::completeDropTrigger(Signal* signal){
+ jam();
+ SubscriptionPtr subPtr;
+ CRASH_INSERTION(13014);
+#if 0
+ ndbout_c("trigger completed");
+#endif
+
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+ ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+
+ bool found = false;
+ SubscriberPtr subbPtr;
+ for(suma.c_prepDataSubscribers.first(subbPtr);
+ !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) {
+ jam();
+ if(subbPtr.p->m_subPtrI == subPtr.i) {
+ jam();
+ found = true;
+ break;
+ }
+ }
+ ndbrequire(found);
+ suma.sendSubStopComplete(signal, subbPtr);
+}
+
+/**********************************************************
+ * Scan data interface
+ *
+ * Assumption: one execTRANSID_AI contains all attr info
+ *
+ */
+
+#define SUMA_BUF_SZ1 MAX_KEY_SIZE_IN_WORDS + MAX_TUPLE_SIZE_IN_WORDS
+#define SUMA_BUF_SZ MAX_ATTRIBUTES_IN_TABLE + SUMA_BUF_SZ1
+
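+/**
+ * f_buffer collects attribute headers and after values (scan data and
+ * trigger after values); b_buffer collects the before values delivered via
+ * TRIG_ATTRINFO.  The *_bufferLock variables hold the operation/trigger id
+ * that currently owns the buffer (0 means free).
+ */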
+static Uint32 f_bufferLock = 0;
+static Uint32 f_buffer[SUMA_BUF_SZ];
+static Uint32 f_trigBufferSize = 0;
+static Uint32 b_bufferLock = 0;
+static Uint32 b_buffer[SUMA_BUF_SZ];
+static Uint32 b_trigBufferSize = 0;
+
+void
+SumaParticipant::execTRANSID_AI(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13015);
+ TransIdAI * const data = (TransIdAI*)signal->getDataPtr();
+ const Uint32 opPtrI = data->connectPtr;
+ const Uint32 length = signal->length() - 3;
+
+ if(f_bufferLock == 0){
+ f_bufferLock = opPtrI;
+ } else {
+ ndbrequire(f_bufferLock == opPtrI);
+ }
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, (opPtrI >> 16));
+
+ Uint32 sum = 0;
+ Uint32 * dst = f_buffer + MAX_ATTRIBUTES_IN_TABLE;
+ Uint32 * headers = f_buffer;
+ const Uint32 * src = &data->attrData[0];
+ const Uint32 * const end = &src[length];
+
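+  /**
+   * Repack the incoming stream (header, data, header, data, ...) into
+   * f_buffer as all headers first, then all data, so that it can be sent
+   * below as two linear sections.
+   */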
+ const Uint32 attribs = syncPtr.p->m_currentNoOfAttributes;
+ for(Uint32 i = 0; i<attribs; i++){
+ Uint32 tmp = * src++;
+ * headers++ = tmp;
+ Uint32 len = AttributeHeader::getDataSize(tmp);
+
+ memcpy(dst, src, 4 * len);
+ dst += len;
+ src += len;
+ sum += len;
+ }
+
+ ndbrequire(src == end);
+
+ /**
+ * Send data to subscriber
+ */
+ LinearSectionPtr ptr[3];
+ ptr[0].p = f_buffer;
+ ptr[0].sz = attribs;
+
+ ptr[1].p = f_buffer + MAX_ATTRIBUTES_IN_TABLE;
+ ptr[1].sz = sum;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, syncPtr.p->m_subscriptionPtrI);
+
+ /**
+ * Initialize signal
+ */
+ SubTableData * sdata = (SubTableData*)signal->getDataPtrSend();
+ Uint32 ref = subPtr.p->m_subscriberRef;
+ sdata->tableId = syncPtr.p->m_currentTableId;
+ sdata->senderData = subPtr.p->m_subscriberData;
+ sdata->operation = 3; // Scan
+ sdata->gci = 1; // Undefined
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_TABLE_DATA (scan) #attr: %d len: %d", attribs, sum);
+#else
+ sendSignal(ref,
+ GSN_SUB_TABLE_DATA,
+ signal,
+ SubTableData::SignalLength, JBB,
+ ptr, 2);
+#endif
+
+ /**
+ * Reset f_bufferLock
+ */
+ f_bufferLock = 0;
+}
+
+/**********************************************************
+ *
+ * Trigger data interface
+ *
+ */
+
+void
+SumaParticipant::execTRIG_ATTRINFO(Signal* signal){
+ jamEntry();
+
+ CRASH_INSERTION(13016);
+ TrigAttrInfo* const trg = (TrigAttrInfo*)signal->getDataPtr();
+ const Uint32 trigId = trg->getTriggerId();
+
+ const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength;
+
+ if(trg->getAttrInfoType() == TrigAttrInfo::BEFORE_VALUES){
+ jam();
+
+ ndbrequire(b_bufferLock == trigId);
+
+ memcpy(b_buffer + b_trigBufferSize, trg->getData(), 4 * dataLen);
+ b_trigBufferSize += dataLen;
+ // printf("before values %u %u %u\n",trigId, dataLen, b_trigBufferSize);
+ } else {
+ jam();
+
+ if(f_bufferLock == 0){
+ f_bufferLock = trigId;
+ f_trigBufferSize = 0;
+ b_bufferLock = trigId;
+ b_trigBufferSize = 0;
+ } else {
+ ndbrequire(f_bufferLock == trigId);
+ }
+
+ memcpy(f_buffer + f_trigBufferSize, trg->getData(), 4 * dataLen);
+ f_trigBufferSize += dataLen;
+ }
+}
+
+#ifdef NODEFAIL_DEBUG2
+static int theCounts[64] = {0};
+#endif
+
+Uint32
+Suma::getStoreBucket(Uint32 v)
+{
+  // map the hash value onto one of the NO_OF_BUCKETS store buckets
+
+ const Uint32 N = NO_OF_BUCKETS;
+  const Uint32 D = v % N; // Distribution key
+ return D;
+}
+
+Uint32
+Suma::getResponsibleSumaNodeId(Uint32 D)
+{
+ // id will contain id to responsible suma or
+ // RNIL if we don't have nodegroup info yet
+
+ Uint32 id;
+
+ if (c_restartLock) {
+ jam();
+ // ndbout_c("c_restartLock");
+ id = RNIL;
+ } else {
+ jam();
+ id = RNIL;
+    const Uint32 n = c_noNodesInGroup; // Number of nodes in node group
+ const Uint32 C1 = D / n;
+ const Uint32 C2 = D - C1*n; // = D % n;
+ const Uint32 C = C2 + C1 % n;
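+    // C is the preferred starting index in the node group for bucket D;
+    // the loop below probes the group from there, skipping nodes that are
+    // dead or still preparing.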
+ for (Uint32 i = 0; i < n; i++) {
+ jam();
+ id = c_nodesInGroup[(C + i) % n];
+ if (c_aliveNodes.get(id) &&
+ !c_preparingNodes.get(id)) {
+ jam();
+ break;
+ }//if
+ }
+#ifdef NODEFAIL_DEBUG2
+ theCounts[id]++;
+ ndbout_c("Suma:responsible n=%u, D=%u, id = %u, count=%u",
+ n,D, id, theCounts[id]);
+#endif
+ }
+ return id;
+}
+
+Uint32
+SumaParticipant::decideWhoToSend(Uint32 nBucket, Uint32 gci){
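+  // Returns true (replicaFlag) when another SUMA in the node group is
+  // responsible for sending this bucket for this GCI; the caller
+  // (execFIRE_TRIG_ORD) then only records the GCI in c_failoverBuffer.
+  // Returns false when this node should send the data itself.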
+ bool replicaFlag = true;
+ Uint32 nId = RNIL;
+
+ // bucket active/not active set by GCP_COMPLETE
+ if (c_buckets[nBucket].active) {
+ if (c_buckets[nBucket].handover && c_buckets[nBucket].handoverGCI <= gci) {
+ jam();
+ replicaFlag = true; // let the other node send this
+ nId = RNIL;
+      // mark this as started; if we get a node failure now we have some lost stuff
+ c_buckets[nBucket].handover_started = true;
+ } else {
+ jam();
+ replicaFlag = false;
+ nId = refToNode(reference());
+ }
+ } else {
+ nId = getResponsibleSumaNodeId(nBucket);
+ replicaFlag = !(nId == refToNode(reference()));
+
+ if (!replicaFlag) {
+ if (!c_buckets[nBucket].handover) {
+ jam();
+	// apparently a node has failed and we are taking over sending
+	// from that bucket. Now we need to go back to the latest completed
+	// GCI. Handling will depend on Subscriber and Subscription
+
+ // TODO, for now we make an easy takeover
+ if (gci < c_nodeFailGCI)
+ c_lastInconsistentGCI = gci;
+
+	// we now have responsibility for this bucket and we're actively
+	// sending from it
+ c_buckets[nBucket].active = true;
+#ifdef HANDOVER_DEBUG
+ ndbout_c("Takeover Bucket %u", nBucket);
+#endif
+ } else if (c_buckets[nBucket].handoverGCI > gci) {
+ jam();
+ replicaFlag = true; // handover going on, but don't start sending yet
+ nId = RNIL;
+ } else {
+ jam();
+#ifdef HANDOVER_DEBUG
+ ndbout_c("Possible error: Will send from GCI = %u", gci);
+#endif
+ }
+ }
+ }
+
+#ifdef NODEFAIL_DEBUG2
+ ndbout_c("Suma:bucket %u, responsible id = %u, replicaFlag = %u",
+ nBucket, nId, (Uint32)replicaFlag);
+#endif
+ return replicaFlag;
+}
+
+void
+SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execFIRE_TRIG_ORD");
+ CRASH_INSERTION(13016);
+ FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr();
+ const Uint32 trigId = trg->getTriggerId();
+ const Uint32 hashValue = trg->getHashValue();
+ const Uint32 gci = trg->getGCI();
+ const Uint32 event = trg->getTriggerEvent();
+ const Uint32 triggerId = trg->getTriggerId();
+ Uint32 tableId = triggerId & 0xFFFF;
+
+ ndbrequire(f_bufferLock == trigId);
+
+#ifdef EVENT_DEBUG2
+ ndbout_c("SumaParticipant::execFIRE_TRIG_ORD");
+#endif
+
+ Uint32 sz = trg->getNoOfPrimaryKeyWords()+trg->getNoOfAfterValueWords();
+ ndbrequire(sz == f_trigBufferSize);
+
+ /**
+ * Reformat as "all headers" + "all data"
+ */
+ Uint32 dataLen = 0;
+ Uint32 noOfAttrs = 0;
+ Uint32 * src = f_buffer;
+ Uint32 * headers = signal->theData + 25;
+ Uint32 * dst = signal->theData + 25 + MAX_ATTRIBUTES_IN_TABLE;
+
+ LinearSectionPtr ptr[3];
+ int nptr;
+
+ ptr[0].p = headers;
+ ptr[1].p = dst;
+
+ while(sz > 0){
+ jam();
+ Uint32 tmp = * src ++;
+ * headers ++ = tmp;
+ Uint32 len = AttributeHeader::getDataSize(tmp);
+ memcpy(dst, src, 4 * len);
+ dst += len;
+ src += len;
+
+ noOfAttrs++;
+ dataLen += len;
+ sz -= (1 + len);
+ }
+ ndbrequire(sz == 0);
+
+ ptr[0].sz = noOfAttrs;
+ ptr[1].sz = dataLen;
+
+ if (b_trigBufferSize > 0) {
+ jam();
+ ptr[2].p = b_buffer;
+ ptr[2].sz = b_trigBufferSize;
+ nptr = 3;
+ } else {
+ jam();
+ nptr = 2;
+ }
+
+ // right now only for tableEvent
+ bool replicaFlag = decideWhoToSend(getStoreBucket(hashValue), gci);
+
+ /**
+ * Signal to subscriber(s)
+ */
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg;
+ data->gci = gci;
+ data->tableId = tableId;
+ data->operation = event;
+ data->noOfAttributes = noOfAttrs;
+ data->dataSize = dataLen;
+
+ SubscriberPtr subbPtr;
+ for(c_dataSubscribers.first(subbPtr); !subbPtr.isNull();
+ c_dataSubscribers.next(subbPtr)){
+ if (subbPtr.p->m_firstGCI > gci) {
+#ifdef EVENT_DEBUG
+ ndbout_c("m_firstGCI = %u, gci = %u", subbPtr.p->m_firstGCI, gci);
+#endif
+ jam();
+ // we're either restarting or it's a newly created subscriber
+ // and waiting for the right gci
+ continue;
+ }
+
+ jam();
+
+ const Uint32 ref = subbPtr.p->m_subscriberRef;
+ // ndbout_c("ref = %u", ref);
+ const Uint32 subdata = subbPtr.p->m_subscriberData;
+ data->senderData = subdata;
+ /*
+ * get subscription ptr for this subscriber
+ */
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+
+ if(!subPtr.p->m_tables[tableId]) {
+ jam();
+ continue;
+ //continue in for-loop if the table is not part of
+ //the subscription. Otherwise, send data to subscriber.
+ }
+
+ if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) {
+ if (replicaFlag) {
+ jam();
+ c_failoverBuffer.subTableData(gci,NULL,0);
+ continue;
+ }
+ jam();
+ Uint32 tmp = data->logType;
+ if (c_lastInconsistentGCI == data->gci) {
+ data->setGCINotConsistent();
+ }
+
+#ifdef HANDOVER_DEBUG
+ {
+ static int aLongGCIName = 0;
+ if (data->gci != aLongGCIName) {
+ aLongGCIName = data->gci;
+ ndbout_c("sent from GCI = %u", aLongGCIName);
+ }
+ }
+#endif
+ DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", refToNode(ref)));
+ sendSignal(ref, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB, ptr, nptr);
+ data->logType = tmp;
+ } else {
+ ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId());
+ jam();
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_TABLE_DATA to %s: op: %d #attr: %d len: %d",
+ getBlockName(refToBlock(ref)),
+	       event, noOfAttrs, dataLen);
+
+#else
+#ifdef HANDOVER_DEBUG
+ {
+ static int aLongGCIName2 = 0;
+ if (data->gci != aLongGCIName2) {
+ aLongGCIName2 = data->gci;
+ ndbout_c("(EXECUTE_DIRECT) sent from GCI = %u to %u", aLongGCIName2, ref);
+ }
+ }
+#endif
+ EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength);
+ jamEntry();
+#endif
+ }
+ }
+
+ /**
+ * Reset f_bufferLock
+ */
+ f_bufferLock = 0;
+ b_bufferLock = 0;
+
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::execSUB_GCP_COMPLETE_REP(Signal* signal){
+ jamEntry();
+
+ SubGcpCompleteRep * rep = (SubGcpCompleteRep*)signal->getDataPtrSend();
+
+ Uint32 gci = rep->gci;
+ c_lastCompleteGCI = gci;
+
+ /**
+   * always send SUB_GCP_COMPLETE_REP to Grep (so that Grep can do
+   * its interval calculations, even before the subscription is
+   * started)
+ */
+ rep->senderRef = reference();
+ rep->senderData = 0; //ignored in grep
+ EXECUTE_DIRECT(refToBlock(GREP_REF), GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength);
+
+ /**
+ * Signal to subscriber(s)
+ */
+
+ SubscriberPtr subbPtr;
+ SubscriptionPtr subPtr;
+ c_dataSubscribers.first(subbPtr);
+ for(; !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
+
+ if (subbPtr.p->m_firstGCI > gci) {
+ jam();
+ // we don't send SUB_GCP_COMPLETE_REP for incomplete GCI's
+ continue;
+ }
+
+ const Uint32 ref = subbPtr.p->m_subscriberRef;
+ rep->senderRef = ref;
+ rep->senderData = subbPtr.p->m_subscriberData;
+
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+#if PRINT_ONLY
+ ndbout_c("GSN_SUB_GCP_COMPLETE_REP to %s:",
+ getBlockName(refToBlock(ref)));
+#else
+ /**
+ * Ignore sending to GREP (since we sent earlier)
+ */
+ if (ref == GREP_REF) {
+ jam();
+ continue;
+ }
+
+ CRASH_INSERTION(13018);
+
+ if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent)
+ {
+ jam();
+ sendSignal(ref, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+ }
+ else
+ {
+ jam();
+ ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId());
+ EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength);
+ jamEntry();
+ }
+#endif
+ }
+
+ if (c_handoverToDo) {
+ jam();
+ c_handoverToDo = false;
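+    // Re-scan all buckets: if some bucket's handover GCI has not been
+    // reached yet, set c_handoverToDo again and keep waiting; otherwise flip
+    // bucket ownership (activate buckets handed over to us, deactivate
+    // buckets handed over to the restarted node).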
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ if (c_buckets[i].handover) {
+ if (c_buckets[i].handoverGCI > gci) {
+ jam();
+ c_handoverToDo = true; // still waiting for the right GCI
+	  break; /* since all handovers should happen at the same time,
+ * we can break here
+ */
+ } else {
+ c_buckets[i].handover = false;
+#ifdef HANDOVER_DEBUG
+ ndbout_c("Handover Bucket %u", i);
+#endif
+ if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
+ // my bucket to be handed over to me
+ ndbrequire(!c_buckets[i].active);
+ jam();
+ c_buckets[i].active = true;
+ } else {
+ // someone else's bucket to handover to
+ ndbrequire(c_buckets[i].active);
+ jam();
+ c_buckets[i].active = false;
+ }
+ }
+ }
+ }
+ }
+}
+
+/***********************************************************
+ *
+ * Embryo to synchronize the SUMAs so as to know whether a subscriber
+ * has received a GCP_COMPLETE from all SUMAs or not
+ *
+ */
+
+void
+SumaParticipant::runSUB_GCP_COMPLETE_ACC(Signal* signal){
+ jam();
+
+ SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr();
+
+ Uint32 gci = acc->rep.gci;
+
+#ifdef EVENT_DEBUG
+ ndbout_c("SumaParticipant::runSUB_GCP_COMPLETE_ACC gci = %u", gci);
+#endif
+
+ c_failoverBuffer.subGcpCompleteRep(gci);
+}
+
+void
+Suma::execSUB_GCP_COMPLETE_ACC(Signal* signal){
+ jamEntry();
+
+ if (RtoI(signal->getSendersBlockRef(), false) != RNIL) {
+ jam();
+ // Ack from other SUMA
+ runSUB_GCP_COMPLETE_ACC(signal);
+ return;
+ }
+
+ jam();
+  // Ack from user and not an ack from another SUMA; redistribute within the node group
+
+ SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr();
+ Uint32 gci = acc->rep.gci;
+ Uint32 senderRef = acc->rep.senderRef;
+ Uint32 subscriberData = acc->rep.subscriberData;
+
+#ifdef EVENT_DEBUG
+ ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = %u", gci);
+#endif
+ bool moreToCome = false;
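+  // moreToCome remains false only when every subscriber that has started
+  // (m_firstGCI <= gci) has acknowledged this GCI; in that case we ack the
+  // GCI to the other SUMAs in the node group below.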
+
+ SubscriberPtr subbPtr;
+ for(c_dataSubscribers.first(subbPtr);
+ !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
+#ifdef EVENT_DEBUG
+ ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC %u == %u && %u == %u",
+ subbPtr.p->m_subscriberRef,
+ senderRef,
+ subbPtr.p->m_subscriberData,
+ subscriberData);
+#endif
+ if (subbPtr.p->m_subscriberRef == senderRef &&
+ subbPtr.p->m_subscriberData == subscriberData) {
+ jam();
+#ifdef EVENT_DEBUG
+ ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = FOUND SUBSCRIBER");
+#endif
+ subbPtr.p->m_lastGCI = gci;
+ } else if (subbPtr.p->m_lastGCI < gci) {
+ jam();
+ if (subbPtr.p->m_firstGCI <= gci)
+ moreToCome = true;
+ } else
+ jam();
+ }
+
+ if (!moreToCome) {
+ // tell the other SUMA's that I'm done with this GCI
+ jam();
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ Uint32 id = c_nodesInGroup[i];
+ Uint32 ref = calcSumaBlockRef(id);
+ if ((ref != reference()) && c_aliveNodes.get(id)) {
+ jam();
+ sendSignal(ref, GSN_SUB_GCP_COMPLETE_ACC, signal,
+ SubGcpCompleteAcc::SignalLength, JBB);
+ } else
+ jam();
+ }
+ }
+}
+
+static Uint32 tmpFailoverBuffer[512];
+//SumaParticipant::FailoverBuffer::FailoverBuffer(DataBuffer<15>::DataBufferPool & p)
+// : m_dataList(p),
+SumaParticipant::FailoverBuffer::FailoverBuffer()
+ :
+ c_gcis(tmpFailoverBuffer), c_sz(512), c_first(0), c_next(0), c_full(false)
+{
+}
+
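+/**
+ * Fixed-size circular buffer of GCIs for which this node skipped sending
+ * because another SUMA was responsible.  subGcpCompleteRep() discards
+ * entries up to and including the completed GCI; nodeFailRep() drains the
+ * buffer when a failure makes this node responsible (the resend itself is
+ * only a debug printout so far).
+ */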
+bool SumaParticipant::FailoverBuffer::subTableData(Uint32 gci, Uint32 *src, int sz)
+{
+ bool ok = true;
+
+ if (c_full) {
+ ok = false;
+#ifdef EVENT_DEBUG
+ ndbout_c("Suma::FailoverBuffer::SubTableData buffer full gci=%u");
+#endif
+ } else {
+ c_gcis[c_next] = gci;
+ c_next++;
+ if (c_next == c_sz) c_next = 0;
+ if (c_next == c_first)
+ c_full = true;
+ // ndbout_c("%u %u %u",c_first,c_next,c_sz);
+ }
+ return ok;
+}
+bool SumaParticipant::FailoverBuffer::subGcpCompleteRep(Uint32 gci)
+{
+ bool ok = true;
+
+ // ndbout_c("Empty");
+ while (true) {
+ if (c_first == c_next && !c_full)
+ break;
+ if (c_gcis[c_first] > gci)
+ break;
+ c_full = false;
+ c_first++;
+ if (c_first == c_sz) c_first = 0;
+ // ndbout_c("%u %u %u : ",c_first,c_next,c_sz);
+ }
+
+ return ok;
+}
+bool SumaParticipant::FailoverBuffer::nodeFailRep()
+{
+ bool ok = true;
+ while (true) {
+ if (c_first == c_next && !c_full)
+ break;
+
+#ifdef EVENT_DEBUG
+ ndbout_c("Suma::FailoverBuffer::NodeFailRep resending gci=%u", c_gcis[c_first]);
+#endif
+ c_full = false;
+ c_first++;
+ if (c_first == c_sz) c_first = 0;
+ }
+ return ok;
+}
+
+/**********************************************************
+ * Suma participant interface
+ *
+ * Stopping and removing of subscriber
+ *
+ */
+
+void
+SumaParticipant::execSUB_STOP_REQ(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_REQ");
+
+ CRASH_INSERTION(13019);
+
+ SubStopReq * const req = (SubStopReq*)signal->getDataPtr();
+ Uint32 senderRef = signal->getSendersBlockRef();
+ Uint32 senderData = req->senderData;
+ Uint32 subscriberRef = req->subscriberRef;
+ Uint32 subscriberData = req->subscriberData;
+ SubscriptionPtr subPtr;
+ Subscription key;
+ key.m_subscriptionId = req->subscriptionId;
+ key.m_subscriptionKey = req->subscriptionKey;
+ Uint32 part = req->part;
+
+ if (key.m_subscriptionKey == 0 &&
+ key.m_subscriptionId == 0 &&
+ subscriberData == 0) {
+ SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->subscriptionId = key.m_subscriptionId;
+ conf->subscriptionKey = key.m_subscriptionKey;
+ conf->subscriberData = subscriberData;
+
+ sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
+ SubStopConf::SignalLength, JBB);
+
+ removeSubscribersOnNode(signal, refToNode(subscriberRef));
+ DBUG_VOID_RETURN;
+ }
+
+ if(!c_subscriptions.find(subPtr, key)){
+ jam();
+ sendSubStopRef(signal, GrepError::SUBSCRIPTION_ID_NOT_FOUND);
+ return;
+ }
+
+ ndbrequire(part == SubscriptionData::TableData);
+
+ SubscriberPtr subbPtr;
+ if (senderRef == reference()){
+ jam();
+ c_subscriberPool.getPtr(subbPtr, senderData);
+ ndbrequire(subbPtr.p->m_subPtrI == subPtr.i &&
+ subbPtr.p->m_subscriberRef == subscriberRef &&
+ subbPtr.p->m_subscriberData == subscriberData);
+ c_removeDataSubscribers.remove(subbPtr);
+ } else {
+ bool found = false;
+ jam();
+ c_dataSubscribers.first(subbPtr);
+ for (;!subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
+ jam();
+ if (subbPtr.p->m_subPtrI == subPtr.i &&
+ refToNode(subbPtr.p->m_subscriberRef) == refToNode(subscriberRef) &&
+ subbPtr.p->m_subscriberData == subscriberData){
+ // ndbout_c("STOP_REQ: before c_dataSubscribers.release");
+ jam();
+ c_dataSubscribers.remove(subbPtr);
+ found = true;
+ break;
+ }
+ }
+ /**
+ * If we didn't find anyone, send ref
+ */
+ if (!found) {
+ jam();
+ sendSubStopRef(signal, GrepError::SUBSCRIBER_NOT_FOUND);
+ DBUG_VOID_RETURN;
+ }
+ }
+
+ subbPtr.p->m_senderRef = senderRef; // store ref to requestor
+ subbPtr.p->m_senderData = senderData; // store ref to requestor
+ c_prepDataSubscribers.add(subbPtr);
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ if (syncPtr.p->m_locked) {
+ jam();
+ sendSubStopRef(signal, /** Error Code */ 0, true);
+ DBUG_VOID_RETURN;
+ }
+ syncPtr.p->m_locked = true;
+
+ syncPtr.p->startDropTrigger(signal);
+ DBUG_VOID_RETURN;
+}
+
+void
+SumaParticipant::sendSubStopComplete(Signal* signal, SubscriberPtr subbPtr){
+ jam();
+
+ CRASH_INSERTION(13020);
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ syncPtr.p->m_locked = false;
+
+ SubStopConf * const conf = (SubStopConf*)signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = subbPtr.p->m_senderData;
+ conf->subscriptionId = subPtr.p->m_subscriptionId;
+ conf->subscriptionKey = subPtr.p->m_subscriptionKey;
+ conf->subscriberData = subbPtr.p->m_subscriberData;
+ Uint32 senderRef = subbPtr.p->m_senderRef;
+
+ c_prepDataSubscribers.release(subbPtr);
+ sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
+ SubStopConf::SignalLength, JBB);
+}
+
+void
+SumaParticipant::sendSubStopRef(Signal* signal, Uint32 errCode,
+ bool temporary){
+ jam();
+ SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->errorCode = errCode;
+ if (temporary) {
+ ref->setTemporary();
+ }
+ sendSignal(signal->getSendersBlockRef(),
+ GSN_SUB_STOP_REF,
+ signal,
+ SubStopRef::SignalLength,
+ JBB);
+ return;
+}
+
+/**************************************************************
+ *
+ * Removing subscription
+ *
+ */
+
+void
+SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) {
+ jamEntry();
+
+ Uint32 senderRef = signal->getSendersBlockRef();
+
+ CRASH_INSERTION(13021);
+
+ const SubRemoveReq req = *(SubRemoveReq*)signal->getDataPtr();
+ SubscriptionPtr subPtr;
+ Subscription key;
+ key.m_subscriptionId = req.subscriptionId;
+ key.m_subscriptionKey = req.subscriptionKey;
+
+ if(!c_subscriptions.find(subPtr, key)) {
+ jam();
+ sendSubRemoveRef(signal, req, (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND);
+ return;
+ }
+
+ int count = 0;
+ {
+ jam();
+ SubscriberPtr i_subbPtr;
+ for(c_prepDataSubscribers.first(i_subbPtr);
+ !i_subbPtr.isNull(); c_prepDataSubscribers.next(i_subbPtr)){
+ jam();
+ if( i_subbPtr.p->m_subPtrI == subPtr.i ) {
+ jam();
+ sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true);
+ return;
+ // c_prepDataSubscribers.release(subbPtr);
+ }
+ }
+ c_dataSubscribers.first(i_subbPtr);
+ while(!i_subbPtr.isNull()){
+ jam();
+ SubscriberPtr subbPtr = i_subbPtr;
+ c_dataSubscribers.next(i_subbPtr);
+ if( subbPtr.p->m_subPtrI == subPtr.i ) {
+ jam();
+ sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true);
+ return;
+ /* Unfinished/untested code. If remove should be possible
+	 * even when subscribers are left, these have to be stopped
+	 * first. See m_markRemove, m_nSubscribers. We also need to
+	 * block remove for this subscription so that multiple
+	 * removes are not possible...
+ */
+ c_dataSubscribers.remove(subbPtr);
+ c_removeDataSubscribers.add(subbPtr);
+ count++;
+ }
+ }
+ c_metaSubscribers.first(i_subbPtr);
+ while(!i_subbPtr.isNull()){
+ jam();
+ SubscriberPtr subbPtr = i_subbPtr;
+ c_metaSubscribers.next(i_subbPtr);
+ if( subbPtr.p->m_subPtrI == subPtr.i ){
+ jam();
+ c_metaSubscribers.release(subbPtr);
+ }
+ }
+ }
+
+ subPtr.p->m_senderRef = senderRef;
+ subPtr.p->m_senderData = req.senderData;
+
+ if (count > 0){
+ jam();
+ ndbrequire(false); // code not finalized
+ subPtr.p->m_markRemove = true;
+ subPtr.p->m_nSubscribers = count;
+ sendSubStopReq(signal);
+ } else {
+ completeSubRemoveReq(signal, subPtr);
+ }
+}
+
+void
+SumaParticipant::completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr) {
+ Uint32 subscriptionId = subPtr.p->m_subscriptionId;
+ Uint32 subscriptionKey = subPtr.p->m_subscriptionKey;
+ Uint32 senderRef = subPtr.p->m_senderRef;
+ Uint32 senderData = subPtr.p->m_senderData;
+
+ {
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+
+ syncPtr.p->release();
+ c_syncPool.release(syncPtr);
+ }
+
+ // if (subPtr.p->m_subscriptionType != SubCreateReq::TableEvent) {
+ // jam();
+ // senderRef = subPtr.p->m_subscriberRef;
+ // }
+ c_subscriptions.release(subPtr);
+
+ /**
+   * This was the last subscription to be removed, so clear c_tables
+ */
+#if 0
+ ndbout_c("c_subscriptionPool.getSize() %d c_subscriptionPool.getNoOfFree()%d",
+ c_subscriptionPool.getSize(),c_subscriptionPool.getNoOfFree());
+#endif
+
+ if(c_subscriptionPool.getSize() == c_subscriptionPool.getNoOfFree()) {
+ jam();
+#if 0
+ ndbout_c("SUB_REMOVE_REQ:Clearing c_tables");
+#endif
+ KeyTable<Table>::Iterator it;
+ for(c_tables.first(it); !it.isNull(); ){
+
+ it.curr.p->release(* this);
+
+ TablePtr tabPtr = it.curr;
+
+ c_tables.next(it);
+ c_tables.release(tabPtr);
+ }
+ }
+
+ SubRemoveConf * const conf = (SubRemoveConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->subscriptionId = subscriptionId;
+ conf->subscriptionKey = subscriptionKey;
+
+ sendSignal(senderRef, GSN_SUB_REMOVE_CONF, signal,
+ SubRemoveConf::SignalLength, JBB);
+}
+
+void
+SumaParticipant::sendSubRemoveRef(Signal* signal, const SubRemoveReq& req,
+ Uint32 errCode, bool temporary){
+ jam();
+ SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->subscriptionId = req.subscriptionId;
+ ref->subscriptionKey = req.subscriptionKey;
+ ref->senderData = req.senderData;
+ ref->err = errCode;
+ if (temporary)
+ ref->setTemporary();
+ releaseSections(signal);
+ sendSignal(signal->getSendersBlockRef(), GSN_SUB_REMOVE_REF,
+ signal, SubRemoveRef::SignalLength, JBB);
+ return;
+}
+
+void
+SumaParticipant::Table::release(SumaParticipant & suma){
+ jam();
+
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, m_attributes);
+ attrBuf.release();
+
+ LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, m_fragments);
+ fragBuf.release();
+}
+
+void
+SumaParticipant::SyncRecord::release(){
+ jam();
+ m_tableList.release();
+
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, m_attributeList);
+ attrBuf.release();
+}
+
+
+/**************************************************************
+ *
+ * Restarting remote node functions, master functionality
+ * (slave does nothing special)
+ * - triggered on INCL_NODEREQ calling startNode
+ * - included node will issue START_ME when it's ready to start
+ * the subscribers
+ *
+ */
+
+Suma::Restart::Restart(Suma& s) : suma(s) {
+ for (int i = 0; i < MAX_REPLICAS; i++) {
+ c_okToStart[i] = false;
+ c_waitingToStart[i] = false;
+ }
+}
+
+void
+Suma::Restart::resetNode(Uint32 sumaRef)
+{
+ jam();
+ int I = suma.RtoI(sumaRef);
+ c_okToStart[I] = false;
+ c_waitingToStart[I] = false;
+}
+
+void
+Suma::Restart::startNode(Signal* signal, Uint32 sumaRef)
+{
+ jam();
+ resetNode(sumaRef);
+
+ // right now we can only handle restarting one node
+ // at a time in a node group
+
+ createSubscription(signal, sumaRef);
+}
+
+void
+Suma::Restart::createSubscription(Signal* signal, Uint32 sumaRef) {
+ jam();
+ suma.c_subscriptions.first(c_subPtr);
+ nextSubscription(signal, sumaRef);
+}
+
+void
+Suma::Restart::nextSubscription(Signal* signal, Uint32 sumaRef) {
+ jam();
+ if (c_subPtr.isNull()) {
+ jam();
+ completeSubscription(signal, sumaRef);
+ return;
+ }
+ SubscriptionPtr subPtr;
+ subPtr.i = c_subPtr.curr.i;
+ subPtr.p = suma.c_subscriptions.getPtr(subPtr.i);
+
+ suma.c_subscriptions.next(c_subPtr);
+
+ SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend();
+
+ req->subscriberRef = suma.reference();
+ req->subscriberData = subPtr.i;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->subscriptionType = subPtr.p->m_subscriptionType |
+ SubCreateReq::RestartFlag;
+
+ switch (subPtr.p->m_subscriptionType) {
+ case SubCreateReq::TableEvent:
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot: {
+ jam();
+
+ Ptr<SyncRecord> syncPtr;
+ suma.c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ syncPtr.p->m_tableList.first(syncPtr.p->m_tableList_it);
+
+ ndbrequire(!syncPtr.p->m_tableList_it.isNull());
+
+ req->tableId = *syncPtr.p->m_tableList_it.data;
+
+#if 0
+ for (int i = 0; i < MAX_TABLES; i++)
+ if (subPtr.p->m_tables[i]) {
+ req->tableId = i;
+ break;
+ }
+#endif
+
+ suma.sendSignal(sumaRef, GSN_SUB_CREATE_REQ, signal,
+ SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
+ return;
+ }
+ case SubCreateReq::SingleTableScan :
+ // TODO
+ jam();
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Suma::execSUB_CREATE_CONF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_CREATE_CONF");
+#endif
+
+ const Uint32 senderRef = signal->senderBlockRef();
+
+ SubCreateConf * const conf = (SubCreateConf *)signal->getDataPtr();
+
+ Subscription key;
+ const Uint32 subscriberData = conf->subscriberData;
+ key.m_subscriptionId = conf->subscriptionId;
+ key.m_subscriptionKey = conf->subscriptionKey;
+
+ SubscriptionPtr subPtr;
+ ndbrequire(c_subscriptions.find(subPtr, key));
+
+ switch(subPtr.p->m_subscriptionType) {
+ case SubCreateReq::TableEvent:
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot:
+ {
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+
+ syncPtr.p->m_tableList.next(syncPtr.p->m_tableList_it);
+ if (syncPtr.p->m_tableList_it.isNull()) {
+ jam();
+ SubSyncReq *req = (SubSyncReq *)signal->getDataPtrSend();
+
+ req->subscriptionId = key.m_subscriptionId;
+ req->subscriptionKey = key.m_subscriptionKey;
+ req->subscriberData = subscriberData;
+ req->part = (Uint32) SubscriptionData::MetaData;
+
+ sendSignal(senderRef, GSN_SUB_SYNC_REQ, signal,
+ SubSyncReq::SignalLength, JBB);
+ } else {
+ jam();
+ SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend();
+
+ req->subscriberRef = reference();
+ req->subscriberData = subPtr.i;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->subscriptionType = subPtr.p->m_subscriptionType |
+ SubCreateReq::RestartFlag |
+ SubCreateReq::AddTableFlag;
+
+ req->tableId = *syncPtr.p->m_tableList_it.data;
+
+ sendSignal(senderRef, GSN_SUB_CREATE_REQ, signal,
+ SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
+ }
+ }
+ return;
+ case SubCreateReq::SingleTableScan:
+ ndbrequire(false);
+ }
+ ndbrequire(false);
+}
+
+void
+Suma::execSUB_CREATE_REF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_CREATE_REF");
+#endif
+ //ndbrequire(false);
+}
+
+void
+Suma::execSUB_SYNC_CONF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_SYNC_CONF");
+#endif
+ Uint32 sumaRef = signal->getSendersBlockRef();
+
+ SubSyncConf *conf = (SubSyncConf *)signal->getDataPtr();
+ Subscription key;
+
+ key.m_subscriptionId = conf->subscriptionId;
+ key.m_subscriptionKey = conf->subscriptionKey;
+ // SubscriptionData::Part part = (SubscriptionData::Part)conf->part;
+ // const Uint32 subscriberData = conf->subscriberData;
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.find(subPtr, key);
+
+ switch(subPtr.p->m_subscriptionType) {
+ case SubCreateReq::TableEvent:
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot:
+ jam();
+ Restart.nextSubscription(signal, sumaRef);
+ return;
+ case SubCreateReq::SingleTableScan:
+ ndbrequire(false);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Suma::execSUB_SYNC_REF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_SYNC_REF");
+#endif
+ //ndbrequire(false);
+}
+
+void
+Suma::execSUMA_START_ME(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUMA_START_ME");
+#endif
+
+ Restart.runSUMA_START_ME(signal, signal->getSendersBlockRef());
+}
+
+void
+Suma::Restart::runSUMA_START_ME(Signal* signal, Uint32 sumaRef) {
+ int I = suma.RtoI(sumaRef);
+
+ // restarting Suma is ready for SUB_START_REQ
+ if (c_waitingToStart[I]) {
+    // we delayed startSubscriber because the restarting suma was not ready
+ c_waitingToStart[I] = false;
+ startSubscriber(signal, sumaRef);
+ } else {
+    // do startSubscriber as soon as it's time
+ c_okToStart[I] = true;
+ }
+}
+
+void
+Suma::Restart::completeSubscription(Signal* signal, Uint32 sumaRef) {
+ jam();
+ int I = suma.RtoI(sumaRef);
+
+ if (c_okToStart[I]) {// otherwise will start when START_ME comes
+ c_okToStart[I] = false;
+ startSubscriber(signal, sumaRef);
+ } else {
+ c_waitingToStart[I] = true;
+ }
+}
+
+void
+Suma::Restart::startSubscriber(Signal* signal, Uint32 sumaRef) {
+ jam();
+ suma.c_dataSubscribers.first(c_subbPtr);
+ nextSubscriber(signal, sumaRef);
+}
+
+void
+Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
+ Signal* signal, Uint32 sumaRef)
+{
+ jam();
+ SubStartReq * req = (SubStartReq *)signal->getDataPtrSend();
+
+ req->senderRef = suma.reference();
+ req->senderData = subbPtr.p->m_senderData;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->part = SubscriptionData::TableData;
+ req->subscriberData = subbPtr.p->m_subscriberData;
+ req->subscriberRef = subbPtr.p->m_subscriberRef;
+
+  // the restarting suma will not respond to this until start phase 5,
+  // since data copying is not completed until then
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::Restart::sendSubStartReq sending GSN_SUB_START_REQ id=%u key=%u",
+ req->subscriptionId, req->subscriptionKey);
+#endif
+ suma.sendSignal(sumaRef, GSN_SUB_START_REQ,
+ signal, SubStartReq::SignalLength2, JBB);
+}
+
+void
+Suma::execSUB_START_CONF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_START_CONF");
+#endif
+ Uint32 sumaRef = signal->getSendersBlockRef();
+ Restart.nextSubscriber(signal, sumaRef);
+}
+
+void
+Suma::execSUB_START_REF(Signal* signal) {
+ jamEntry();
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUB_START_REF");
+#endif
+ //ndbrequire(false);
+}
+
+void
+Suma::Restart::nextSubscriber(Signal* signal, Uint32 sumaRef) {
+ jam();
+ if (c_subbPtr.isNull()) {
+ jam();
+ completeSubscriber(signal, sumaRef);
+ return;
+ }
+
+ SubscriberPtr subbPtr = c_subbPtr;
+ suma.c_dataSubscribers.next(c_subbPtr);
+
+ /*
+ * get subscription ptr for this subscriber
+ */
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ switch (subPtr.p->m_subscriptionType) {
+ case SubCreateReq::TableEvent:
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot:
+ {
+ jam();
+ sendSubStartReq(subPtr, subbPtr, signal, sumaRef);
+#if 0
+ SubStartReq * req = (SubStartReq *)signal->getDataPtrSend();
+
+ req->senderRef = reference();
+ req->senderData = subbPtr.p->m_senderData;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->part = SubscriptionData::TableData;
+ req->subscriberData = subbPtr.p->m_subscriberData;
+ req->subscriberRef = subbPtr.p->m_subscriberRef;
+
+ // restarting suma will not respond to this until startphase 5
+ // since it is not until then data copying has been completed
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::nextSubscriber sending GSN_SUB_START_REQ id=%u key=%u",
+ req->subscriptionId, req->subscriptionKey);
+#endif
+ suma.sendSignal(sumaRef, GSN_SUB_START_REQ,
+ signal, SubStartReq::SignalLength2, JBB);
+#endif
+ }
+ return;
+ case SubCreateReq::SingleTableScan:
+ ndbrequire(false);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Suma::Restart::completeSubscriber(Signal* signal, Uint32 sumaRef) {
+ completeRestartingNode(signal, sumaRef);
+}
+
+void
+Suma::Restart::completeRestartingNode(Signal* signal, Uint32 sumaRef) {
+ jam();
+ SumaHandoverReq * req = (SumaHandoverReq *)signal->getDataPtrSend();
+
+ req->gci = suma.getFirstGCI(signal);
+
+ suma.sendSignal(sumaRef, GSN_SUMA_HANDOVER_REQ, signal,
+ SumaHandoverReq::SignalLength, JBB);
+}
+
+// only run on restarting suma
+
+void
+Suma::execSUMA_HANDOVER_REQ(Signal* signal)
+{
+ jamEntry();
+ // Uint32 sumaRef = signal->getSendersBlockRef();
+ SumaHandoverReq const * req = (SumaHandoverReq *)signal->getDataPtr();
+
+ Uint32 gci = req->gci;
+ Uint32 new_gci = getFirstGCI(signal);
+
+ if (new_gci > gci) {
+ gci = new_gci;
+ }
+
+  { // all recreated subscribers at the restarting SUMA start at the same GCI
+ SubscriberPtr subbPtr;
+ for(c_dataSubscribers.first(subbPtr);
+ !subbPtr.isNull();
+ c_dataSubscribers.next(subbPtr)){
+ subbPtr.p->m_firstGCI = gci;
+ }
+ }
+
+#ifdef NODEFAIL_DEBUG
+ ndbout_c("Suma::execSUMA_HANDOVER_REQ, gci = %u", gci);
+#endif
+
+ c_handoverToDo = false;
+ c_restartLock = false;
+ {
+#ifdef HANDOVER_DEBUG
+ int c = 0;
+#endif
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ jam();
+ if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
+#ifdef HANDOVER_DEBUG
+ c++;
+#endif
+ jam();
+ c_buckets[i].active = false;
+ c_buckets[i].handoverGCI = gci;
+ c_buckets[i].handover = true;
+ c_buckets[i].handover_started = false;
+ c_handoverToDo = true;
+ }
+ }
+#ifdef HANDOVER_DEBUG
+ ndbout_c("prepared handover of bucket %u buckets", c);
+#endif
+ }
+
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ jam();
+ Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
+ if (ref != reference()) {
+ jam();
+ sendSignal(ref, GSN_SUMA_HANDOVER_CONF, signal,
+ SumaHandoverConf::SignalLength, JBB);
+ }//if
+ }
+}
+
+// only run on all but restarting suma
+void
+Suma::execSUMA_HANDOVER_CONF(Signal* signal) {
+ jamEntry();
+ Uint32 sumaRef = signal->getSendersBlockRef();
+ SumaHandoverConf const * conf = (SumaHandoverConf *)signal->getDataPtr();
+
+ Uint32 gci = conf->gci;
+
+#ifdef HANDOVER_DEBUG
+ ndbout_c("Suma::execSUMA_HANDOVER_CONF, gci = %u", gci);
+#endif
+
+  /* TODO, if we are restarting several SUMAs (>2 in a node group)
+   * we have to collect all these confs before proceeding
+ */
+
+ // restarting node is now prepared and ready
+ c_preparingNodes.clear(refToNode(sumaRef)); /* !! important to do before
+ * below since it affects
+ * getResponsibleSumaNodeId()
+ */
+
+ c_handoverToDo = false;
+ // mark all active buckets really belonging to restarting SUMA
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ if (c_buckets[i].active) {
+ // I'm running this bucket
+ if (getResponsibleSumaNodeId(i) == refToNode(sumaRef)) {
+ // but it should really be the restarted node
+ c_buckets[i].handoverGCI = gci;
+ c_buckets[i].handover = true;
+ c_buckets[i].handover_started = false;
+ c_handoverToDo = true;
+ }
+ }
+ }
+}
+
+template void append(DataBuffer<11>&,SegmentedSectionPtr,SectionSegmentPool&);
+
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
new file mode 100644
index 00000000000..65869f44423
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -0,0 +1,600 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SUMA_H
+#define SUMA_H
+
+#include <ndb_limits.h>
+#include <SimulatedBlock.hpp>
+
+#include <NodeBitmask.hpp>
+
+#include <SLList.hpp>
+#include <DLList.hpp>
+#include <KeyTable.hpp>
+#include <DataBuffer.hpp>
+#include <SignalCounter.hpp>
+#include <AttributeHeader.hpp>
+#include <AttributeList.hpp>
+
+#include <signaldata/UtilSequence.hpp>
+#include <signaldata/SumaImpl.hpp>
+
+class SumaParticipant : public SimulatedBlock {
+protected:
+ SumaParticipant(const Configuration & conf);
+ virtual ~SumaParticipant();
+ BLOCK_DEFINES(SumaParticipant);
+
+protected:
+ /**
+ * Private interface
+ */
+ void execSUB_CREATE_REQ(Signal* signal);
+ void execSUB_REMOVE_REQ(Signal* signal);
+
+ void execSUB_START_REQ(Signal* signal);
+ void execSUB_STOP_REQ(Signal* signal);
+
+ void execSUB_SYNC_REQ(Signal* signal);
+ void execSUB_ABORT_SYNC_REQ(Signal* signal);
+
+ void execSUB_STOP_CONF(Signal* signal);
+ void execSUB_STOP_REF(Signal* signal);
+
+ /**
+ * Dict interface
+ */
+ void execLIST_TABLES_REF(Signal* signal);
+ void execLIST_TABLES_CONF(Signal* signal);
+ void execGET_TABINFOREF(Signal* signal);
+ void execGET_TABINFO_CONF(Signal* signal);
+#if 0
+ void execGET_TABLEID_CONF(Signal* signal);
+ void execGET_TABLEID_REF(Signal* signal);
+#endif
+ /**
+ * Scan interface
+ */
+ void execSCAN_HBREP(Signal* signal);
+ void execSCAN_FRAGREF(Signal* signal);
+ void execSCAN_FRAGCONF(Signal* signal);
+ void execTRANSID_AI(Signal* signal);
+ void execSUB_SYNC_CONTINUE_REF(Signal* signal);
+ void execSUB_SYNC_CONTINUE_CONF(Signal* signal);
+
+ /**
+ * Trigger logging
+ */
+ void execTRIG_ATTRINFO(Signal* signal);
+ void execFIRE_TRIG_ORD(Signal* signal);
+ void execSUB_GCP_COMPLETE_REP(Signal* signal);
+ void runSUB_GCP_COMPLETE_ACC(Signal* signal);
+
+ /**
+ * DIH signals
+ */
+ void execDI_FCOUNTREF(Signal* signal);
+ void execDI_FCOUNTCONF(Signal* signal);
+ void execDIGETPRIMREF(Signal* signal);
+ void execDIGETPRIMCONF(Signal* signal);
+
+ /**
+ * Trigger administration
+ */
+ void execCREATE_TRIG_REF(Signal* signal);
+ void execCREATE_TRIG_CONF(Signal* signal);
+ void execDROP_TRIG_REF(Signal* signal);
+ void execDROP_TRIG_CONF(Signal* signal);
+
+ /**
+ * continueb
+ */
+ void execCONTINUEB(Signal* signal);
+
+public:
+ typedef DataBuffer<15> TableList;
+
+ union FragmentDescriptor {
+ struct {
+ Uint16 m_fragmentNo;
+ Uint16 m_nodeId;
+ } m_fragDesc;
+ Uint32 m_dummy;
+ };
+
+ /**
+ * Used when sending SCAN_FRAG
+ */
+ union AttributeDescriptor {
+ struct {
+ Uint16 attrId;
+ Uint16 unused;
+ } m_attrDesc;
+ Uint32 m_dummy;
+ };
+
+ struct Table {
+ Table() { m_tableId = ~0; }
+ void release(SumaParticipant&);
+
+ union { Uint32 m_tableId; Uint32 key; };
+ Uint32 m_schemaVersion;
+ Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete
+ Uint32 m_triggerIds[3]; // Insert/Update/Delete
+
+ /**
+ * Default order in which to ask for attributes during scan
+ * 1) Fixed, not nullable
+ * 2) Rest
+ */
+ DataBuffer<15>::Head m_attributes; // Attribute id's
+
+ /**
+ * Fragments
+ */
+ DataBuffer<15>::Head m_fragments; // Fragment descriptors
+
+ /**
+ * Hash table stuff
+ */
+ Uint32 nextHash;
+ union { Uint32 prevHash; Uint32 nextPool; };
+ Uint32 hashValue() const {
+ return m_tableId;
+ }
+ bool equal(const Table& rec) const {
+ return m_tableId == rec.m_tableId;
+ }
+ };
+ typedef Ptr<Table> TablePtr;
+
+ /**
+ * Subscriptions
+ */
+ struct SyncRecord {
+ SyncRecord(SumaParticipant& s, DataBuffer<15>::DataBufferPool & p)
+ : m_locked(false), m_tableList(p), suma(s)
+#ifdef ERROR_INSERT
+ , cerrorInsert(s.cerrorInsert)
+#endif
+ {}
+
+ void release();
+
+ Uint32 m_subscriptionPtrI;
+ bool m_locked;
+ bool m_doSendSyncData;
+ bool m_error;
+    TableList m_tableList;    // Tables to sync (snapshotted at beginning)
+ TableList::DataBufferIterator m_tableList_it;
+
+ /**
+ * Sync meta
+ */
+ void startMeta(Signal*);
+ void nextMeta(Signal*);
+ void completeMeta(Signal*);
+
+ /**
+ * Create triggers
+ */
+ Uint32 m_latestTriggerId;
+ void startTrigger(Signal* signal);
+ void nextTrigger(Signal* signal);
+ void completeTrigger(Signal* signal);
+ void createAttributeMask(AttributeMask&, Table*);
+
+ /**
+ * Drop triggers
+ */
+ void startDropTrigger(Signal* signal);
+ void nextDropTrigger(Signal* signal);
+ void completeDropTrigger(Signal* signal);
+
+ /**
+ * Sync data
+ */
+ Uint32 m_currentTable; // Index in m_tableList
+ Uint32 m_currentFragment; // Index in tabPtr.p->m_fragments
+ DataBuffer<15>::Head m_attributeList; // Attribute if other than default
+ DataBuffer<15>::Head m_tabList; // tables if other than default
+
+ Uint32 m_currentTableId; // Current table
+ Uint32 m_currentNoOfAttributes; // No of attributes for current table
+ void startScan(Signal*);
+ void nextScan(Signal*);
+ bool getNextFragment(TablePtr * tab, FragmentDescriptor * fd);
+ void completeScan(Signal*);
+
+ SumaParticipant & suma;
+#ifdef ERROR_INSERT
+ UintR &cerrorInsert;
+#endif
+ BlockNumber number() const { return suma.number(); }
+ void progError(int line, int cause, const char * extra) {
+ suma.progError(line, cause, extra);
+ }
+
+ void runLIST_TABLES_CONF(Signal* signal);
+ void runGET_TABINFO_CONF(Signal* signal);
+ void runGET_TABINFOREF(Signal* signal);
+
+ void runDI_FCOUNTCONF(Signal* signal);
+ void runDIGETPRIMCONF(Signal* signal);
+
+ void runCREATE_TRIG_CONF(Signal* signal);
+ void runDROP_TRIG_CONF(Signal* signal);
+ void runDROP_TRIG_REF(Signal* signal);
+ void runDropTrig(Signal* signal, Uint32 triggerId, Uint32 tableId);
+
+ union { Uint32 nextPool; Uint32 nextList; Uint32 ptrI; };
+ };
+ friend struct SyncRecord;
+
+ struct Subscription {
+ Uint32 m_subscriberRef;
+ Uint32 m_subscriberData;
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_subscriptionId;
+ Uint32 m_subscriptionKey;
+ Uint32 m_subscriptionType;
+ Uint32 m_coordinatorRef;
+ Uint32 m_syncPtrI; // Active sync operation
+ Uint32 m_nSubscribers;
+ bool m_markRemove;
+
+ Uint32 nextHash;
+ union { Uint32 prevHash; Uint32 nextPool; };
+
+ Uint32 hashValue() const {
+ return m_subscriptionId + m_subscriptionKey;
+ }
+
+ bool equal(const Subscription & s) const {
+ return
+ m_subscriptionId == s.m_subscriptionId &&
+ m_subscriptionKey == s.m_subscriptionKey;
+ }
+ /**
+ * The following holds the table names of tables included
+ * in the subscription.
+ */
+    // TODO we've got to fix this, this is too inefficient. Tomas
+ char m_tables[MAX_TABLES];
+#if 0
+ char m_tableNames[MAX_TABLES][MAX_TAB_NAME_SIZE];
+#endif
+ /**
+ * "Iterator" used to iterate through m_tableNames
+ */
+ Uint32 m_maxTables;
+ Uint32 m_currentTable;
+ };
+ typedef Ptr<Subscription> SubscriptionPtr;
+
+ struct Subscriber {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_subscriberRef;
+ Uint32 m_subscriberData;
+ Uint32 m_subPtrI; //reference to subscription
+ Uint32 m_firstGCI; // first GCI to send
+    Uint32 m_lastGCI; // last acknowledged GCI
+ Uint32 nextList;
+ union { Uint32 nextPool; Uint32 prevList; };
+ };
+ typedef Ptr<Subscriber> SubscriberPtr;
+
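+  /**
+   * A bucket groups the rows whose hash value maps to it
+   * (hashValue % NO_OF_BUCKETS) and tracks which SUMA is responsible for
+   * sending their data.  'active' means this node currently sends the
+   * bucket; 'handover' means ownership changes at 'handoverGCI'
+   * (see execSUB_GCP_COMPLETE_REP and decideWhoToSend).
+   */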
+ struct Bucket {
+ bool active;
+ bool handover;
+ bool handover_started;
+ Uint32 handoverGCI;
+ };
+#define NO_OF_BUCKETS 24
+ struct Bucket c_buckets[NO_OF_BUCKETS];
+ bool c_handoverToDo;
+ Uint32 c_lastCompleteGCI;
+
+ /**
+ *
+ */
+ DLList<Subscriber> c_metaSubscribers;
+ DLList<Subscriber> c_dataSubscribers;
+ DLList<Subscriber> c_prepDataSubscribers;
+ DLList<Subscriber> c_removeDataSubscribers;
+
+ /**
+ * Lists
+ */
+ KeyTable<Table> c_tables;
+ DLHashTable<Subscription> c_subscriptions;
+
+ /**
+ * Pools
+ */
+ ArrayPool<Subscriber> c_subscriberPool;
+ ArrayPool<Table> c_tablePool_;
+ ArrayPool<Subscription> c_subscriptionPool;
+ ArrayPool<SyncRecord> c_syncPool;
+ DataBuffer<15>::DataBufferPool c_dataBufferPool;
+
+ /**
+ * for restarting Suma not to start sending data too early
+ */
+ bool c_restartLock;
+
+ /**
+   * for flagging that a GCI contains inconsistent data,
+   * typically due to node failure
+ */
+
+ Uint32 c_lastInconsistentGCI;
+ Uint32 c_nodeFailGCI;
+
+ NodeBitmask c_failedApiNodes;
+
+ /**
+ * Functions
+ */
+ bool removeSubscribersOnNode(Signal *signal, Uint32 nodeId);
+
+ bool parseTable(Signal* signal, class GetTabInfoConf* conf, Uint32 tableId,
+ SyncRecord* syncPtr_p);
+ bool checkTableTriggers(SegmentedSectionPtr ptr);
+
+ void addTableId(Uint32 TableId,
+ SubscriptionPtr subPtr, SyncRecord *psyncRec);
+
+ void sendSubIdRef(Signal* signal, Uint32 errorCode);
+ void sendSubCreateConf(Signal* signal, Uint32 sender, SubscriptionPtr subPtr);
+ void sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint32 errorCode);
+ void sendSubStartRef(SubscriptionPtr subPtr, Signal* signal,
+ Uint32 errorCode, bool temporary = false);
+ void sendSubStartRef(Signal* signal,
+ Uint32 errorCode, bool temporary = false);
+ void sendSubStopRef(Signal* signal,
+ Uint32 errorCode, bool temporary = false);
+ void sendSubSyncRef(Signal* signal, Uint32 errorCode);
+ void sendSubRemoveRef(Signal* signal, const SubRemoveReq& ref,
+ Uint32 errorCode, bool temporary = false);
+ void sendSubStartComplete(Signal*, SubscriberPtr, Uint32,
+ SubscriptionData::Part);
+ void sendSubStopComplete(Signal*, SubscriberPtr);
+ void sendSubStopReq(Signal* signal, bool unlock= false);
+
+ void completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr);
+
+ Uint32 getFirstGCI(Signal* signal);
+ Uint32 decideWhoToSend(Uint32 nBucket, Uint32 gci);
+
+ virtual Uint32 getStoreBucket(Uint32 v) = 0;
+ virtual Uint32 getResponsibleSumaNodeId(Uint32 D) = 0;
+ virtual Uint32 RtoI(Uint32 sumaRef, bool dieOnNotFound = true) = 0;
+
+ struct FailoverBuffer {
+ // FailoverBuffer(DataBuffer<15>::DataBufferPool & p);
+ FailoverBuffer();
+
+ bool subTableData(Uint32 gci, Uint32 *src, int sz);
+ bool subGcpCompleteRep(Uint32 gci);
+ bool nodeFailRep();
+
+ // typedef DataBuffer<15> GCIDataBuffer;
+ // GCIDataBuffer m_GCIDataBuffer;
+ // GCIDataBuffer::DataBufferIterator m_GCIDataBuffer_it;
+
+ Uint32 *c_gcis;
+ int c_sz;
+
+ // Uint32 *c_buf;
+ // int c_buf_sz;
+
+ int c_first;
+ int c_next;
+ bool c_full;
+ } c_failoverBuffer;
+
+ /**
+ * Table admin
+ */
+ void convertNameToId( SubscriptionPtr subPtr, Signal * signal);
+
+
+};
+
+class Suma : public SumaParticipant {
+ BLOCK_DEFINES(Suma);
+public:
+ Suma(const Configuration & conf);
+ virtual ~Suma();
+private:
+ /**
+ * Public interface
+ */
+ void execCREATE_SUBSCRIPTION_REQ(Signal* signal);
+ void execDROP_SUBSCRIPTION_REQ(Signal* signal);
+
+ void execSTART_SUBSCRIPTION_REQ(Signal* signal);
+ void execSTOP_SUBSCRIPTION_REQ(Signal* signal);
+
+ void execSYNC_SUBSCRIPTION_REQ(Signal* signal);
+ void execABORT_SYNC_REQ(Signal* signal);
+
+ /**
+ * Framework signals
+ */
+
+ void getNodeGroupMembers(Signal* signal);
+
+ void execSTTOR(Signal* signal);
+ void sendSTTORRY(Signal*);
+ void execNDB_STTOR(Signal* signal);
+ void execDUMP_STATE_ORD(Signal* signal);
+ void execREAD_NODESCONF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ void execCONTINUEB(Signal* signal);
+ void execSIGNAL_DROPPED_REP(Signal* signal);
+ void execAPI_FAILREQ(Signal* signal) ;
+
+ void execSUB_GCP_COMPLETE_ACC(Signal* signal);
+
+ /**
+ * Controller interface
+ */
+ void execSUB_CREATE_REF(Signal* signal);
+ void execSUB_CREATE_CONF(Signal* signal);
+
+ void execSUB_DROP_REF(Signal* signal);
+ void execSUB_DROP_CONF(Signal* signal);
+
+ void execSUB_START_REF(Signal* signal);
+ void execSUB_START_CONF(Signal* signal);
+
+ void execSUB_STOP_REF(Signal* signal);
+ void execSUB_STOP_CONF(Signal* signal);
+
+ void execSUB_SYNC_REF(Signal* signal);
+ void execSUB_SYNC_CONF(Signal* signal);
+
+ void execSUB_ABORT_SYNC_REF(Signal* signal);
+ void execSUB_ABORT_SYNC_CONF(Signal* signal);
+
+ void execSUMA_START_ME(Signal* signal);
+ void execSUMA_HANDOVER_REQ(Signal* signal);
+ void execSUMA_HANDOVER_CONF(Signal* signal);
+
+ /**
+ * Subscription generation interface
+ */
+ void createSequence(Signal* signal);
+ void createSequenceReply(Signal* signal,
+ UtilSequenceConf* conf,
+ UtilSequenceRef* ref);
+ void execUTIL_SEQUENCE_CONF(Signal* signal);
+ void execUTIL_SEQUENCE_REF(Signal* signal);
+ void execCREATE_SUBID_REQ(Signal* signal);
+
+ Uint32 getStoreBucket(Uint32 v);
+ Uint32 getResponsibleSumaNodeId(Uint32 D);
+
+ /**
+ * for Suma that is restarting another
+ */
+
+ struct Restart {
+ Restart(Suma& s);
+
+ Suma & suma;
+
+ bool c_okToStart[MAX_REPLICAS];
+ bool c_waitingToStart[MAX_REPLICAS];
+
+ DLHashTable<SumaParticipant::Subscription>::Iterator c_subPtr; // TODO [MAX_REPLICAS]
+ SubscriberPtr c_subbPtr; // TODO [MAX_REPLICAS]
+
+ void progError(int line, int cause, const char * extra) {
+ suma.progError(line, cause, extra);
+ }
+
+ void resetNode(Uint32 sumaRef);
+ void runSUMA_START_ME(Signal*, Uint32 sumaRef);
+ void startNode(Signal*, Uint32 sumaRef);
+
+ void createSubscription(Signal* signal, Uint32 sumaRef);
+ void nextSubscription(Signal* signal, Uint32 sumaRef);
+ void completeSubscription(Signal* signal, Uint32 sumaRef);
+
+ void startSync(Signal* signal, Uint32 sumaRef);
+ void nextSync(Signal* signal, Uint32 sumaRef);
+ void completeSync(Signal* signal, Uint32 sumaRef);
+
+ void sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
+ Signal* signal, Uint32 sumaRef);
+ void startSubscriber(Signal* signal, Uint32 sumaRef);
+ void nextSubscriber(Signal* signal, Uint32 sumaRef);
+ void completeSubscriber(Signal* signal, Uint32 sumaRef);
+
+ void completeRestartingNode(Signal* signal, Uint32 sumaRef);
+ } Restart;
+
+private:
+ friend class Restart;
+ struct SubCoordinator {
+ Uint32 m_subscriberRef;
+ Uint32 m_subscriberData;
+
+ Uint32 m_subscriptionId;
+ Uint32 m_subscriptionKey;
+
+ NdbNodeBitmask m_participants;
+
+ Uint32 m_outstandingGsn;
+ SignalCounter m_outstandingRequests;
+
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+ };
+ Ptr<SubCoordinator> SubCoordinatorPtr;
+
+ struct Node {
+ Uint32 nodeId;
+ Uint32 alive;
+ Uint32 nextList;
+ union { Uint32 prevList; Uint32 nextPool; };
+ };
+ typedef Ptr<Node> NodePtr;
+
+ /**
+ * Variables
+ */
+ NodeId c_masterNodeId;
+ SLList<Node> c_nodes;
+ NdbNodeBitmask c_aliveNodes;
+ NdbNodeBitmask c_preparingNodes;
+
+ Uint32 RtoI(Uint32 sumaRef, bool dieOnNotFound = true);
+
+ /**
+ * lets each Suma keep track of the other Sumas in its node group
+ */
+ Uint32 c_nodeGroup;
+ Uint32 c_noNodesInGroup;
+ Uint32 c_idInNodeGroup;
+ NodeId c_nodesInGroup[MAX_REPLICAS];
+
+ /**
+ * don't seem to be used
+ */
+ ArrayPool<Node> c_nodePool;
+ ArrayPool<SubCoordinator> c_subCoordinatorPool;
+ DLList<SubCoordinator> c_runningSubscriptions;
+};
+
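+// RtoI maps a SUMA block reference onto its index within the local node group;
+// if the reference is not a group member it either returns RNIL or, when
+// dieOnNotFound is set, fails the ndbrequire.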
+inline Uint32
+Suma::RtoI(Uint32 sumaRef, bool dieOnNotFound) {
+ for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ if (sumaRef == calcSumaBlockRef(c_nodesInGroup[i]))
+ return i;
+ }
+ ndbrequire(!dieOnNotFound);
+ return RNIL;
+}
+
+#endif
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.txt b/storage/ndb/src/kernel/blocks/suma/Suma.txt
new file mode 100644
index 00000000000..eba031226ef
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.txt
@@ -0,0 +1,192 @@
+Protocols involving SUMA:
+
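+(In each diagram the involved blocks are columns and the arrows show the
+direction in which the named signal is sent.)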
+
+
+USER SUMA UTIL
+========================================================
+CREATE_SUBID_REQ
+------------------------->
+ UTIL_SEQUENCE
+ ---------------------->
+ <----------------------
+CREATE_SUBID_CONF
+<-------------------------
+
+
+
+
+USER SUMA DICT
+========================================================
+SUB_CREATE_REQ
+------------------------->
+ case SelectiveTableSnapshot:
+ GET_TABLEID
+ ---------------------->
+ <----------------------
+SUB_CREATE_CONF
+<-------------------------
+
+
+
+
+
+USER SUMA DICT
+========================================================
+SUB_SYNC_REQ::MetaData
+------------------------->
+ case DatabaseSnapshot:
+ LIST_TABLES
+ ---------------------->
+ <----------------------
+for each table...
+ GET_TABINFO
+ ---------------------->
+ <----------------------
+SUB_META_DATA DIH
+<------------------------- =======
+ DI_FCOUNT
+ ---------------------->
+ <----------------------
+ DI_GETPRIM
+ ---------------------->
+ <----------------------
+..end for each table
+SUB_SYNC_CONF
+<-------------------------
+
+
+
+
+USER SUMA LQH
+========================================================
+SUB_SYNC_REQ::TableData
+------------------------->
+for each table...
+ SCAN_FRAG_REQ
+ ---------------------->
+ ATTRINFO
+ ---------------------->
+ SCAN_FRAG_CONF
+ <----------------------
+SUB_SYNC_CONTINUE
+<-------------------------
+------------------------->
+ SCAN_NEXTREQ
+ ---------------------->
+...end for each table
+
+
+
+??????????
+ SCAN_HBREP
+ <----------------------
+
+
+
+USER SUMA
+===============================
+SUB_START_REQ::MetaData
+------------------------->
+SUB_START_CONF
+<-------------------------
+
+
+
+USER SUMA TUP
+========================================================
+SUB_START_REQ::TableData
+------------------------->
+for each table...
+ CREATE_TRIG
+ ---------------------->
+ <----------------------
+...end for each table
+SUB_START_CONF
+<-------------------------
+
+
+USER SUMA XXX
+========================================================
+ TRANSID_AI
+ <----------------------
+SUB_TABLE_DATA
+<-------------------------
+
+
+
+
+
+USER SUMA XXX
+========================================================
+ TRIG_ATTRINFO
+ <----------------------
+ FIRE_TRIG_ORD
+ <----------------------
+SUB_TABLE_DATA
+<-------------------------
+
+
+
+USER SUMA XXX
+========================================================
+ SUB_GCP_COMPLETE_REP
+ <----------------------
+SUB_GCP_COMPLETE_REP
+<-------------------------
+
+for event only:
+SUB_GCP_COMPLETE_ACK
+------------------------->
+ when all subscribers have sent an ACK for the GCI,
+ it is sent on to all other SUMAs in the node group:
+ SUB_GCP_COMPLETE_ACK
+ ---------------------->
+
+
+USER SUMA
+===============================
+SUB_STOP_REQ
+------------------------->
+SUB_STOP_CONF
+<-------------------------
+
+
+
+USER SUMA
+===============================
+SUB_REMOVE_REQ
+------------------------->
+SUB_REMOVE_CONF
+<-------------------------
+
+
+
+MASTER SUMA RESTARTING SUMA
+=========================================
+INCL_NODEREQ
+<---------------------------------------------------------
+for each subscription...
+SUB_CREATE_REQ
+------------------------->...
+<-------------------------
+SUB_SYNC_REQ::MetaData
+------------------------->...
+<-------------------------
+... end for each subscription
+
+
+ SUMA_START_ME (sent asynchronously in start phase 5 to all SUMAs in the node group)
+<-------------------------
+ ------------------------->
+
+for each subscriber...
+SUB_START_REQ (not before SUMA_START_ME)
+------------------------->
+<-------------------------
+... end for each subscriber
+
+SUMA_HANDOVER_REQ
+------------------------->
+ SUMA_HANDOVER_CONF (to all SUMAs in the node group)
+<-------------------------
+ ------------------------->
diff --git a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
new file mode 100644
index 00000000000..b5945db3811
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
@@ -0,0 +1,192 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Suma.hpp"
+
+#include <Properties.hpp>
+#include <Configuration.hpp>
+
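+// SumaParticipant implements the per-node part of SUMA: subscription handling,
+// table scans and forwarding of trigger data. The constructor registers its
+// signal handlers and sizes the record pools.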
+SumaParticipant::SumaParticipant(const Configuration & conf) :
+ SimulatedBlock(SUMA, conf),
+ c_metaSubscribers(c_subscriberPool),
+ c_dataSubscribers(c_subscriberPool),
+ c_prepDataSubscribers(c_subscriberPool),
+ c_removeDataSubscribers(c_subscriberPool),
+ c_tables(c_tablePool_),
+ c_subscriptions(c_subscriptionPool)
+{
+ BLOCK_CONSTRUCTOR(SumaParticipant);
+
+ /**
+ * SUMA participant interface
+ */
+ addRecSignal(GSN_SUB_CREATE_REQ, &SumaParticipant::execSUB_CREATE_REQ);
+ addRecSignal(GSN_SUB_REMOVE_REQ, &SumaParticipant::execSUB_REMOVE_REQ);
+ addRecSignal(GSN_SUB_START_REQ, &SumaParticipant::execSUB_START_REQ);
+ addRecSignal(GSN_SUB_STOP_REQ, &SumaParticipant::execSUB_STOP_REQ);
+ addRecSignal(GSN_SUB_SYNC_REQ, &SumaParticipant::execSUB_SYNC_REQ);
+
+ addRecSignal(GSN_SUB_STOP_CONF, &SumaParticipant::execSUB_STOP_CONF);
+ addRecSignal(GSN_SUB_STOP_REF, &SumaParticipant::execSUB_STOP_REF);
+
+ /**
+ * Dict interface
+ */
+ //addRecSignal(GSN_LIST_TABLES_REF, &SumaParticipant::execLIST_TABLES_REF);
+ addRecSignal(GSN_LIST_TABLES_CONF, &SumaParticipant::execLIST_TABLES_CONF);
+ //addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFO_REF);
+ addRecSignal(GSN_GET_TABINFO_CONF, &SumaParticipant::execGET_TABINFO_CONF);
+ addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFOREF);
+#if 0
+ addRecSignal(GSN_GET_TABLEID_CONF, &SumaParticipant::execGET_TABLEID_CONF);
+ addRecSignal(GSN_GET_TABLEID_REF, &SumaParticipant::execGET_TABLEID_REF);
+#endif
+ /**
+ * Dih interface
+ */
+ //addRecSignal(GSN_DI_FCOUNTREF, &SumaParticipant::execDI_FCOUNTREF);
+ addRecSignal(GSN_DI_FCOUNTCONF, &SumaParticipant::execDI_FCOUNTCONF);
+ //addRecSignal(GSN_DIGETPRIMREF, &SumaParticipant::execDIGETPRIMREF);
+ addRecSignal(GSN_DIGETPRIMCONF, &SumaParticipant::execDIGETPRIMCONF);
+
+ /**
+ * Scan interface
+ */
+ addRecSignal(GSN_SCAN_HBREP, &SumaParticipant::execSCAN_HBREP);
+ addRecSignal(GSN_TRANSID_AI, &SumaParticipant::execTRANSID_AI);
+ addRecSignal(GSN_SCAN_FRAGREF, &SumaParticipant::execSCAN_FRAGREF);
+ addRecSignal(GSN_SCAN_FRAGCONF, &SumaParticipant::execSCAN_FRAGCONF);
+#if 0
+ addRecSignal(GSN_SUB_SYNC_CONTINUE_REF,
+ &SumaParticipant::execSUB_SYNC_CONTINUE_REF);
+#endif
+ addRecSignal(GSN_SUB_SYNC_CONTINUE_CONF,
+ &SumaParticipant::execSUB_SYNC_CONTINUE_CONF);
+
+ /**
+ * Trigger stuff
+ */
+ addRecSignal(GSN_TRIG_ATTRINFO, &SumaParticipant::execTRIG_ATTRINFO);
+ addRecSignal(GSN_FIRE_TRIG_ORD, &SumaParticipant::execFIRE_TRIG_ORD);
+
+ addRecSignal(GSN_CREATE_TRIG_REF, &Suma::execCREATE_TRIG_REF);
+ addRecSignal(GSN_CREATE_TRIG_CONF, &Suma::execCREATE_TRIG_CONF);
+ addRecSignal(GSN_DROP_TRIG_REF, &Suma::execDROP_TRIG_REF);
+ addRecSignal(GSN_DROP_TRIG_CONF, &Suma::execDROP_TRIG_CONF);
+
+ addRecSignal(GSN_SUB_GCP_COMPLETE_REP,
+ &SumaParticipant::execSUB_GCP_COMPLETE_REP);
+
+ /**
+ * @todo: fix pool sizes
+ */
+ Uint32 noTables;
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,
+ &noTables);
+
+ c_tablePool_.setSize(noTables);
+ c_tables.setSize(noTables);
+
+ c_subscriptions.setSize(20); //10
+ c_subscriberPool.setSize(64);
+
+ c_subscriptionPool.setSize(64); //2
+ c_syncPool.setSize(20); //2
+ c_dataBufferPool.setSize(128);
+
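+ // Run the SyncRecord constructor on every entry in the pool up front so that
+ // later seize() calls hand back initialised records.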
+ {
+ SLList<SyncRecord> tmp(c_syncPool);
+ Ptr<SyncRecord> ptr;
+ while(tmp.seize(ptr))
+ new (ptr.p) SyncRecord(* this, c_dataBufferPool);
+ tmp.release();
+ }
+
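+ // All buckets start out inactive with no handover in progress.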
+ for( int i = 0; i < NO_OF_BUCKETS; i++) {
+ c_buckets[i].active = false;
+ c_buckets[i].handover = false;
+ c_buckets[i].handover_started = false;
+ c_buckets[i].handoverGCI = 0;
+ }
+ c_handoverToDo = false;
+ c_lastInconsistentGCI = RNIL;
+ c_lastCompleteGCI = RNIL;
+ c_nodeFailGCI = 0;
+
+ c_failedApiNodes.clear();
+}
+
+SumaParticipant::~SumaParticipant()
+{
+}
+
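+// Suma adds the coordinator role on top of SumaParticipant: start/restart
+// handling, node failure tracking and handover between the SUMAs of a node group.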
+Suma::Suma(const Configuration & conf) :
+ SumaParticipant(conf),
+ Restart(*this),
+ c_nodes(c_nodePool),
+ c_runningSubscriptions(c_subCoordinatorPool)
+{
+
+ c_nodePool.setSize(MAX_NDB_NODES);
+ c_masterNodeId = getOwnNodeId();
+
+ c_nodeGroup = c_noNodesInGroup = c_idInNodeGroup = 0;
+ for (int i = 0; i < MAX_REPLICAS; i++) {
+ c_nodesInGroup[i] = 0;
+ }
+
+ c_subCoordinatorPool.setSize(10);
+
+ // Add received signals
+ addRecSignal(GSN_STTOR, &Suma::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Suma::execNDB_STTOR);
+ addRecSignal(GSN_DUMP_STATE_ORD, &Suma::execDUMP_STATE_ORD);
+ addRecSignal(GSN_READ_NODESCONF, &Suma::execREAD_NODESCONF);
+ addRecSignal(GSN_API_FAILREQ, &Suma::execAPI_FAILREQ);
+ addRecSignal(GSN_NODE_FAILREP, &Suma::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Suma::execINCL_NODEREQ);
+ addRecSignal(GSN_CONTINUEB, &Suma::execCONTINUEB);
+ addRecSignal(GSN_SIGNAL_DROPPED_REP, &Suma::execSIGNAL_DROPPED_REP, true);
+ addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Suma::execUTIL_SEQUENCE_CONF);
+ addRecSignal(GSN_UTIL_SEQUENCE_REF, &Suma::execUTIL_SEQUENCE_REF);
+ addRecSignal(GSN_CREATE_SUBID_REQ,
+ &Suma::execCREATE_SUBID_REQ);
+
+ addRecSignal(GSN_SUB_CREATE_CONF, &Suma::execSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Suma::execSUB_CREATE_REF);
+ addRecSignal(GSN_SUB_SYNC_CONF, &Suma::execSUB_SYNC_CONF);
+ addRecSignal(GSN_SUB_SYNC_REF, &Suma::execSUB_SYNC_REF);
+ addRecSignal(GSN_SUB_START_CONF, &Suma::execSUB_START_CONF);
+ addRecSignal(GSN_SUB_START_REF, &Suma::execSUB_START_REF);
+
+ addRecSignal(GSN_SUMA_START_ME, &Suma::execSUMA_START_ME);
+ addRecSignal(GSN_SUMA_HANDOVER_REQ, &Suma::execSUMA_HANDOVER_REQ);
+ addRecSignal(GSN_SUMA_HANDOVER_CONF, &Suma::execSUMA_HANDOVER_CONF);
+
+ addRecSignal(GSN_SUB_GCP_COMPLETE_ACC,
+ &Suma::execSUB_GCP_COMPLETE_ACC);
+}
+
+Suma::~Suma()
+{
+}
+
+BLOCK_FUNCTIONS(Suma)
+BLOCK_FUNCTIONS(SumaParticipant)
+
diff --git a/storage/ndb/src/kernel/blocks/trix/Makefile.am b/storage/ndb/src/kernel/blocks/trix/Makefile.am
new file mode 100644
index 00000000000..343063a6283
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/trix/Makefile.am
@@ -0,0 +1,23 @@
+noinst_LIBRARIES = libtrix.a
+
+libtrix_a_SOURCES = Trix.cpp
+
+include $(top_srcdir)/ndb/config/common.mk.am
+include $(top_srcdir)/ndb/config/type_kernel.mk.am
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
+
+windoze-dsp: libtrix.dsp
+
+libtrix.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libtrix_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/storage/ndb/src/kernel/blocks/trix/Trix.cpp b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
new file mode 100644
index 00000000000..cd11cb4d575
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
@@ -0,0 +1,967 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "Trix.hpp"
+
+#include <string.h>
+#include <kernel_types.h>
+#include <NdbOut.hpp>
+
+#include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/NodeFailRep.hpp>
+#include <signaldata/DumpStateOrd.hpp>
+#include <signaldata/GetTabInfo.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/BuildIndx.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <signaldata/UtilPrepare.hpp>
+#include <signaldata/UtilExecute.hpp>
+#include <signaldata/UtilRelease.hpp>
+#include <SectionReader.hpp>
+#include <AttributeHeader.hpp>
+
+#define CONSTRAINT_VIOLATION 893
+
+#define DEBUG(x) { ndbout << "TRIX::" << x << endl; }
+
+/**
+ * Trix constructor: registers all received signals and sizes the record pools.
+ */
+Trix::Trix(const Configuration & conf) :
+ SimulatedBlock(TRIX, conf),
+ c_theNodes(c_theNodeRecPool),
+ c_masterNodeId(0),
+ c_masterTrixRef(0),
+ c_noNodesFailed(0),
+ c_noActiveNodes(0),
+ c_theSubscriptions(c_theSubscriptionRecPool)
+{
+ BLOCK_CONSTRUCTOR(Trix);
+
+ // Add received signals
+ addRecSignal(GSN_STTOR, &Trix::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Trix::execNDB_STTOR); // Forwarded from DICT
+ addRecSignal(GSN_READ_NODESCONF, &Trix::execREAD_NODESCONF);
+ addRecSignal(GSN_READ_NODESREF, &Trix::execREAD_NODESREF);
+ addRecSignal(GSN_NODE_FAILREP, &Trix::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Trix::execINCL_NODEREQ);
+ addRecSignal(GSN_DUMP_STATE_ORD, &Trix::execDUMP_STATE_ORD);
+
+ // Index build
+ addRecSignal(GSN_BUILDINDXREQ, &Trix::execBUILDINDXREQ);
+ // Dump testing
+ addRecSignal(GSN_BUILDINDXCONF, &Trix::execBUILDINDXCONF);
+ addRecSignal(GSN_BUILDINDXREF, &Trix::execBUILDINDXREF);
+
+
+ addRecSignal(GSN_UTIL_PREPARE_CONF, &Trix::execUTIL_PREPARE_CONF);
+ addRecSignal(GSN_UTIL_PREPARE_REF, &Trix::execUTIL_PREPARE_REF);
+ addRecSignal(GSN_UTIL_EXECUTE_CONF, &Trix::execUTIL_EXECUTE_CONF);
+ addRecSignal(GSN_UTIL_EXECUTE_REF, &Trix::execUTIL_EXECUTE_REF);
+ addRecSignal(GSN_UTIL_RELEASE_CONF, &Trix::execUTIL_RELEASE_CONF);
+ addRecSignal(GSN_UTIL_RELEASE_REF, &Trix::execUTIL_RELEASE_REF);
+
+
+ // Suma signals
+ addRecSignal(GSN_SUB_CREATE_CONF, &Trix::execSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Trix::execSUB_CREATE_REF);
+ addRecSignal(GSN_SUB_REMOVE_CONF, &Trix::execSUB_REMOVE_CONF);
+ addRecSignal(GSN_SUB_REMOVE_REF, &Trix::execSUB_REMOVE_REF);
+ addRecSignal(GSN_SUB_SYNC_CONF, &Trix::execSUB_SYNC_CONF);
+ addRecSignal(GSN_SUB_SYNC_REF, &Trix::execSUB_SYNC_REF);
+ addRecSignal(GSN_SUB_SYNC_CONTINUE_REQ, &Trix::execSUB_SYNC_CONTINUE_REQ);
+ addRecSignal(GSN_SUB_META_DATA, &Trix::execSUB_META_DATA);
+ addRecSignal(GSN_SUB_TABLE_DATA, &Trix::execSUB_TABLE_DATA);
+
+ // Allocate pool sizes
+ c_theAttrOrderBufferPool.setSize(100);
+ c_theSubscriptionRecPool.setSize(100);
+
+ ArrayList<SubscriptionRecord> subscriptions(c_theSubscriptionRecPool);
+ SubscriptionRecPtr subptr;
+ while(subscriptions.seize(subptr) == true) {
+ new (subptr.p) SubscriptionRecord(c_theAttrOrderBufferPool);
+ }
+ subscriptions.release();
+}
+
+/**
+ *
+ */
+Trix::~Trix()
+{
+}
+
+/**
+ * STTOR: acknowledge the start phase; TRIX needs no further phases, so STTORRY goes straight back to NDBCNTR.
+ */
+void Trix::execSTTOR(Signal* signal)
+{
+ jamEntry();
+
+ //const Uint32 startphase = signal->theData[1];
+ const Uint32 theSignalKey = signal->theData[6];
+
+ signal->theData[0] = theSignalKey;
+ signal->theData[3] = 1;
+ signal->theData[4] = 255; // No more start phases from missra
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB);
+ return;
+}//Trix::execSTTOR()
+
+/**
+ * NDB_STTOR: in restart phase 3, request the node identities with READ_NODESREQ.
+ */
+void Trix::execNDB_STTOR(Signal* signal)
+{
+ jamEntry();
+ BlockReference ndbcntrRef = signal->theData[0];
+ Uint16 startphase = signal->theData[2]; /* RESTART PHASE */
+ Uint16 mynode = signal->theData[1];
+ //Uint16 restarttype = signal->theData[3];
+ //UintR configInfo1 = signal->theData[6]; /* CONFIGURATION INFO PART 1 */
+ //UintR configInfo2 = signal->theData[7]; /* CONFIGURATION INFO PART 2 */
+ switch (startphase) {
+ case 3:
+ jam();
+ /* SYMBOLIC START PHASE 4 */
+ /* ABSOLUTE PHASE 5 */
+ /* REQUEST NODE IDENTITIES FROM DBDIH */
+ signal->theData[0] = calcTrixBlockRef(mynode);
+ sendSignal(ndbcntrRef, GSN_READ_NODESREQ, signal, 1, JBB);
+ return;
+ break;
+ case 6:
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * READ_NODESCONF: build the node record list, mark inactive nodes as failed and set the block state accordingly.
+ */
+void Trix::execREAD_NODESCONF(Signal* signal)
+{
+ jamEntry();
+
+ ReadNodesConf * const readNodes = (ReadNodesConf *)signal->getDataPtr();
+ //Uint32 noOfNodes = readNodes->noOfNodes;
+ NodeRecPtr nodeRecPtr;
+
+ c_masterNodeId = readNodes->masterNodeId;
+ c_masterTrixRef = RNIL;
+ c_noNodesFailed = 0;
+
+ for(unsigned i = 0; i < MAX_NDB_NODES; i++) {
+ jam();
+ if(NodeBitmask::get(readNodes->allNodes, i)) {
+ // Node is defined
+ jam();
+ ndbrequire(c_theNodes.seizeId(nodeRecPtr, i));
+ nodeRecPtr.p->trixRef = calcTrixBlockRef(i);
+ if (i == c_masterNodeId) {
+ c_masterTrixRef = nodeRecPtr.p->trixRef;
+ }
+ if(NodeBitmask::get(readNodes->inactiveNodes, i)){
+ // Node is not active
+ jam();
+ /**-----------------------------------------------------------------
+ * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY.
+ * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE
+ * BLOCKSTATE TO BUSY TO AVOID ADDING TRIGGERS OR INDEXES WHILE
+ * NOT ALL NODES ARE ALIVE.
+ *------------------------------------------------------------------*/
+ arrGuard(c_noNodesFailed, MAX_NDB_NODES);
+ nodeRecPtr.p->alive = false;
+ c_noNodesFailed++;
+ c_blockState = Trix::NODE_FAILURE;
+ }
+ else {
+ // Node is active
+ jam();
+ c_noActiveNodes++;
+ nodeRecPtr.p->alive = true;
+ }
+ }
+ }
+ if (c_noNodesFailed == 0) {
+ c_blockState = Trix::STARTED;
+ }
+}
+
+/**
+ * READ_NODESREF: not yet implemented.
+ */
+void Trix::execREAD_NODESREF(Signal* signal)
+{
+ // NYI
+}
+
+/**
+ * NODE_FAILREP: mark the failed nodes as dead and pick up a possible master change.
+ */
+void Trix::execNODE_FAILREP(Signal* signal)
+{
+ jamEntry();
+ NodeFailRep * const nodeFail = (NodeFailRep *) signal->getDataPtr();
+
+ //Uint32 failureNr = nodeFail->failNo;
+ //Uint32 numberNodes = nodeFail->noOfNodes;
+ Uint32 masterNodeId = nodeFail->masterNodeId;
+
+ NodeRecPtr nodeRecPtr;
+
+ for(c_theNodes.first(nodeRecPtr);
+ nodeRecPtr.i != RNIL;
+ c_theNodes.next(nodeRecPtr)) {
+ if(NodeBitmask::get(nodeFail->theNodes, nodeRecPtr.i)) {
+ nodeRecPtr.p->alive = false;
+ c_noNodesFailed++;
+ c_noActiveNodes--;
+ }
+ }
+ if (c_masterNodeId != masterNodeId) {
+ c_masterNodeId = masterNodeId;
+ NodeRecord* nodeRec = c_theNodes.getPtr(masterNodeId);
+ c_masterTrixRef = nodeRec->trixRef;
+ }
+}
+
+/**
+ * INCL_NODEREQ: a node has been included again; mark it alive and leave the NODE_FAILURE state once no failed nodes remain.
+ */
+void Trix::execINCL_NODEREQ(Signal* signal)
+{
+ jamEntry();
+ UintR node_id = signal->theData[1];
+ NodeRecord* nodeRec = c_theNodes.getPtr(node_id);
+ nodeRec->alive = true;
+ c_noNodesFailed--;
+ c_noActiveNodes++;
+ nodeRec->trixRef = calcTrixBlockRef(node_id);
+ if (c_noNodesFailed == 0) {
+ c_blockState = Trix::STARTED;
+ }
+}
+
+// Debugging
+void
+Trix::execDUMP_STATE_ORD(Signal* signal)
+{
+ jamEntry();
+
+ DumpStateOrd * dumpStateOrd = (DumpStateOrd *)signal->getDataPtr();
+
+ switch(dumpStateOrd->args[0]) {
+ case(300): {// ok
+ // index2 -T; index2 -I -n10000; index2 -c
+ // all dump 300 0 0 0 0 0 4 2
+ // select_count INDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[1] = {1};
+ Uint32 keyColumns[1] = {0};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 1, keyColumns, 1, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ case(301): { // ok
+ // index2 -T; index2 -I -n10000; index2 -c -p
+ // all dump 301 0 0 0 0 0 4 2
+ // select_count INDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[2] = {0, 1};
+ Uint32 keyColumns[1] = {0};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 2, keyColumns, 1, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ case(302): { // ok
+ // index -T; index -I -n1000; index -c -p
+ // all dump 302 0 0 0 0 0 4 2
+ // select_count PNUMINDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[3] = {0, 3, 5};
+ Uint32 keyColumns[1] = {0};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 3, keyColumns, 1, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ case(303): { // ok
+ // index -T -2; index -I -2 -n1000; index -c -p
+ // all dump 303 0 0 0 0 0 4 2
+ // select_count PNUMINDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[3] = {0, 3, 5};
+ Uint32 keyColumns[2] = {0, 1};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 3, keyColumns, 2, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ case(304): { // ok
+ // index -T -L; index -I -L -n1000; index -c -p
+ // all dump 304 0 0 0 0 0 4 2
+ // select_count PNUMINDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[3] = {0, 3, 5};
+ Uint32 keyColumns[1] = {0};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 3, keyColumns, 1, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ case(305): { // ok
+ // index -T -2 -L; index -I -2 -L -n1000; index -c -p
+ // all dump 305 0 0 0 0 0 4 2
+ // select_count PNUMINDEX0000
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtrSend();
+
+ MEMCOPY_NO_WORDS(buildIndxReq,
+ signal->theData + 1,
+ BuildIndxReq::SignalLength);
+ buildIndxReq->setUserRef(reference()); // return to me
+ buildIndxReq->setParallelism(10);
+ Uint32 indexColumns[3] = {0, 3, 5};
+ Uint32 keyColumns[2] = {0, 1};
+ struct LinearSectionPtr orderPtr[2];
+ buildIndxReq->setColumnOrder(indexColumns, 3, keyColumns, 2, orderPtr);
+ sendSignal(reference(),
+ GSN_BUILDINDXREQ,
+ signal,
+ BuildIndxReq::SignalLength,
+ JBB,
+ orderPtr,
+ BuildIndxReq::NoOfSections);
+ break;
+ }
+ default: {
+ // Ignore
+ }
+ }
+}
+
+// Build index
+/**
+ * BUILDINDXREQ: seize a subscription record, store the column order sections and start the build by preparing insert transactions in DBUTIL.
+ */
+void Trix:: execBUILDINDXREQ(Signal* signal)
+{
+ jamEntry();
+ BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtr();
+
+ // Seize a subscription record
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ if (!c_theSubscriptions.seizeId(subRecPtr, buildIndxReq->getBuildId())) {
+ // Failed to allocate subscription record
+ BuildIndxRef * buildIndxRef = (BuildIndxRef *)signal->getDataPtrSend();
+
+ buildIndxRef->setErrorCode(BuildIndxRef::AllocationFailure);
+ releaseSections(signal);
+ sendSignal(buildIndxReq->getUserRef(),
+ GSN_BUILDINDXREF, signal, BuildIndxRef::SignalLength, JBB);
+ return;
+ }
+ subRec = subRecPtr.p;
+ subRec->errorCode = BuildIndxRef::NoError;
+ subRec->userReference = buildIndxReq->getUserRef();
+ subRec->connectionPtr = buildIndxReq->getConnectionPtr();
+ subRec->subscriptionId = buildIndxReq->getBuildId();
+ subRec->subscriptionKey = buildIndxReq->getBuildKey();
+ subRec->indexType = buildIndxReq->getIndexType();
+ subRec->sourceTableId = buildIndxReq->getTableId();
+ subRec->targetTableId = buildIndxReq->getIndexId();
+ subRec->parallelism = buildIndxReq->getParallelism();
+ subRec->expectedConf = 0;
+ subRec->subscriptionCreated = false;
+ subRec->pendingSubSyncContinueConf = false;
+ subRec->prepareId = RNIL;
+
+ // Get column order segments
+ Uint32 noOfSections = signal->getNoOfSections();
+ if(noOfSections > 0) {
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, BuildIndxReq::INDEX_COLUMNS);
+ append(subRec->attributeOrder, ptr, getSectionSegmentPool());
+ subRec->noOfIndexColumns = ptr.sz;
+ }
+ if(noOfSections > 1) {
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, BuildIndxReq::KEY_COLUMNS);
+ append(subRec->attributeOrder, ptr, getSectionSegmentPool());
+ subRec->noOfKeyColumns = ptr.sz;
+ }
+#if 0
+ // Debugging
+ printf("Trix:: execBUILDINDXREQ: Attribute order:\n");
+ subRec->attributeOrder.print(stdout);
+#endif
+ releaseSections(signal);
+ prepareInsertTransactions(signal, subRecPtr);
+}
+
+void Trix:: execBUILDINDXCONF(Signal* signal)
+{
+ printf("Trix:: execBUILDINDXCONF\n");
+}
+
+void Trix:: execBUILDINDXREF(Signal* signal)
+{
+ printf("Trix:: execBUILDINDXREF\n");
+}
+
+void Trix::execUTIL_PREPARE_CONF(Signal* signal)
+{
+ jamEntry();
+ UtilPrepareConf * utilPrepareConf = (UtilPrepareConf *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = utilPrepareConf->senderData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execUTIL_PREPARE_CONF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ subRec->prepareId = utilPrepareConf->prepareId;
+ setupSubscription(signal, subRecPtr);
+}
+
+void Trix::execUTIL_PREPARE_REF(Signal* signal)
+{
+ jamEntry();
+ UtilPrepareRef * utilPrepareRef = (UtilPrepareRef *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = utilPrepareRef->senderData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execUTIL_PREPARE_REF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ subRec->errorCode = BuildIndxRef::InternalError;
+}
+
+void Trix::execUTIL_EXECUTE_CONF(Signal* signal)
+{
+ jamEntry();
+ UtilExecuteConf * utilExecuteConf = (UtilExecuteConf *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = utilExecuteConf->senderData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("rix::execUTIL_EXECUTE_CONF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ subRec->expectedConf--;
+ checkParallelism(signal, subRec);
+ if (subRec->expectedConf == 0)
+ buildComplete(signal, subRecPtr);
+}
+
+void Trix::execUTIL_EXECUTE_REF(Signal* signal)
+{
+ jamEntry();
+ UtilExecuteRef * utilExecuteRef = (UtilExecuteRef *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = utilExecuteRef->senderData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execUTIL_EXECUTE_REF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ ndbrequire(utilExecuteRef->errorCode == UtilExecuteRef::TCError);
+ if(utilExecuteRef->TCErrorCode == CONSTRAINT_VIOLATION)
+ buildFailed(signal, subRecPtr, BuildIndxRef::IndexNotUnique);
+ else
+ buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
+}
+
+void Trix::execSUB_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+ SubCreateConf * subCreateConf = (SubCreateConf *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = subCreateConf->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_CREATE_CONF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ ndbrequire(subRec->subscriptionId == subCreateConf->subscriptionId);
+ ndbrequire(subRec->subscriptionKey == subCreateConf->subscriptionKey);
+ subRec->subscriptionCreated = true;
+ subRecPtr.p = subRec;
+ setupTableScan(signal, subRecPtr);
+}
+
+void Trix::execSUB_CREATE_REF(Signal* signal)
+{
+ jamEntry();
+ // THIS SIGNAL IS NEVER SENT FROM SUMA?
+ /*
+ SubCreateRef * subCreateRef = (SubCreateRef *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = subCreateRef->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_CREATE_REF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
+ */
+}
+
+void Trix::execSUB_SYNC_CONF(Signal* signal)
+{
+ jamEntry();
+ SubSyncConf * subSyncConf = (SubSyncConf *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = subSyncConf->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_SYNC_CONF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ ndbrequire(subRec->subscriptionId == subSyncConf->subscriptionId);
+ ndbrequire(subRec->subscriptionKey == subSyncConf->subscriptionKey);
+ subRecPtr.p = subRec;
+ if(subSyncConf->part == SubscriptionData::MetaData)
+ startTableScan(signal, subRecPtr);
+ else {
+ subRec->expectedConf--;
+ checkParallelism(signal, subRec);
+ if (subRec->expectedConf == 0)
+ buildComplete(signal, subRecPtr);
+ }
+}
+
+void Trix::execSUB_SYNC_REF(Signal* signal)
+{
+ jamEntry();
+ SubSyncRef * subSyncRef = (SubSyncRef *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+
+ subRecPtr.i = subSyncRef->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_SYNC_REF: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
+}
+
+void Trix::execSUB_SYNC_CONTINUE_REQ(Signal* signal)
+{
+ SubSyncContinueReq * subSyncContinueReq =
+ (SubSyncContinueReq *) signal->getDataPtr();
+
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+ subRecPtr.i = subSyncContinueReq->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_SYNC_CONTINUE_REQ: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ subRec->pendingSubSyncContinueConf = true;
+ checkParallelism(signal, subRec);
+}
+
+void Trix::execSUB_META_DATA(Signal* signal)
+{
+ jamEntry();
+}
+
+void Trix::execSUB_TABLE_DATA(Signal* signal)
+{
+ jamEntry();
+ SubTableData * subTableData = (SubTableData *)signal->getDataPtr();
+ SubscriptionRecPtr subRecPtr;
+ SubscriptionRecord* subRec;
+ subRecPtr.i = subTableData->subscriberData;
+ if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
+ printf("Trix::execSUB_TABLE_DATA: Failed to find subscription data %u\n", subRecPtr.i);
+ return;
+ }
+ subRecPtr.p = subRec;
+ SegmentedSectionPtr headerPtr, dataPtr;
+ if (!signal->getSection(headerPtr, 0)) {
+ printf("Trix::execSUB_TABLE_DATA: Failed to get header section\n");
+ }
+ if (!signal->getSection(dataPtr, 1)) {
+ printf("Trix::execSUB_TABLE_DATA: Failed to get data section\n");
+ }
+ executeInsertTransaction(signal, subRecPtr, headerPtr, dataPtr);
+}
+
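+// Create the SUMA subscription: the merged index/key attribute order is sent
+// as a linear section with a SUB_CREATE_REQ of type SingleTableScan.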
+void Trix::setupSubscription(Signal* signal, SubscriptionRecPtr subRecPtr)
+{
+ Uint32 attributeList[MAX_ATTRIBUTES_IN_TABLE * 2];
+ SubCreateReq * subCreateReq = (SubCreateReq *)signal->getDataPtrSend();
+ SubscriptionRecord* subRec = subRecPtr.p;
+// Uint32 listLen = subRec->noOfIndexColumns + subRec->noOfKeyColumns;
+ AttrOrderBuffer::DataBufferIterator iter;
+ Uint32 i = 0;
+
+ jam();
+ bool moreAttributes = subRec->attributeOrder.first(iter);
+ while (moreAttributes) {
+ attributeList[i++] = *iter.data;
+ moreAttributes = subRec->attributeOrder.next(iter);
+ }
+ // Merge index and key column segments
+ struct LinearSectionPtr orderPtr[3];
+ orderPtr[0].p = attributeList;
+ orderPtr[0].sz = subRec->attributeOrder.getSize();
+
+
+ subCreateReq->subscriberRef = reference();
+ subCreateReq->subscriberData = subRecPtr.i;
+ subCreateReq->subscriptionId = subRec->subscriptionId;
+ subCreateReq->subscriptionKey = subRec->subscriptionKey;
+ subCreateReq->tableId = subRec->sourceTableId;
+ subCreateReq->subscriptionType = SubCreateReq::SingleTableScan;
+
+ sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ,
+ signal, SubCreateReq::SignalLength+1, JBB, orderPtr, 1);
+}
+
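+// Ask SUMA to sync the meta data part of the subscription.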
+void Trix::setupTableScan(Signal* signal, SubscriptionRecPtr subRecPtr)
+{
+ SubSyncReq * subSyncReq = (SubSyncReq *)signal->getDataPtrSend();
+
+ jam();
+ subSyncReq->subscriptionId = subRecPtr.i;
+ subSyncReq->subscriptionKey = subRecPtr.p->subscriptionKey;
+ subSyncReq->part = SubscriptionData::MetaData;
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ,
+ signal, SubSyncReq::SignalLength, JBB);
+}
+
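+// Start the table data scan; exactly one SUB_SYNC_CONF is expected back.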
+void Trix::startTableScan(Signal* signal, SubscriptionRecPtr subRecPtr)
+{
+ jam();
+ subRecPtr.p->expectedConf = 1;
+ SubSyncReq * subSyncReq = (SubSyncReq *)signal->getDataPtrSend();
+
+ subSyncReq->subscriptionId = subRecPtr.i;
+ subSyncReq->subscriptionKey = subRecPtr.p->subscriptionKey;
+ subSyncReq->part = SubscriptionData::TableData;
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ,
+ signal, SubSyncReq::SignalLength, JBB);
+}
+
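+// Let DBUTIL prepare a Write against the target (index) table covering the
+// index attributes plus one packed primary key attribute.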
+void Trix::prepareInsertTransactions(Signal* signal,
+ SubscriptionRecPtr subRecPtr)
+{
+ SubscriptionRecord* subRec = subRecPtr.p;
+ UtilPrepareReq * utilPrepareReq =
+ (UtilPrepareReq *)signal->getDataPtrSend();
+
+ jam();
+ utilPrepareReq->senderRef = reference();
+ utilPrepareReq->senderData = subRecPtr.i;
+
+ const Uint32 pageSizeInWords = 128;
+ Uint32 propPage[pageSizeInWords];
+ LinearWriter w(&propPage[0],128);
+ w.first();
+ w.add(UtilPrepareReq::NoOfOperations, 1);
+ w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Write);
+ w.add(UtilPrepareReq::TableId, subRec->targetTableId);
+ // Add index attributes in increasing order and one PK attribute
+ for(Uint32 i = 0; i < subRec->noOfIndexColumns + 1; i++)
+ w.add(UtilPrepareReq::AttributeId, i);
+
+#if 0
+ // Debugging
+ SimplePropertiesLinearReader reader(propPage, w.getWordsUsed());
+ printf("Trix::prepareInsertTransactions: Sent SimpleProperties:\n");
+ reader.printAll(ndbout);
+#endif
+
+ struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections];
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage;
+ sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed();
+ sendSignal(DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal,
+ UtilPrepareReq::SignalLength, JBB,
+ sectionsPtr, UtilPrepareReq::NoOfSections);
+}
+
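+// Turn one scanned row into an index insert: headers and data are copied to
+// linear buffers, the primary key columns are repacked into a single attribute
+// and the row is written through UTIL_EXECUTE_REQ. Rows with NULL index
+// attributes are skipped.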
+void Trix::executeInsertTransaction(Signal* signal,
+ SubscriptionRecPtr subRecPtr,
+ SegmentedSectionPtr headerPtr,
+ SegmentedSectionPtr dataPtr)
+{
+ jam();
+ SubscriptionRecord* subRec = subRecPtr.p;
+ UtilExecuteReq * utilExecuteReq =
+ (UtilExecuteReq *)signal->getDataPtrSend();
+ Uint32* headerBuffer = signal->theData + 25;
+ Uint32* dataBuffer = headerBuffer + headerPtr.sz;
+
+ utilExecuteReq->senderRef = reference();
+ utilExecuteReq->senderData = subRecPtr.i;
+ utilExecuteReq->prepareId = subRec->prepareId;
+#if 0
+ printf("Header size %u\n", headerPtr.sz);
+ for(int i = 0; i < headerPtr.sz; i++)
+ printf("H'%.8x ", headerBuffer[i]);
+ printf("\n");
+
+ printf("Data size %u\n", dataPtr.sz);
+ for(int i = 0; i < dataPtr.sz; i++)
+ printf("H'%.8x ", dataBuffer[i]);
+ printf("\n");
+#endif
+ // Save scan result in linear buffers
+ copy(headerBuffer, headerPtr);
+ copy(dataBuffer, dataPtr);
+
+ // Calculate packed key size
+ Uint32 noOfKeyData = 0;
+ for(Uint32 i = 0; i < headerPtr.sz; i++) {
+ AttributeHeader* keyAttrHead = (AttributeHeader *) headerBuffer + i;
+
+ // Filter out NULL attributes
+ if (keyAttrHead->isNULL())
+ return;
+
+ if (i < subRec->noOfIndexColumns)
+ // Renumber index attributes in consecutive order
+ keyAttrHead->setAttributeId(i);
+ else
+ // Calculate total size of PK attribute
+ noOfKeyData += keyAttrHead->getDataSize();
+ }
+ // Increase expected CONF count
+ subRec->expectedConf++;
+
+ // Pack key attributes
+ AttributeHeader::init(headerBuffer + subRec->noOfIndexColumns,
+ subRec->noOfIndexColumns,
+ noOfKeyData);
+
+ struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections];
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerBuffer;
+ sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz =
+ subRec->noOfIndexColumns + 1;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataBuffer;
+ sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz;
+ sendSignal(DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal,
+ UtilExecuteReq::SignalLength, JBB,
+ sectionsPtr, UtilExecuteReq::NoOfSections);
+}
+
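+// The build has drained; remove the subscription from SUMA before replying.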
+void Trix::buildComplete(Signal* signal, SubscriptionRecPtr subRecPtr)
+{
+ SubRemoveReq * const req = (SubRemoveReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = subRecPtr.i;
+ req->subscriptionId = subRecPtr.p->subscriptionId;
+ req->subscriptionKey = subRecPtr.p->subscriptionKey;
+ sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal,
+ SubRemoveReq::SignalLength, JBB);
+}
+
+void Trix::buildFailed(Signal* signal,
+ SubscriptionRecPtr subRecPtr,
+ BuildIndxRef::ErrorCode errorCode)
+{
+ SubscriptionRecord* subRec = subRecPtr.p;
+
+ subRec->errorCode = errorCode;
+ // Continue accumulating since we currently cannot stop SUMA
+ subRec->expectedConf--;
+ checkParallelism(signal, subRec);
+ if (subRec->expectedConf == 0)
+ buildComplete(signal, subRecPtr);
+}
+
+void
+Trix::execSUB_REMOVE_REF(Signal* signal){
+ jamEntry();
+ //@todo
+ ndbrequire(false);
+}
+
+void
+Trix::execSUB_REMOVE_CONF(Signal* signal){
+ jamEntry();
+
+ SubRemoveConf * const conf = (SubRemoveConf*)signal->getDataPtrSend();
+
+ SubscriptionRecPtr subRecPtr;
+ c_theSubscriptions.getPtr(subRecPtr, conf->senderData);
+
+ if(subRecPtr.p->prepareId != RNIL){
+ jam();
+
+ UtilReleaseReq * const req = (UtilReleaseReq*)signal->getDataPtrSend();
+ req->prepareId = subRecPtr.p->prepareId;
+ req->senderData = subRecPtr.i;
+
+ sendSignal(DBUTIL_REF, GSN_UTIL_RELEASE_REQ, signal,
+ UtilReleaseReq::SignalLength , JBB);
+ return;
+ }
+
+ {
+ UtilReleaseConf * const conf = (UtilReleaseConf*)signal->getDataPtrSend();
+ conf->senderData = subRecPtr.i;
+ execUTIL_RELEASE_CONF(signal);
+ }
+}
+
+void
+Trix::execUTIL_RELEASE_REF(Signal* signal){
+ jamEntry();
+ ndbrequire(false);
+}
+
+void
+Trix::execUTIL_RELEASE_CONF(Signal* signal){
+
+ UtilReleaseConf * const conf = (UtilReleaseConf*)signal->getDataPtrSend();
+
+ SubscriptionRecPtr subRecPtr;
+ c_theSubscriptions.getPtr(subRecPtr, conf->senderData);
+
+ if(subRecPtr.p->errorCode == BuildIndxRef::NoError){
+ // Build is complete, reply to original sender
+ BuildIndxConf * buildIndxConf = (BuildIndxConf *)signal->getDataPtrSend();
+ buildIndxConf->setUserRef(subRecPtr.p->userReference);
+ buildIndxConf->setConnectionPtr(subRecPtr.p->connectionPtr);
+ buildIndxConf->setRequestType(BuildIndxReq::RT_TRIX);
+ buildIndxConf->setIndexType(subRecPtr.p->indexType);
+ buildIndxConf->setTableId(subRecPtr.p->sourceTableId);
+ buildIndxConf->setIndexId(subRecPtr.p->targetTableId);
+
+ sendSignal(subRecPtr.p->userReference, GSN_BUILDINDXCONF, signal,
+ BuildIndxConf::SignalLength , JBB);
+ } else {
+ // Build failed, reply to original sender
+ BuildIndxRef * buildIndxRef = (BuildIndxRef *)signal->getDataPtrSend();
+ buildIndxRef->setUserRef(subRecPtr.p->userReference);
+ buildIndxRef->setConnectionPtr(subRecPtr.p->connectionPtr);
+ buildIndxRef->setRequestType(BuildIndxReq::RT_TRIX);
+ buildIndxRef->setIndexType(subRecPtr.p->indexType);
+ buildIndxRef->setTableId(subRecPtr.p->sourceTableId);
+ buildIndxRef->setIndexId(subRecPtr.p->targetTableId);
+ buildIndxRef->setErrorCode(subRecPtr.p->errorCode);
+
+ sendSignal(subRecPtr.p->userReference, GSN_BUILDINDXREF, signal,
+ BuildIndxRef::SignalLength , JBB);
+ }
+
+ // Release subscription record
+ subRecPtr.p->attributeOrder.release();
+ c_theSubscriptions.release(subRecPtr.i);
+}
+
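+// Flow control towards SUMA: a pending SUB_SYNC_CONTINUE_CONF is only sent once
+// the number of outstanding inserts has dropped below the requested parallelism.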
+void Trix::checkParallelism(Signal* signal, SubscriptionRecord* subRec)
+{
+ if ((subRec->pendingSubSyncContinueConf) &&
+ (subRec->expectedConf < subRec->parallelism)) {
+ SubSyncContinueConf * subSyncContinueConf =
+ (SubSyncContinueConf *) signal->getDataPtrSend();
+ subSyncContinueConf->subscriptionId = subRec->subscriptionId;
+ subSyncContinueConf->subscriptionKey = subRec->subscriptionKey;
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_CONTINUE_CONF, signal,
+ SubSyncContinueConf::SignalLength , JBB);
+ subRec->pendingSubSyncContinueConf = false;
+ }
+}
+
+BLOCK_FUNCTIONS(Trix)
+
+template void append(DataBuffer<15>&,SegmentedSectionPtr,SectionSegmentPool&);
diff --git a/storage/ndb/src/kernel/blocks/trix/Trix.hpp b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
new file mode 100644
index 00000000000..8dc01375fa1
--- /dev/null
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
@@ -0,0 +1,191 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef TRIX_H
+#define TRIX_H
+
+#include <SimulatedBlock.hpp>
+#include <trigger_definitions.h>
+#include <DataBuffer.hpp>
+#include <ArrayList.hpp>
+#include <SimpleProperties.hpp>
+#include <signaldata/DictTabInfo.hpp>
+#include <signaldata/CreateTrig.hpp>
+#include <signaldata/BuildIndx.hpp>
+
+// Error codes
+#define INTERNAL_ERROR_ILLEGAL_CALL 4344
+#define INTERNAL_ERROR_TRIX_BUSY 4345
+
+/**
+ * TRIX - This block manages triggers and indexes (in cooperation with DICT)
+ */
+class Trix : public SimulatedBlock
+{
+public:
+ Trix(const class Configuration & conf);
+ virtual ~Trix();
+
+public:
+ // Subscription data, when communicating with SUMA
+
+ enum RequestType {
+ TABLE_REORG = 0,
+ INDEX_BUILD = 1
+ };
+ typedef DataBuffer<11> AttrOrderBuffer;
+
+private:
+ // Private attributes
+
+ BLOCK_DEFINES(Trix);
+
+ // Declared but not defined
+ //DBtrix(const Trix &obj);
+ //void operator = (const Trix &);
+
+ // Block state
+ enum BlockState {
+ NOT_STARTED,
+ STARTED,
+ NODE_FAILURE,
+ IDLE,
+ BUSY
+ };
+
+ BlockState c_blockState;
+
+ // Node data needed when communicating with remote TRIX blocks
+ struct NodeRecord {
+ bool alive;
+ BlockReference trixRef;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<NodeRecord> NodeRecPtr;
+
+ /**
+ * The pool of node records
+ */
+ ArrayPool<NodeRecord> c_theNodeRecPool;
+
+ /**
+ * The list of other NDB nodes
+ */
+ ArrayList<NodeRecord> c_theNodes;
+
+ Uint32 c_masterNodeId;
+ BlockReference c_masterTrixRef;
+ Uint16 c_noNodesFailed;
+ Uint16 c_noActiveNodes;
+
+ AttrOrderBuffer::DataBufferPool c_theAttrOrderBufferPool;
+
+ struct SubscriptionRecord {
+ SubscriptionRecord(AttrOrderBuffer::DataBufferPool & aop):
+ attributeOrder(aop)
+ {}
+ RequestType requestType;
+ BlockReference userReference; // For user
+ Uint32 connectionPtr; // For user
+ Uint32 subscriptionId; // For Suma
+ Uint32 subscriptionKey; // For Suma
+ Uint32 prepareId; // For DbUtil
+ Uint32 indexType;
+ Uint32 sourceTableId;
+ Uint32 targetTableId;
+ AttrOrderBuffer attributeOrder;
+ Uint32 noOfIndexColumns;
+ Uint32 noOfKeyColumns;
+ Uint32 parallelism;
+ BuildIndxRef::ErrorCode errorCode;
+ bool subscriptionCreated;
+ bool pendingSubSyncContinueConf;
+ Uint32 expectedConf; // counts n outstanding UTIL_EXECUTE_CONF + 1 SUB_SYNC_CONF
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+
+ typedef Ptr<SubscriptionRecord> SubscriptionRecPtr;
+
+ /**
+ * The pool of subscription records
+ */
+ ArrayPool<SubscriptionRecord> c_theSubscriptionRecPool;
+
+ /**
+ * The list of subscriptions
+ */
+ ArrayList<SubscriptionRecord> c_theSubscriptions;
+
+ // System start
+ void execSTTOR(Signal* signal);
+ void execNDB_STTOR(Signal* signal);
+
+ // Node management
+ void execREAD_NODESCONF(Signal* signal);
+ void execREAD_NODESREF(Signal* signal);
+ void execNODE_FAILREP(Signal* signal);
+ void execINCL_NODEREQ(Signal* signal);
+ // Debugging
+ void execDUMP_STATE_ORD(Signal* signal);
+
+ // Build index
+ void execBUILDINDXREQ(Signal* signal);
+ void execBUILDINDXCONF(Signal* signal);
+ void execBUILDINDXREF(Signal* signal);
+
+ void execUTIL_PREPARE_CONF(Signal* signal);
+ void execUTIL_PREPARE_REF(Signal* signal);
+ void execUTIL_EXECUTE_CONF(Signal* signal);
+ void execUTIL_EXECUTE_REF(Signal* signal);
+ void execUTIL_RELEASE_CONF(Signal* signal);
+ void execUTIL_RELEASE_REF(Signal* signal);
+
+ // Suma signals
+ void execSUB_CREATE_CONF(Signal* signal);
+ void execSUB_CREATE_REF(Signal* signal);
+ void execSUB_REMOVE_CONF(Signal* signal);
+ void execSUB_REMOVE_REF(Signal* signal);
+ void execSUB_SYNC_CONF(Signal* signal);
+ void execSUB_SYNC_REF(Signal* signal);
+ void execSUB_SYNC_CONTINUE_REQ(Signal* signal);
+ void execSUB_META_DATA(Signal* signal);
+ void execSUB_TABLE_DATA(Signal* signal);
+
+ // Utility functions
+ void setupSubscription(Signal* signal, SubscriptionRecPtr subRecPtr);
+ void setupTableScan(Signal* signal, SubscriptionRecPtr subRecPtr);
+ void startTableScan(Signal* signal, SubscriptionRecPtr subRecPtr);
+ void prepareInsertTransactions(Signal* signal, SubscriptionRecPtr subRecPtr);
+ void executeInsertTransaction(Signal* signal, SubscriptionRecPtr subRecPtr,
+ SegmentedSectionPtr headerPtr,
+ SegmentedSectionPtr dataPtr);
+ void buildComplete(Signal* signal, SubscriptionRecPtr subRecPtr);
+ void buildFailed(Signal* signal,
+ SubscriptionRecPtr subRecPtr,
+ BuildIndxRef::ErrorCode);
+ void checkParallelism(Signal* signal, SubscriptionRecord* subRec);
+};
+
+#endif