author     cmiller@zippy.cornsilk.net <>  2007-10-17 14:05:43 -0400
committer  cmiller@zippy.cornsilk.net <>  2007-10-17 14:05:43 -0400
commit     f3d77c1979bad93a304cbb5b93d672178815df00 (patch)
tree       892096ddfa11f5fea89ad26528d5c64fbc05ffce /storage
parent     c6b5c6af86cb7fed9ad86690136d668071b6f6cc (diff)
parent     571200d48750beba84a2936d727afcb347e1b704 (diff)
download   mariadb-git-f3d77c1979bad93a304cbb5b93d672178815df00.tar.gz
Merge zippy.cornsilk.net:/home/cmiller/work/mysql/mysql-5.1-comeng-unification
into zippy.cornsilk.net:/home/cmiller/work/mysql/mysql-5.1-recentcommmerge
Diffstat (limited to 'storage')
-rw-r--r--  storage/archive/CMakeLists.txt | 8
-rw-r--r--  storage/archive/archive_reader.c | 16
-rw-r--r--  storage/archive/azio.c | 45
-rw-r--r--  storage/archive/azlib.h | 2
-rw-r--r--  storage/archive/ha_archive.cc | 176
-rw-r--r--  storage/archive/ha_archive.h | 38
-rw-r--r--  storage/blackhole/CMakeLists.txt | 7
-rw-r--r--  storage/blackhole/ha_blackhole.cc | 150
-rw-r--r--  storage/blackhole/ha_blackhole.h | 42
-rw-r--r--  storage/csv/CMakeLists.txt | 7
-rw-r--r--  storage/csv/ha_tina.cc | 231
-rw-r--r--  storage/csv/ha_tina.h | 34
-rw-r--r--  storage/csv/transparent_file.cc | 46
-rw-r--r--  storage/csv/transparent_file.h | 4
-rw-r--r--  storage/example/CMakeLists.txt | 7
-rw-r--r--  storage/example/ha_example.cc | 52
-rw-r--r--  storage/example/ha_example.h | 31
-rw-r--r--  storage/example/plug.in | 2
-rw-r--r--  storage/federated/CMakeLists.txt | 7
-rw-r--r--  storage/federated/ha_federated.cc | 874
-rw-r--r--  storage/federated/ha_federated.h | 49
-rwxr-xr-x [-rw-r--r--]  storage/heap/CMakeLists.txt | 7
-rw-r--r--  storage/heap/_check.c | 4
-rw-r--r--  storage/heap/_rectest.c | 2
-rw-r--r--  storage/heap/ha_heap.cc | 123
-rw-r--r--  storage/heap/ha_heap.h | 45
-rw-r--r--  storage/heap/heapdef.h | 54
-rw-r--r--  storage/heap/hp_block.c | 23
-rw-r--r--  storage/heap/hp_clear.c | 4
-rw-r--r--  storage/heap/hp_close.c | 5
-rw-r--r--  storage/heap/hp_create.c | 73
-rw-r--r--  storage/heap/hp_delete.c | 10
-rw-r--r--  storage/heap/hp_hash.c | 73
-rw-r--r--  storage/heap/hp_info.c | 2
-rw-r--r--  storage/heap/hp_open.c | 85
-rw-r--r--  storage/heap/hp_rfirst.c | 6
-rw-r--r--  storage/heap/hp_rkey.c | 8
-rw-r--r--  storage/heap/hp_rlast.c | 6
-rw-r--r--  storage/heap/hp_rnext.c | 6
-rw-r--r--  storage/heap/hp_rprev.c | 6
-rw-r--r--  storage/heap/hp_rrnd.c | 4
-rw-r--r--  storage/heap/hp_rsame.c | 2
-rw-r--r--  storage/heap/hp_scan.c | 2
-rw-r--r--  storage/heap/hp_test1.c | 24
-rw-r--r--  storage/heap/hp_test2.c | 79
-rw-r--r--  storage/heap/hp_update.c | 4
-rw-r--r--  storage/heap/hp_write.c | 32
-rwxr-xr-x [-rw-r--r--]  storage/innobase/CMakeLists.txt | 13
-rw-r--r--  storage/innobase/Makefile.am | 227
-rw-r--r--  storage/innobase/btr/Makefile.am | 25
-rw-r--r--  storage/innobase/btr/btr0btr.c | 7
-rw-r--r--  storage/innobase/buf/Makefile.am | 25
-rw-r--r--  storage/innobase/buf/buf0buf.c | 12
-rw-r--r--  storage/innobase/buf/buf0lru.c | 13
-rw-r--r--  storage/innobase/data/Makefile.am | 25
-rw-r--r--  storage/innobase/data/data0data.c | 2
-rw-r--r--  storage/innobase/data/data0type.c | 3
-rw-r--r--  storage/innobase/dict/Makefile.am | 26
-rw-r--r--  storage/innobase/dict/dict0boot.c | 65
-rw-r--r--  storage/innobase/dict/dict0crea.c | 5
-rw-r--r--  storage/innobase/dict/dict0dict.c | 182
-rw-r--r--  storage/innobase/dict/dict0load.c | 9
-rw-r--r--  storage/innobase/dict/dict0mem.c | 80
-rw-r--r--  storage/innobase/dyn/Makefile.am | 25
-rw-r--r--  storage/innobase/eval/Makefile.am | 25
-rw-r--r--  storage/innobase/fil/Makefile.am | 25
-rw-r--r--  storage/innobase/fsp/Makefile.am | 26
-rw-r--r--  storage/innobase/fsp/fsp0fsp.c | 16
-rw-r--r--  storage/innobase/fut/Makefile.am | 25
-rw-r--r--  storage/innobase/ha/Makefile.am | 25
-rw-r--r--  storage/innobase/handler/Makefile.am | 28
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 2049
-rw-r--r--  storage/innobase/handler/ha_innodb.h | 134
-rw-r--r--  storage/innobase/ibuf/Makefile.am | 25
-rw-r--r--  storage/innobase/ibuf/ibuf0ibuf.c | 106
-rw-r--r--  storage/innobase/include/Makefile.i | 10
-rw-r--r--  storage/innobase/include/buf0buf.ic | 2
-rw-r--r--  storage/innobase/include/db0err.h | 5
-rw-r--r--  storage/innobase/include/dict0dict.h | 61
-rw-r--r--  storage/innobase/include/dict0dict.ic | 24
-rw-r--r--  storage/innobase/include/dict0mem.h | 39
-rw-r--r--  storage/innobase/include/fsp0fsp.h | 2
-rw-r--r--  storage/innobase/include/ha_prototypes.h | 29
-rw-r--r--  storage/innobase/include/lock0iter.h | 52
-rw-r--r--  storage/innobase/include/lock0lock.h | 14
-rw-r--r--  storage/innobase/include/lock0priv.h | 101
-rw-r--r--  storage/innobase/include/lock0priv.ic | 32
-rw-r--r--  storage/innobase/include/mem0mem.ic | 18
-rw-r--r--  storage/innobase/include/os0file.h | 3
-rw-r--r--  storage/innobase/include/page0page.h | 9
-rw-r--r--  storage/innobase/include/rem0rec.ic | 6
-rw-r--r--  storage/innobase/include/row0mysql.h | 1
-rw-r--r--  storage/innobase/include/row0sel.h | 10
-rw-r--r--  storage/innobase/include/sync0rw.ic | 2
-rw-r--r--  storage/innobase/include/trx0trx.h | 64
-rw-r--r--  storage/innobase/include/trx0undo.h | 13
-rw-r--r--  storage/innobase/include/univ.i | 16
-rw-r--r--  storage/innobase/include/ut0mem.h | 2
-rw-r--r--  storage/innobase/include/ut0ut.h | 5
-rw-r--r--  storage/innobase/lock/Makefile.am | 25
-rw-r--r--  storage/innobase/lock/lock0iter.c | 90
-rw-r--r--  storage/innobase/lock/lock0lock.c | 88
-rw-r--r--  storage/innobase/log/Makefile.am | 25
-rw-r--r--  storage/innobase/log/log0log.c | 34
-rw-r--r--  storage/innobase/log/log0recv.c | 246
-rw-r--r--  storage/innobase/mach/Makefile.am | 25
-rw-r--r--  storage/innobase/mem/Makefile.am | 27
-rw-r--r--  storage/innobase/mem/mem0mem.c | 1
-rw-r--r--  storage/innobase/mem/mem0pool.c | 6
-rw-r--r--  storage/innobase/mtr/Makefile.am | 25
-rw-r--r--  storage/innobase/mtr/mtr0log.c | 28
-rw-r--r--  storage/innobase/os/Makefile.am | 25
-rw-r--r--  storage/innobase/os/os0file.c | 19
-rw-r--r--  storage/innobase/page/Makefile.am | 25
-rw-r--r--  storage/innobase/page/page0page.c | 12
-rw-r--r--  storage/innobase/pars/Makefile.am | 27
-rw-r--r--  storage/innobase/pars/pars0pars.c | 3
-rw-r--r--  storage/innobase/plug.in | 37
-rw-r--r--  storage/innobase/que/Makefile.am | 25
-rw-r--r--  storage/innobase/read/Makefile.am | 25
-rw-r--r--  storage/innobase/rem/Makefile.am | 25
-rw-r--r--  storage/innobase/rem/rem0rec.c | 46
-rw-r--r--  storage/innobase/row/Makefile.am | 26
-rw-r--r--  storage/innobase/row/row0ins.c | 46
-rw-r--r--  storage/innobase/row/row0mysql.c | 7
-rw-r--r--  storage/innobase/row/row0row.c | 23
-rw-r--r--  storage/innobase/row/row0sel.c | 172
-rw-r--r--  storage/innobase/srv/Makefile.am | 25
-rw-r--r--  storage/innobase/srv/srv0srv.c | 54
-rw-r--r--  storage/innobase/srv/srv0start.c | 6
-rw-r--r--  storage/innobase/sync/Makefile.am | 25
-rw-r--r--  storage/innobase/sync/sync0arr.c | 4
-rw-r--r--  storage/innobase/sync/sync0rw.c | 18
-rw-r--r--  storage/innobase/sync/sync0sync.c | 1
-rw-r--r--  storage/innobase/thr/Makefile.am | 25
-rw-r--r--  storage/innobase/trx/Makefile.am | 26
-rw-r--r--  storage/innobase/trx/trx0rec.c | 19
-rw-r--r--  storage/innobase/trx/trx0sys.c | 13
-rw-r--r--  storage/innobase/trx/trx0trx.c | 92
-rw-r--r--  storage/innobase/trx/trx0undo.c | 101
-rw-r--r--  storage/innobase/usr/Makefile.am | 25
-rw-r--r--  storage/innobase/ut/Makefile.am | 25
-rw-r--r--  storage/innobase/ut/ut0mem.c | 2
-rw-r--r--  storage/innobase/ut/ut0ut.c | 1
-rwxr-xr-x [-rw-r--r--]  storage/myisam/CMakeLists.txt | 37
-rw-r--r--  storage/myisam/ft_boolean_search.c | 145
-rw-r--r--  storage/myisam/ft_nlq_search.c | 22
-rw-r--r--  storage/myisam/ft_parser.c | 70
-rw-r--r--  storage/myisam/ft_static.c | 4
-rw-r--r--  storage/myisam/ft_stopwords.c | 6
-rw-r--r--  storage/myisam/ft_update.c | 28
-rw-r--r--  storage/myisam/ftdefs.h | 32
-rw-r--r--  storage/myisam/fulltext.h | 6
-rw-r--r--  storage/myisam/ha_myisam.cc | 252
-rw-r--r--  storage/myisam/ha_myisam.h | 51
-rw-r--r--  storage/myisam/mi_cache.c | 6
-rw-r--r--  storage/myisam/mi_check.c | 174
-rw-r--r--  storage/myisam/mi_checksum.c | 8
-rw-r--r--  storage/myisam/mi_close.c | 10
-rw-r--r--  storage/myisam/mi_create.c | 8
-rw-r--r--  storage/myisam/mi_delete.c | 76
-rw-r--r--  storage/myisam/mi_delete_all.c | 2
-rw-r--r--  storage/myisam/mi_dynrec.c | 185
-rw-r--r--  storage/myisam/mi_extra.c | 20
-rw-r--r--  storage/myisam/mi_key.c | 73
-rw-r--r--  storage/myisam/mi_keycache.c | 3
-rw-r--r--  storage/myisam/mi_locking.c | 6
-rw-r--r--  storage/myisam/mi_log.c | 21
-rw-r--r--  storage/myisam/mi_open.c | 77
-rw-r--r--  storage/myisam/mi_packrec.c | 105
-rw-r--r--  storage/myisam/mi_page.c | 12
-rw-r--r--  storage/myisam/mi_preload.c | 19
-rw-r--r--  storage/myisam/mi_range.c | 4
-rw-r--r--  storage/myisam/mi_rfirst.c | 2
-rw-r--r--  storage/myisam/mi_rkey.c | 84
-rw-r--r--  storage/myisam/mi_rlast.c | 2
-rw-r--r--  storage/myisam/mi_rnext.c | 2
-rw-r--r--  storage/myisam/mi_rnext_same.c | 2
-rw-r--r--  storage/myisam/mi_rprev.c | 2
-rw-r--r--  storage/myisam/mi_rrnd.c | 2
-rw-r--r--  storage/myisam/mi_rsame.c | 2
-rw-r--r--  storage/myisam/mi_rsamepos.c | 2
-rw-r--r--  storage/myisam/mi_scan.c | 2
-rw-r--r--  storage/myisam/mi_search.c | 26
-rw-r--r--  storage/myisam/mi_static.c | 2
-rw-r--r--  storage/myisam/mi_statrec.c | 48
-rw-r--r--  storage/myisam/mi_test1.c | 116
-rw-r--r--  storage/myisam/mi_test2.c | 67
-rw-r--r--  storage/myisam/mi_test3.c | 20
-rw-r--r--  storage/myisam/mi_unique.c | 16
-rw-r--r--  storage/myisam/mi_update.c | 10
-rw-r--r--  storage/myisam/mi_write.c | 64
-rw-r--r--  storage/myisam/myisam_ftdump.c | 2
-rw-r--r--  storage/myisam/myisamchk.c | 96
-rw-r--r--  storage/myisam/myisamdef.h | 135
-rw-r--r--  storage/myisam/myisamlog.c | 63
-rw-r--r--  storage/myisam/myisampack.c | 124
-rw-r--r--  storage/myisam/rt_index.c | 32
-rw-r--r--  storage/myisam/rt_split.c | 4
-rw-r--r--  storage/myisam/rt_test.c | 34
-rw-r--r--  storage/myisam/sort.c | 96
-rw-r--r--  storage/myisam/sp_defs.h | 2
-rw-r--r--  storage/myisam/sp_key.c | 77
-rw-r--r--  storage/myisam/sp_test.c | 36
-rwxr-xr-x [-rw-r--r--]  storage/myisammrg/CMakeLists.txt | 7
-rw-r--r--  storage/myisammrg/ha_myisammrg.cc | 121
-rw-r--r--  storage/myisammrg/ha_myisammrg.h | 35
-rw-r--r--  storage/myisammrg/myrg_close.c | 2
-rw-r--r--  storage/myisammrg/myrg_create.c | 4
-rw-r--r--  storage/myisammrg/myrg_def.h | 7
-rw-r--r--  storage/myisammrg/myrg_delete.c | 2
-rw-r--r--  storage/myisammrg/myrg_locking.c | 8
-rw-r--r--  storage/myisammrg/myrg_open.c | 25
-rw-r--r--  storage/myisammrg/myrg_queue.c | 4
-rw-r--r--  storage/myisammrg/myrg_rfirst.c | 4
-rw-r--r--  storage/myisammrg/myrg_rkey.c | 10
-rw-r--r--  storage/myisammrg/myrg_rlast.c | 4
-rw-r--r--  storage/myisammrg/myrg_rnext.c | 4
-rw-r--r--  storage/myisammrg/myrg_rnext_same.c | 4
-rw-r--r--  storage/myisammrg/myrg_rprev.c | 4
-rw-r--r--  storage/myisammrg/myrg_rrnd.c | 12
-rw-r--r--  storage/myisammrg/myrg_rsame.c | 2
-rw-r--r--  storage/myisammrg/myrg_update.c | 2
-rw-r--r--  storage/myisammrg/myrg_write.c | 2
-rw-r--r--  storage/ndb/MAINTAINERS | 163
-rw-r--r--  storage/ndb/config/common.mk.am | 2
-rw-r--r--  storage/ndb/include/Makefile.am | 1
-rw-r--r--  storage/ndb/include/debugger/EventLogger.hpp | 2
-rw-r--r--  storage/ndb/include/kernel/signaldata/FsOpenReq.hpp | 2
-rw-r--r--  storage/ndb/include/mgmapi/mgmapi.h | 102
-rw-r--r--  storage/ndb/include/mgmapi/mgmapi_config_parameters.h | 7
-rw-r--r--  storage/ndb/include/mgmapi/mgmapi_error.h | 121
-rw-r--r--  storage/ndb/include/mgmapi/ndbd_exit_codes.h | 3
-rw-r--r--  storage/ndb/include/ndb_global.h.in | 2
-rw-r--r--  storage/ndb/include/ndb_version.h.in | 57
-rw-r--r--  storage/ndb/include/ndbapi/Ndb.hpp | 46
-rw-r--r--  storage/ndb/include/ndbapi/NdbOperation.hpp | 7
-rw-r--r--  storage/ndb/include/ndbapi/NdbRecAttr.hpp | 28
-rw-r--r--  storage/ndb/include/portlib/NdbThread.h | 2
-rw-r--r--  storage/ndb/include/portlib/NdbTick.h | 4
-rw-r--r--  storage/ndb/include/util/ndb_opts.h | 22
-rw-r--r--  storage/ndb/include/util/version.h | 23
-rw-r--r--  storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile | 6
-rw-r--r--  storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp (renamed from storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp) | 0
-rw-r--r--  storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile | 6
-rw-r--r--  storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp (renamed from storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp) | 0
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp | 2
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp | 12
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile | 6
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp) | 0
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile | 6
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp) | 0
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/SignalNames.cpp | 8
-rw-r--r--  storage/ndb/src/common/portlib/NdbTick.c | 4
-rw-r--r--  storage/ndb/src/common/transporter/Packer.cpp | 5
-rw-r--r--  storage/ndb/src/common/transporter/SCI_Transporter.cpp | 216
-rw-r--r--  storage/ndb/src/common/transporter/SCI_Transporter.hpp | 18
-rw-r--r--  storage/ndb/src/common/transporter/TCP_Transporter.hpp | 4
-rw-r--r--  storage/ndb/src/common/transporter/TransporterRegistry.cpp | 51
-rw-r--r--  storage/ndb/src/common/util/version.c | 23
-rw-r--r--  storage/ndb/src/cw/cpcd/main.cpp | 12
-rw-r--r--  storage/ndb/src/kernel/blocks/ERROR_codes.txt | 20
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.cpp | 86
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.hpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupInit.cpp | 7
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp | 24
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 17
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 925
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 9
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 217
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 28
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 21
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 721
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Makefile.am | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp (renamed from storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp) | 0
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 164
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 15
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 47
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 21
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 17
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 22
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 87
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp | 61
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp | 66
-rw-r--r--  storage/ndb/src/kernel/blocks/diskpage.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/lgman.cpp | 37
-rw-r--r--  storage/ndb/src/kernel/blocks/lgman.hpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 45
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 160
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/pgman.cpp | 133
-rw-r--r--  storage/ndb/src/kernel/blocks/pgman.hpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 4
-rw-r--r--  storage/ndb/src/kernel/blocks/restore.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/tsman.cpp | 6
-rw-r--r--  storage/ndb/src/kernel/error/ndbd_exit_codes.c | 2
-rw-r--r--  storage/ndb/src/kernel/vm/Configuration.cpp | 28
-rw-r--r--  storage/ndb/src/kernel/vm/Configuration.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/vm/DynArr256.cpp | 101
-rw-r--r--  storage/ndb/src/kernel/vm/DynArr256.hpp | 12
-rw-r--r--  storage/ndb/src/kernel/vm/RWPool.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 48
-rw-r--r--  storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 10
-rw-r--r--  storage/ndb/src/kernel/vm/WatchDog.cpp | 152
-rw-r--r--  storage/ndb/src/mgmapi/mgmapi.cpp | 4
-rw-r--r--  storage/ndb/src/mgmclient/CommandInterpreter.cpp | 249
-rw-r--r--  storage/ndb/src/mgmclient/Makefile.am | 4
-rw-r--r--  storage/ndb/src/mgmclient/main.cpp | 31
-rw-r--r--  storage/ndb/src/mgmsrv/ConfigInfo.cpp | 87
-rw-r--r--  storage/ndb/src/mgmsrv/InitConfigFileParser.cpp | 43
-rw-r--r--  storage/ndb/src/mgmsrv/Makefile.am | 2
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 86
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 39
-rw-r--r--  storage/ndb/src/mgmsrv/Services.cpp | 2
-rw-r--r--  storage/ndb/src/mgmsrv/main.cpp | 14
-rw-r--r--  storage/ndb/src/mgmsrv/ndb_mgmd_error.h | 33
-rw-r--r--  storage/ndb/src/ndbapi/ClusterMgr.cpp | 8
-rw-r--r--  storage/ndb/src/ndbapi/ClusterMgr.hpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/Ndb.cpp | 274
-rw-r--r--  storage/ndb/src/ndbapi/NdbBlob.cpp | 17
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 3
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperation.cpp | 8
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationDefine.cpp | 1
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationInt.cpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/NdbRecAttr.cpp | 6
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanOperation.cpp | 4
-rw-r--r--  storage/ndb/src/ndbapi/NdbTransaction.cpp | 38
-rw-r--r--  storage/ndb/src/ndbapi/ndb_internal.hpp | 26
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c | 36
-rw-r--r--  storage/ndb/test/include/HugoTransactions.hpp | 16
-rw-r--r--  storage/ndb/test/include/NDBT_Thread.hpp | 226
-rw-r--r--  storage/ndb/test/ndbapi/benchronja.cpp | 19
-rw-r--r--  storage/ndb/test/ndbapi/flexAsynch.cpp | 19
-rw-r--r--  storage/ndb/test/ndbapi/flexHammer.cpp | 15
-rw-r--r--  storage/ndb/test/ndbapi/flexScan.cpp | 11
-rw-r--r--  storage/ndb/test/ndbapi/flexTT.cpp | 19
-rw-r--r--  storage/ndb/test/ndbapi/flexTimedAsynch.cpp | 23
-rw-r--r--  storage/ndb/test/ndbapi/initronja.cpp | 9
-rw-r--r--  storage/ndb/test/ndbapi/testBasic.cpp | 125
-rw-r--r--  storage/ndb/test/ndbapi/testDict.cpp | 157
-rw-r--r--  storage/ndb/test/ndbapi/testIndex.cpp | 118
-rw-r--r--  storage/ndb/test/ndbapi/testIndexStat.cpp | 30
-rw-r--r--  storage/ndb/test/ndbapi/testMgm.cpp | 75
-rw-r--r--  storage/ndb/test/ndbapi/testNdbApi.cpp | 34
-rw-r--r--  storage/ndb/test/ndbapi/testNodeRestart.cpp | 145
-rw-r--r--  storage/ndb/test/ndbapi/testOperations.cpp | 5
-rw-r--r--  storage/ndb/test/ndbapi/testScanFilter.cpp | 12
-rw-r--r--  storage/ndb/test/ndbapi/testSystemRestart.cpp | 95
-rw-r--r--  storage/ndb/test/ndbapi/test_event_merge.cpp | 34
-rw-r--r--  storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp | 13
-rw-r--r--  storage/ndb/test/run-test/Makefile.am | 4
-rw-r--r--  storage/ndb/test/run-test/autotest-boot.sh | 35
-rw-r--r--  storage/ndb/test/run-test/autotest-run.sh | 6
-rw-r--r--  storage/ndb/test/run-test/conf-dl145a.cnf | 3
-rw-r--r--  storage/ndb/test/run-test/conf-test.cnf | 26
-rw-r--r--  storage/ndb/test/run-test/daily-basic-tests.txt | 39
-rw-r--r--  storage/ndb/test/run-test/main.cpp | 36
-rw-r--r--  storage/ndb/test/run-test/upgrade-boot.sh | 218
-rw-r--r--  storage/ndb/test/src/HugoTransactions.cpp | 78
-rw-r--r--  storage/ndb/test/src/Makefile.am | 2
-rw-r--r--  storage/ndb/test/src/NDBT_Test.cpp | 20
-rw-r--r--  storage/ndb/test/src/NDBT_Thread.cpp | 283
-rw-r--r--  storage/ndb/test/tools/hugoFill.cpp | 4
-rw-r--r--  storage/ndb/test/tools/hugoPkDelete.cpp | 94
-rw-r--r--  storage/ndb/test/tools/hugoPkRead.cpp | 89
-rw-r--r--  storage/ndb/test/tools/hugoPkUpdate.cpp | 98
-rw-r--r--  storage/ndb/tools/delete_all.cpp | 8
-rw-r--r--  storage/ndb/tools/desc.cpp | 8
-rw-r--r--  storage/ndb/tools/drop_index.cpp | 2
-rw-r--r--  storage/ndb/tools/drop_tab.cpp | 2
-rw-r--r--  storage/ndb/tools/listTables.cpp | 12
-rw-r--r--  storage/ndb/tools/ndb_config.cpp | 22
-rw-r--r--  storage/ndb/tools/ndb_error_reporter | 4
-rw-r--r--  storage/ndb/tools/ndb_size.pl | 165
-rw-r--r--  storage/ndb/tools/restore/Restore.cpp | 59
-rw-r--r--  storage/ndb/tools/restore/consumer_restore.cpp | 49
-rw-r--r--  storage/ndb/tools/restore/consumer_restore.hpp | 2
-rw-r--r--  storage/ndb/tools/restore/restore_main.cpp | 75
-rw-r--r--  storage/ndb/tools/select_all.cpp | 26
-rw-r--r--  storage/ndb/tools/select_count.cpp | 6
-rw-r--r--  storage/ndb/tools/waiter.cpp | 8
392 files changed, 12269 insertions, 6581 deletions
diff --git a/storage/archive/CMakeLists.txt b/storage/archive/CMakeLists.txt
index 09227a6cc2d..9a1cfe081b6 100644
--- a/storage/archive/CMakeLists.txt
+++ b/storage/archive/CMakeLists.txt
@@ -20,5 +20,9 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(archive azio.c ha_archive.cc ha_archive.h)
-TARGET_LINK_LIBRARIES(archive zlib mysys dbug strings)
+
+SET(ARCHIVE_SOURCES azio.c ha_archive.cc ha_archive.h)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(archive ${ARCHIVE_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c
index 14018217dea..bfc01073161 100644
--- a/storage/archive/archive_reader.c
+++ b/storage/archive/archive_reader.c
@@ -110,7 +110,7 @@ int main(int argc, char *argv[])
if (opt_check)
{
- byte size_buffer[ARCHIVE_ROW_HEADER_SIZE];
+ uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE];
int error;
unsigned int x;
unsigned int read;
@@ -118,7 +118,7 @@ int main(int argc, char *argv[])
unsigned long long row_count= 0;
char buffer;
- while ((read= azread(&reader_handle, (byte *)size_buffer,
+ while ((read= azread(&reader_handle, (uchar *)size_buffer,
ARCHIVE_ROW_HEADER_SIZE, &error)))
{
if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE))
@@ -171,7 +171,7 @@ int main(int argc, char *argv[])
if (opt_backup)
{
- byte size_buffer[ARCHIVE_ROW_HEADER_SIZE];
+ uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE];
int error;
unsigned int read;
unsigned int row_len;
@@ -213,7 +213,7 @@ int main(int argc, char *argv[])
my_free(ptr, MYF(0));
}
- while ((read= azread(&reader_handle, (byte *)size_buffer,
+ while ((read= azread(&reader_handle, (uchar *)size_buffer,
ARCHIVE_ROW_HEADER_SIZE, &error)))
{
if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE))
@@ -263,7 +263,7 @@ int main(int argc, char *argv[])
frm_file= my_open(argv[1], O_CREAT|O_RDWR|O_BINARY, MYF(0));
ptr= (char *)my_malloc(sizeof(char) * reader_handle.frm_length, MYF(0));
azread_frm(&reader_handle, ptr);
- my_write(frm_file, ptr, reader_handle.frm_length, MYF(0));
+ my_write(frm_file, (uchar*) ptr, reader_handle.frm_length, MYF(0));
my_close(frm_file, MYF(0));
my_free(ptr, MYF(0));
}
@@ -355,15 +355,15 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"set-auto-increment", 'A',
"Force auto_increment to start at this or higher value. If no value is given, then sets the next auto_increment value to the highest used value for the auto key + 1.",
- (gptr*) &new_auto_increment,
- (gptr*) &new_auto_increment,
+ (uchar**) &new_auto_increment,
+ (uchar**) &new_auto_increment,
0, GET_ULL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"silent", 's',
"Only print errors. One can use two -s to make archive_reader very silent.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files.",
- (gptr*) &opt_tmpdir,
+ (uchar**) &opt_tmpdir,
0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"version", 'V',
"Print version and exit.",
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index 6b01d9c3c88..c04749444cb 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -19,7 +19,7 @@
static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
static int const az_magic[3] = {0xfe, 0x03, 0x01}; /* az magic header */
-/* gzip flag byte */
+/* gzip flag uchar */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
@@ -139,8 +139,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
}
else if (s->mode == 'w')
{
- unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
- my_pread(s->file, (byte*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
+ uchar buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
+ my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
MYF(0));
read_header(s, buffer); /* skip the .az header */
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
@@ -190,7 +190,8 @@ void write_header(azio_stream *s)
*(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */
/* Always begin at the begining, and end there as well */
- my_pwrite(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
+ my_pwrite(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
+ MYF(0));
}
/* ===========================================================================
@@ -224,7 +225,7 @@ int get_byte(s)
if (s->stream.avail_in == 0)
{
errno = 0;
- s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
+ s->stream.avail_in = my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
@@ -248,8 +249,8 @@ int get_byte(s)
*/
void check_header(azio_stream *s)
{
- int method; /* method byte */
- int flags; /* flags byte */
+ int method; /* method uchar */
+ int flags; /* flags uchar */
uInt len;
int c;
@@ -260,7 +261,7 @@ void check_header(azio_stream *s)
if (len < 2) {
if (len) s->inbuf[0] = s->stream.next_in[0];
errno = 0;
- len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE_READ >> len, MYF(0));
+ len = (uInt)my_read(s->file, (uchar *)s->inbuf + len, AZ_BUFSIZE_READ >> len, MYF(0));
if (len == 0) s->z_err = Z_ERRNO;
s->stream.avail_in += len;
s->stream.next_in = s->inbuf;
@@ -442,7 +443,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *
if (s->stream.avail_out > 0)
{
s->stream.avail_out -=
- (uInt)my_read(s->file, (byte *)next_out, s->stream.avail_out, MYF(0));
+ (uInt)my_read(s->file, (uchar *)next_out, s->stream.avail_out, MYF(0));
}
len -= s->stream.avail_out;
s->in += len;
@@ -455,7 +456,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *
if (s->stream.avail_in == 0 && !s->z_eof) {
errno = 0;
- s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
+ s->stream.avail_in = (uInt)my_read(s->file, (uchar *)s->inbuf, AZ_BUFSIZE_READ, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
@@ -509,7 +510,7 @@ unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of bytes actually written (0 in case of error).
*/
-unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len)
+unsigned int azwrite (azio_stream *s, const voidp buf, unsigned int len)
{
s->stream.next_in = (Bytef*)buf;
s->stream.avail_in = len;
@@ -522,7 +523,7 @@ unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len)
{
s->stream.next_out = s->outbuf;
- if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE_WRITE,
+ if (my_write(s->file, (uchar *)s->outbuf, AZ_BUFSIZE_WRITE,
MYF(0)) != AZ_BUFSIZE_WRITE)
{
s->z_err = Z_ERRNO;
@@ -557,6 +558,7 @@ int do_flush (azio_stream *s, int flush)
{
uInt len;
int done = 0;
+ my_off_t afterwrite_pos;
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
@@ -569,7 +571,7 @@ int do_flush (azio_stream *s, int flush)
if (len != 0)
{
s->check_point= my_tell(s->file, MYF(0));
- if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len)
+ if ((uInt)my_write(s->file, (uchar *)s->outbuf, len, MYF(0)) != len)
{
s->z_err = Z_ERRNO;
return Z_ERRNO;
@@ -597,7 +599,10 @@ int do_flush (azio_stream *s, int flush)
s->dirty= AZ_STATE_CLEAN; /* Mark it clean, we should be good now */
else
s->dirty= AZ_STATE_SAVED; /* Mark it clean, we should be good now */
+
+ afterwrite_pos= my_tell(s->file, MYF(0));
write_header(s);
+ my_seek(s->file, afterwrite_pos, SEEK_SET, MYF(0));
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
@@ -611,7 +616,7 @@ int ZEXPORT azflush (s, flush)
if (s->mode == 'r')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
- my_pread(s->file, (byte*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
+ my_pread(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
MYF(0));
read_header(s, buffer); /* skip the .az header */
@@ -748,7 +753,7 @@ my_off_t ZEXPORT aztell (file)
void putLong (File file, uLong x)
{
int n;
- byte buffer[1];
+ uchar buffer[1];
for (n = 0; n < 4; n++)
{
@@ -817,7 +822,7 @@ int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
s->frm_length= length;
s->start+= length;
- my_pwrite(s->file, blob, s->frm_length, s->frm_start_pos, MYF(0));
+ my_pwrite(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0));
write_header(s);
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
@@ -827,7 +832,7 @@ int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
int azread_frm(azio_stream *s, char *blob)
{
- my_pread(s->file, blob, s->frm_length, s->frm_start_pos, MYF(0));
+ my_pread(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0));
return 0;
}
@@ -848,7 +853,8 @@ int azwrite_comment(azio_stream *s, char *blob, unsigned int length)
s->comment_length= length;
s->start+= length;
- my_pwrite(s->file, blob, s->comment_length, s->comment_start_pos, MYF(0));
+ my_pwrite(s->file, (uchar*) blob, s->comment_length, s->comment_start_pos,
+ MYF(0));
write_header(s);
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
@@ -858,7 +864,8 @@ int azwrite_comment(azio_stream *s, char *blob, unsigned int length)
int azread_comment(azio_stream *s, char *blob)
{
- my_pread(s->file, blob, s->comment_length, s->comment_start_pos, MYF(0));
+ my_pread(s->file, (uchar*) blob, s->comment_length, s->comment_start_pos,
+ MYF(0));
return 0;
}
diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h
index a5bee1befae..47772b1c4fe 100644
--- a/storage/archive/azlib.h
+++ b/storage/archive/azlib.h
@@ -273,7 +273,7 @@ extern unsigned int azread ( azio_stream *s, voidp buf, unsigned int len, int *e
gzread returns the number of uncompressed bytes actually read (0 for
end of file, -1 for error). */
-extern unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len);
+extern unsigned int azwrite (azio_stream *s, const voidp buf, unsigned int len);
/*
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of uncompressed bytes actually written
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 6853e879f55..6696eac2fbb 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -113,9 +113,9 @@ static handler *archive_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
int archive_discover(handlerton *hton, THD* thd, const char *db,
- const char *name,
- const void** frmblob,
- uint* frmlen);
+ const char *name,
+ uchar **frmblob,
+ size_t *frmlen);
/*
Number of rows that will force a bulk insert.
@@ -137,11 +137,11 @@ static handler *archive_create_handler(handlerton *hton,
/*
Used for hash table that tracks open tables.
*/
-static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
+static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
my_bool not_used __attribute__((unused)))
{
*length=share->table_name_length;
- return (byte*) share->table_name;
+ return (uchar*) share->table_name;
}
@@ -216,9 +216,9 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
}
int archive_discover(handlerton *hton, THD* thd, const char *db,
- const char *name,
- const void** frmblob,
- uint* frmlen)
+ const char *name,
+ uchar **frmblob,
+ size_t *frmlen)
{
DBUG_ENTER("archive_discover");
DBUG_PRINT("archive_discover", ("db: %s, name: %s", db, name));
@@ -247,7 +247,7 @@ int archive_discover(handlerton *hton, THD* thd, const char *db,
azclose(&frm_stream);
*frmlen= frm_stream.frm_length;
- *frmblob= frm_ptr;
+ *frmblob= (uchar*) frm_ptr;
DBUG_RETURN(0);
err:
@@ -316,7 +316,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
length=(uint) strlen(table_name);
if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
- (byte*) table_name,
+ (uchar*) table_name,
length)))
{
char *tmp_name;
@@ -362,7 +362,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
share->crashed= archive_tmp.dirty;
azclose(&archive_tmp);
- VOID(my_hash_insert(&archive_open_tables, (byte*) share));
+ VOID(my_hash_insert(&archive_open_tables, (uchar*) share));
thr_lock_init(&share->lock);
}
share->use_count++;
@@ -393,7 +393,7 @@ int ha_archive::free_share()
pthread_mutex_lock(&archive_mutex);
if (!--share->use_count)
{
- hash_delete(&archive_open_tables, (byte*) share);
+ hash_delete(&archive_open_tables, (uchar*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
/*
@@ -408,7 +408,7 @@ int ha_archive::free_share()
if (azclose(&(share->archive_write)))
rc= 1;
}
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
}
pthread_mutex_unlock(&archive_mutex);
@@ -436,6 +436,9 @@ int ha_archive::init_archive_writer()
}
+/*
+ No locks are required because it is associated with just one handler instance
+*/
int ha_archive::init_archive_reader()
{
DBUG_ENTER("ha_archive::init_archive_reader");
@@ -579,7 +582,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
azio_stream create_stream; /* Archive file we are working with */
File frm_file; /* File handler for readers */
MY_STAT file_stat; // Stat information for the data file
- byte *frm_ptr;
+ uchar *frm_ptr;
DBUG_ENTER("ha_archive::create");
@@ -651,12 +654,12 @@ int ha_archive::create(const char *name, TABLE *table_arg,
{
if (!my_fstat(frm_file, &file_stat, MYF(MY_WME)))
{
- frm_ptr= (byte *)my_malloc(sizeof(byte) * file_stat.st_size , MYF(0));
+ frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));
if (frm_ptr)
{
my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0));
azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size);
- my_free((gptr)frm_ptr, MYF(0));
+ my_free((uchar*)frm_ptr, MYF(0));
}
}
my_close(frm_file, MYF(0));
@@ -696,7 +699,7 @@ error:
/*
This is where the actual row is written out.
*/
-int ha_archive::real_write_row(byte *buf, azio_stream *writer)
+int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
{
my_off_t written;
unsigned int r_pack_length;
@@ -726,7 +729,7 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
the bytes required for the length in the header.
*/
-uint32 ha_archive::max_row_length(const byte *buf)
+uint32 ha_archive::max_row_length(const uchar *buf)
{
uint32 length= (uint32)(table->s->reclength + table->s->fields*2);
length+= ARCHIVE_ROW_HEADER_SIZE;
@@ -743,9 +746,9 @@ uint32 ha_archive::max_row_length(const byte *buf)
}
-unsigned int ha_archive::pack_row(byte *record)
+unsigned int ha_archive::pack_row(uchar *record)
{
- byte *ptr;
+ uchar *ptr;
DBUG_ENTER("ha_archive::pack_row");
@@ -761,8 +764,7 @@ unsigned int ha_archive::pack_row(byte *record)
for (Field **field=table->field ; *field ; field++)
{
if (!((*field)->is_null()))
- ptr=(byte*) (*field)->pack((char*) ptr,
- (char*) record + (*field)->offset(record));
+ ptr= (*field)->pack(ptr, record + (*field)->offset(record));
}
int4store(record_buffer->buffer, (int)(ptr - record_buffer->buffer -
@@ -784,26 +786,27 @@ unsigned int ha_archive::pack_row(byte *record)
for implementing start_bulk_insert() is that we could skip
setting dirty to true each time.
*/
-int ha_archive::write_row(byte *buf)
+int ha_archive::write_row(uchar *buf)
{
int rc;
- byte *read_buf= NULL;
+ uchar *read_buf= NULL;
ulonglong temp_auto;
- byte *record= table->record[0];
+ uchar *record= table->record[0];
DBUG_ENTER("ha_archive::write_row");
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- if (!share->archive_write_open)
- if (init_archive_writer())
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
pthread_mutex_lock(&share->mutex);
+ if (!share->archive_write_open)
+ if (init_archive_writer())
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+
if (table->next_number_field && record == table->record[0])
{
KEY *mkey= &table->s->key_info[0]; // We only support one key right now
@@ -832,7 +835,7 @@ int ha_archive::write_row(byte *buf)
First we create a buffer that we can use for reading rows, and can pass
to get_row().
*/
- if (!(read_buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ if (!(read_buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
{
rc= HA_ERR_OUT_OF_MEM;
goto error;
@@ -882,7 +885,7 @@ int ha_archive::write_row(byte *buf)
error:
pthread_mutex_unlock(&share->mutex);
if (read_buf)
- my_free((gptr) read_buf, MYF(0));
+ my_free((uchar*) read_buf, MYF(0));
DBUG_RETURN(rc);
}
@@ -910,7 +913,7 @@ int ha_archive::index_init(uint keynr, bool sorted)
No indexes, so if we get a request for an index search since we tell
the optimizer that we have unique indexes, we scan
*/
-int ha_archive::index_read(byte *buf, const byte *key,
+int ha_archive::index_read(uchar *buf, const uchar *key,
uint key_len, enum ha_rkey_function find_flag)
{
int rc;
@@ -920,7 +923,7 @@ int ha_archive::index_read(byte *buf, const byte *key,
}
-int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
+int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
uint key_len, enum ha_rkey_function find_flag)
{
int rc;
@@ -955,7 +958,7 @@ error:
}
-int ha_archive::index_next(byte * buf)
+int ha_archive::index_next(uchar * buf)
{
bool found= 0;
@@ -993,24 +996,6 @@ int ha_archive::rnd_init(bool scan)
{
DBUG_PRINT("info", ("archive will retrieve %llu rows",
(unsigned long long) scan_rows));
- stats.records= 0;
-
- /*
- If dirty, we lock, and then reset/flush the data.
- I found that just calling azflush() doesn't always work.
- */
- pthread_mutex_lock(&share->mutex);
- scan_rows= share->rows_recorded;
- if (share->dirty == TRUE)
- {
- if (share->dirty == TRUE)
- {
- DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->dirty= FALSE;
- }
- }
- pthread_mutex_unlock(&share->mutex);
if (read_data_header(&archive))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -1024,7 +1009,7 @@ int ha_archive::rnd_init(bool scan)
This is the method that is used to read a row. It assumes that the row is
positioned where you want it.
*/
-int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
+int ha_archive::get_row(azio_stream *file_to_read, uchar *buf)
{
int rc;
DBUG_ENTER("ha_archive::get_row");
@@ -1051,8 +1036,8 @@ bool ha_archive::fix_rec_buff(unsigned int length)
if (length > record_buffer->length)
{
- byte *newptr;
- if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer,
+ uchar *newptr;
+ if (!(newptr=(uchar*) my_realloc((uchar*) record_buffer->buffer,
length,
MYF(MY_ALLOW_ZERO_PTR))))
DBUG_RETURN(1);
@@ -1065,17 +1050,17 @@ bool ha_archive::fix_rec_buff(unsigned int length)
DBUG_RETURN(0);
}
-int ha_archive::unpack_row(azio_stream *file_to_read, byte *record)
+int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record)
{
DBUG_ENTER("ha_archive::unpack_row");
unsigned int read;
int error;
- byte size_buffer[ARCHIVE_ROW_HEADER_SIZE];
+ uchar size_buffer[ARCHIVE_ROW_HEADER_SIZE];
unsigned int row_len;
/* First we grab the length stored */
- read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error);
+ read= azread(file_to_read, size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error);
if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -1100,21 +1085,21 @@ int ha_archive::unpack_row(azio_stream *file_to_read, byte *record)
}
/* Copy null bits */
- const char *ptr= (const char*) record_buffer->buffer;
+ const uchar *ptr= record_buffer->buffer;
memcpy(record, ptr, table->s->null_bytes);
ptr+= table->s->null_bytes;
for (Field **field=table->field ; *field ; field++)
+ {
if (!((*field)->is_null()))
{
- ptr= (*field)->unpack((char *)record +
- (*field)->offset(table->record[0]), ptr);
+ ptr= (*field)->unpack(record + (*field)->offset(table->record[0]), ptr);
}
-
+ }
DBUG_RETURN(0);
}
-int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf)
+int ha_archive::get_row_version3(azio_stream *file_to_read, uchar *buf)
{
DBUG_ENTER("ha_archive::get_row_version3");
@@ -1124,7 +1109,7 @@ int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf)
}
-int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
+int ha_archive::get_row_version2(azio_stream *file_to_read, uchar *buf)
{
unsigned int read;
int error;
@@ -1190,7 +1175,7 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
if ((size_t) read != size)
DBUG_RETURN(HA_ERR_END_OF_FILE);
- ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
+ ((Field_blob*) table->field[*ptr])->set_ptr(size, (uchar*) last);
last += size;
}
else
@@ -1208,7 +1193,7 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
or by having had ha_archive::rnd_pos() called before it is called.
*/
-int ha_archive::rnd_next(byte *buf)
+int ha_archive::rnd_next(uchar *buf)
{
int rc;
DBUG_ENTER("ha_archive::rnd_next");
@@ -1224,9 +1209,7 @@ int ha_archive::rnd_next(byte *buf)
current_position= aztell(&archive);
rc= get_row(&archive, buf);
-
- if (rc != HA_ERR_END_OF_FILE)
- stats.records++;
+ table->status=rc ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(rc);
}
@@ -1238,7 +1221,7 @@ int ha_archive::rnd_next(byte *buf)
needed.
*/
-void ha_archive::position(const byte *record)
+void ha_archive::position(const uchar *record)
{
DBUG_ENTER("ha_archive::position");
my_store_ptr(ref, ref_length, current_position);
@@ -1253,7 +1236,7 @@ void ha_archive::position(const byte *record)
correctly ordered row.
*/
-int ha_archive::rnd_pos(byte * buf, byte *pos)
+int ha_archive::rnd_pos(uchar * buf, uchar *pos)
{
DBUG_ENTER("ha_archive::rnd_pos");
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
@@ -1346,8 +1329,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
Field *field= table->found_next_number_field;
ulonglong auto_value=
- (ulonglong) field->val_int((char*)(table->record[0] +
- field->offset(table->record[0])));
+ (ulonglong) field->val_int(table->record[0] +
+ field->offset(table->record[0]));
if (share->archive_write.auto_increment < auto_value)
stats.auto_increment_value= share->archive_write.auto_increment=
auto_value;
@@ -1462,12 +1445,33 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
int ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
+
+ /*
+ If dirty, we lock, and then reset/flush the data.
+ I found that just calling azflush() doesn't always work.
+ */
+ pthread_mutex_lock(&share->mutex);
+ if (share->dirty == TRUE)
+ {
+ if (share->dirty == TRUE)
+ {
+ DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
+ azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->dirty= FALSE;
+ }
+ }
+
/*
This should be an accurate number now, though bulk and delayed inserts can
cause the number to be inaccurate.
*/
stats.records= share->rows_recorded;
+ pthread_mutex_unlock(&share->mutex);
+
+ scan_rows= stats.records;
stats.deleted= 0;
+
+ DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
/* Costs quite a bit more to get all information */
if (flag & HA_STATUS_TIME)
{
@@ -1487,7 +1491,9 @@ int ha_archive::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
init_archive_reader();
+ pthread_mutex_lock(&share->mutex);
azflush(&archive, Z_SYNC_FLUSH);
+ pthread_mutex_unlock(&share->mutex);
stats.auto_increment_value= archive.auto_increment;
}
@@ -1549,36 +1555,24 @@ bool ha_archive::is_crashed() const
int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc= 0;
- byte *buf;
const char *old_proc_info;
ha_rows count= share->rows_recorded;
DBUG_ENTER("ha_archive::check");
old_proc_info= thd_proc_info(thd, "Checking table");
/* Flush any waiting data */
+ pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
-
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- rc= HA_ERR_OUT_OF_MEM;
+ pthread_mutex_unlock(&share->mutex);
/*
Now we will rewind the archive file so that we are positioned at the
start of the file.
*/
init_archive_reader();
-
- if (!rc)
- read_data_header(&archive);
-
- if (!rc)
- while (!(rc= get_row(&archive, buf)))
- count--;
-
- my_free((char*)buf, MYF(0));
+ read_data_header(&archive);
+ while (!(rc= get_row(&archive, table->record[0])))
+ count--;
thd_proc_info(thd, old_proc_info);
@@ -1618,7 +1612,7 @@ archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
}
r->length= (int)length;
- if (!(r->buffer= (byte*) my_malloc(r->length,
+ if (!(r->buffer= (uchar*) my_malloc(r->length,
MYF(MY_WME))))
{
my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 8fc54f6715f..ab630ed22fd 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -27,7 +27,7 @@
*/
typedef struct st_archive_record_buffer {
- byte *buffer;
+ uchar *buffer;
uint32 length;
} archive_record_buffer;
@@ -62,12 +62,12 @@ class ha_archive: public handler
azio_stream archive; /* Archive file we are working with */
my_off_t current_position; /* The position of the row we just read */
- byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
+ uchar byte_buffer[IO_SIZE]; /* Initial buffer for our string */
String buffer; /* Buffer used for blob storage */
ha_rows scan_rows; /* Number of rows left in scan */
bool delayed_insert; /* If the insert is delayed */
bool bulk_insert; /* If we are performing a bulk insert */
- const byte *current_key;
+ const uchar *current_key;
uint current_key_len;
uint current_k_offset;
archive_record_buffer *record_buffer;
@@ -87,6 +87,9 @@ public:
ulonglong table_flags() const
{
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_CAN_BIT_FIELD |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
+ HA_STATS_RECORDS_IS_EXACT |
+ HA_HAS_RECORDS |
HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
@@ -100,30 +103,31 @@ public:
uint max_supported_keys() const { return 1; }
uint max_supported_key_length() const { return sizeof(ulonglong); }
uint max_supported_key_part_length() const { return sizeof(ulonglong); }
+ ha_rows records() { return share->rows_recorded; }
int index_init(uint keynr, bool sorted);
- virtual int index_read(byte * buf, const byte * key,
+ virtual int index_read(uchar * buf, const uchar * key,
uint key_len, enum ha_rkey_function find_flag);
- virtual int index_read_idx(byte * buf, uint index, const byte * key,
+ virtual int index_read_idx(uchar * buf, uint index, const uchar * key,
uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
+ int index_next(uchar * buf);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
- int write_row(byte * buf);
- int real_write_row(byte *buf, azio_stream *writer);
+ int write_row(uchar * buf);
+ int real_write_row(uchar *buf, azio_stream *writer);
int delete_all_rows();
int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int get_row(azio_stream *file_to_read, byte *buf);
- int get_row_version2(azio_stream *file_to_read, byte *buf);
- int get_row_version3(azio_stream *file_to_read, byte *buf);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ int get_row(azio_stream *file_to_read, uchar *buf);
+ int get_row_version2(azio_stream *file_to_read, uchar *buf);
+ int get_row_version3(azio_stream *file_to_read, uchar *buf);
ARCHIVE_SHARE *get_share(const char *table_name, int *rc);
int free_share();
int init_archive_writer();
int init_archive_reader();
bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(azio_stream *file_to_read);
- void position(const byte *record);
+ void position(const uchar *record);
int info(uint);
void update_create_info(HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
@@ -140,9 +144,9 @@ public:
bool is_crashed() const;
int check(THD* thd, HA_CHECK_OPT* check_opt);
bool check_and_repair(THD *thd);
- uint32 max_row_length(const byte *buf);
+ uint32 max_row_length(const uchar *buf);
bool fix_rec_buff(unsigned int length);
- int unpack_row(azio_stream *file_to_read, byte *record);
- unsigned int pack_row(byte *record);
+ int unpack_row(azio_stream *file_to_read, uchar *record);
+ unsigned int pack_row(uchar *record);
};
diff --git a/storage/blackhole/CMakeLists.txt b/storage/blackhole/CMakeLists.txt
index 6b02e1effa9..9b6dd7adac9 100644
--- a/storage/blackhole/CMakeLists.txt
+++ b/storage/blackhole/CMakeLists.txt
@@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(blackhole ha_blackhole.cc ha_blackhole.h)
+
+SET(BLACKHOLE_SOURCES ha_blackhole.cc ha_blackhole.h)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(blackhole ${BLACKHOLE_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 6f07c4183f1..4e12e9f0ee7 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -31,6 +31,14 @@ static handler *blackhole_create_handler(handlerton *hton,
}
+/* Static declarations for shared structures */
+
+static pthread_mutex_t blackhole_mutex;
+static HASH blackhole_open_tables;
+
+static st_blackhole_share *get_share(const char *table_name);
+static void free_share(st_blackhole_share *share);
+
/*****************************************************************************
** BLACKHOLE tables
*****************************************************************************/
@@ -53,15 +61,18 @@ const char **ha_blackhole::bas_ext() const
int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
{
DBUG_ENTER("ha_blackhole::open");
- thr_lock_init(&thr_lock);
- thr_lock_data_init(&thr_lock,&lock,NULL);
+
+ if (!(share= get_share(name)))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ thr_lock_data_init(&share->lock, &lock, NULL);
DBUG_RETURN(0);
}
int ha_blackhole::close(void)
{
DBUG_ENTER("ha_blackhole::close");
- thr_lock_delete(&thr_lock);
+ free_share(share);
DBUG_RETURN(0);
}
@@ -83,7 +94,7 @@ const char *ha_blackhole::index_type(uint key_number)
HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE");
}
-int ha_blackhole::write_row(byte * buf)
+int ha_blackhole::write_row(uchar * buf)
{
DBUG_ENTER("ha_blackhole::write_row");
DBUG_RETURN(0);
@@ -96,14 +107,14 @@ int ha_blackhole::rnd_init(bool scan)
}
-int ha_blackhole::rnd_next(byte *buf)
+int ha_blackhole::rnd_next(uchar *buf)
{
DBUG_ENTER("ha_blackhole::rnd_next");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
-int ha_blackhole::rnd_pos(byte * buf, byte *pos)
+int ha_blackhole::rnd_pos(uchar * buf, uchar *pos)
{
DBUG_ENTER("ha_blackhole::rnd_pos");
DBUG_ASSERT(0);
@@ -111,7 +122,7 @@ int ha_blackhole::rnd_pos(byte * buf, byte *pos)
}
-void ha_blackhole::position(const byte *record)
+void ha_blackhole::position(const uchar *record)
{
DBUG_ENTER("ha_blackhole::position");
DBUG_ASSERT(0);
@@ -136,23 +147,45 @@ int ha_blackhole::external_lock(THD *thd, int lock_type)
}
-uint ha_blackhole::lock_count(void) const
-{
- DBUG_ENTER("ha_blackhole::lock_count");
- DBUG_RETURN(0);
-}
-
THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
DBUG_ENTER("ha_blackhole::store_lock");
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ {
+ /*
+ Here is where we get into the guts of a row level lock.
+ If TL_UNLOCK is set
+ If we are not doing a LOCK TABLE or DISCARD/IMPORT
+ TABLESPACE, then allow multiple writers
+ */
+
+ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+ lock_type <= TL_WRITE) && !thd_in_lock_tables(thd)
+ && !thd_tablespace_op(thd))
+ lock_type = TL_WRITE_ALLOW_WRITE;
+
+ /*
+ In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+ MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+ would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+ to t2. Convert the lock to a normal read lock to allow
+ concurrent inserts to t2.
+ */
+
+ if (lock_type == TL_READ_NO_INSERT && !thd_in_lock_tables(thd))
+ lock_type = TL_READ;
+
+ lock.type= lock_type;
+ }
+ *to++= &lock;
DBUG_RETURN(to);
}
-int ha_blackhole::index_read(byte * buf, const byte * key,
- key_part_map keypart_map,
+int ha_blackhole::index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_blackhole::index_read");
@@ -160,7 +193,7 @@ int ha_blackhole::index_read(byte * buf, const byte * key,
}
-int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key,
+int ha_blackhole::index_read_idx_map(uchar * buf, uint idx, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
@@ -169,41 +202,98 @@ int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key,
}
-int ha_blackhole::index_read_last(byte * buf, const byte * key,
- key_part_map keypart_map)
+int ha_blackhole::index_read_last_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map)
{
DBUG_ENTER("ha_blackhole::index_read_last");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
-int ha_blackhole::index_next(byte * buf)
+int ha_blackhole::index_next(uchar * buf)
{
DBUG_ENTER("ha_blackhole::index_next");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
-int ha_blackhole::index_prev(byte * buf)
+int ha_blackhole::index_prev(uchar * buf)
{
DBUG_ENTER("ha_blackhole::index_prev");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
-int ha_blackhole::index_first(byte * buf)
+int ha_blackhole::index_first(uchar * buf)
{
DBUG_ENTER("ha_blackhole::index_first");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
-int ha_blackhole::index_last(byte * buf)
+int ha_blackhole::index_last(uchar * buf)
{
DBUG_ENTER("ha_blackhole::index_last");
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
+
+static st_blackhole_share *get_share(const char *table_name)
+{
+ st_blackhole_share *share;
+ uint length;
+
+ length= (uint) strlen(table_name);
+ pthread_mutex_lock(&blackhole_mutex);
+
+ if (!(share= (st_blackhole_share*) hash_search(&blackhole_open_tables,
+ (uchar*) table_name, length)))
+ {
+ if (!(share= (st_blackhole_share*) my_malloc(sizeof(st_blackhole_share) +
+ length,
+ MYF(MY_WME | MY_ZEROFILL))))
+ goto error;
+
+ share->table_name_length= length;
+ strmov(share->table_name, table_name);
+
+ if (my_hash_insert(&blackhole_open_tables, (uchar*) share))
+ {
+ my_free((uchar*) share, MYF(0));
+ share= NULL;
+ goto error;
+ }
+
+ thr_lock_init(&share->lock);
+ }
+ share->use_count++;
+
+error:
+ pthread_mutex_unlock(&blackhole_mutex);
+ return share;
+}
+
+static void free_share(st_blackhole_share *share)
+{
+ pthread_mutex_lock(&blackhole_mutex);
+ if (!--share->use_count)
+ hash_delete(&blackhole_open_tables, (uchar*) share);
+ pthread_mutex_unlock(&blackhole_mutex);
+}
+
+static void blackhole_free_key(st_blackhole_share *share)
+{
+ thr_lock_delete(&share->lock);
+ my_free((uchar*) share, MYF(0));
+}
+
+static uchar* blackhole_get_key(st_blackhole_share *share, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= share->table_name_length;
+ return (uchar*) share->table_name;
+}
+
static int blackhole_init(void *p)
{
handlerton *blackhole_hton;
@@ -212,6 +302,20 @@ static int blackhole_init(void *p)
blackhole_hton->db_type= DB_TYPE_BLACKHOLE_DB;
blackhole_hton->create= blackhole_create_handler;
blackhole_hton->flags= HTON_CAN_RECREATE;
+
+ VOID(pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST));
+ (void) hash_init(&blackhole_open_tables, system_charset_info,32,0,0,
+ (hash_get_key) blackhole_get_key,
+ (hash_free_key) blackhole_free_key, 0);
+
+ return 0;
+}
+
+static int blackhole_fini(void *p)
+{
+ hash_free(&blackhole_open_tables);
+ pthread_mutex_destroy(&blackhole_mutex);
+
return 0;
}
@@ -227,7 +331,7 @@ mysql_declare_plugin(blackhole)
"/dev/null storage engine (anything you write to it disappears)",
PLUGIN_LICENSE_GPL,
blackhole_init, /* Plugin Init */
- NULL, /* Plugin Deinit */
+ blackhole_fini, /* Plugin Deinit */
0x0100 /* 1.0 */,
NULL, /* status variables */
NULL, /* system variables */
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index 2af12b33077..d5a0d08926c 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -18,13 +18,24 @@
#endif
/*
+ Shared structure for correct LOCK operation
+*/
+struct st_blackhole_share {
+ THR_LOCK lock;
+ uint use_count;
+ uint table_name_length;
+ char table_name[1];
+};
+
+
+/*
Class definition for the blackhole storage engine
"Dumbest named feature ever"
*/
class ha_blackhole: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
- THR_LOCK thr_lock;
+ st_blackhole_share *share;
public:
ha_blackhole(handlerton *hton, TABLE_SHARE *table_arg);
@@ -42,6 +53,7 @@ public:
ulonglong table_flags() const
{
return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
+ HA_BINLOG_STMT_CAPABLE |
HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_CAN_INSERT_DELAYED);
}
@@ -60,23 +72,23 @@ public:
uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
- int write_row(byte * buf);
+ int write_row(uchar * buf);
int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int index_read(byte * buf, const byte * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- key_part_map keypart_map, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, key_part_map keypart_map);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- void position(const byte *record);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_idx_map(uchar * buf, uint idx, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_last_map(uchar * buf, const uchar * key, key_part_map keypart_map);
+ int index_next(uchar * buf);
+ int index_prev(uchar * buf);
+ int index_first(uchar * buf);
+ int index_last(uchar * buf);
+ void position(const uchar *record);
int info(uint flag);
int external_lock(THD *thd, int lock_type);
- uint lock_count(void) const;
int create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info);
THR_LOCK_DATA **store_lock(THD *thd,
diff --git a/storage/csv/CMakeLists.txt b/storage/csv/CMakeLists.txt
index 359d1509a7e..bb0df45e5f4 100644
--- a/storage/csv/CMakeLists.txt
+++ b/storage/csv/CMakeLists.txt
@@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(csv ha_tina.cc ha_tina.h transparent_file.cc transparent_file.h)
+
+SET(CSV_SOURCES ha_tina.cc ha_tina.h transparent_file.cc transparent_file.h)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(csv ${CSV_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 07a4ffc65c5..9a7781e017d 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -46,10 +46,9 @@ TODO:
#endif
#include "mysql_priv.h"
-
+#include <mysql/plugin.h>
#include "ha_tina.h"
-#include <mysql/plugin.h>
/*
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
@@ -69,6 +68,10 @@ static int free_share(TINA_SHARE *share);
static int read_meta_file(File meta_file, ha_rows *rows);
static int write_meta_file(File meta_file, ha_rows rows, bool dirty);
+extern "C" void tina_get_status(void* param, int concurrent_insert);
+extern "C" void tina_update_status(void* param);
+extern "C" my_bool tina_check_status(void* param);
+
/* Stuff for shares */
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
@@ -93,11 +96,11 @@ int sort_set (tina_set *a, tina_set *b)
return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) );
}
-static byte* tina_get_key(TINA_SHARE *share,uint *length,
+static uchar* tina_get_key(TINA_SHARE *share, size_t *length,
my_bool not_used __attribute__((unused)))
{
*length=share->table_name_length;
- return (byte*) share->table_name;
+ return (uchar*) share->table_name;
}
static int tina_init_func(void *p)
@@ -144,7 +147,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
initialize its members.
*/
if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
- (byte*) table_name,
+ (uchar*) table_name,
length)))
{
if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
@@ -164,6 +167,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
share->rows_recorded= 0;
share->update_file_opened= FALSE;
share->tina_write_opened= FALSE;
+ share->data_file_version= 0;
strmov(share->table_name, table_name);
fn_format(share->data_file_name, table_name, "", CSV_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
@@ -174,7 +178,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
goto error;
share->saved_data_file_length= file_stat.st_size;
- if (my_hash_insert(&tina_open_tables, (byte*) share))
+ if (my_hash_insert(&tina_open_tables, (uchar*) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
@@ -203,7 +207,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&tina_mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
return NULL;
}
@@ -236,7 +240,7 @@ static int read_meta_file(File meta_file, ha_rows *rows)
DBUG_ENTER("ha_tina::read_meta_file");
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0)
+ if (my_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0)
!= META_BUFFER_SIZE)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -305,7 +309,7 @@ static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
*ptr= (uchar)dirty;
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0)
+ if (my_write(meta_file, (uchar *)meta_buffer, META_BUFFER_SIZE, 0)
!= META_BUFFER_SIZE)
DBUG_RETURN(-1);
@@ -376,10 +380,10 @@ static int free_share(TINA_SHARE *share)
share->tina_write_opened= FALSE;
}
- hash_delete(&tina_open_tables, (byte*) share);
+ hash_delete(&tina_open_tables, (uchar*) share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
}
pthread_mutex_unlock(&tina_mutex);
@@ -441,10 +445,10 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
*/
current_position(0), next_position(0), local_saved_data_file_length(0),
file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
- records_is_known(0)
+ local_data_file_version(0), records_is_known(0)
{
/* Set our original buffers from pre-allocated memory */
- buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info);
+ buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin);
chain= chain_buffer;
file_buff= new Transparent_file();
}
@@ -454,7 +458,7 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
Encode a buffer into the quoted format.
*/
-int ha_tina::encode_quote(byte *buf)
+int ha_tina::encode_quote(uchar *buf)
{
char attribute_buffer[1024];
String attribute(attribute_buffer, sizeof(attribute_buffer),
@@ -558,7 +562,7 @@ int ha_tina::chain_append()
if (chain_alloced)
{
/* Must cast since my_malloc unlike malloc doesn't have a void ptr */
- if ((chain= (tina_set *) my_realloc((gptr)chain,
+ if ((chain= (tina_set *) my_realloc((uchar*)chain,
chain_size, MYF(MY_WME))) == NULL)
return -1;
}
@@ -584,12 +588,13 @@ int ha_tina::chain_append()
/*
Scans for a row.
*/
-int ha_tina::find_current_row(byte *buf)
+int ha_tina::find_current_row(uchar *buf)
{
off_t end_offset, curr_offset= current_position;
int eoln_len;
my_bitmap_map *org_bitmap;
int error;
+ bool read_all;
DBUG_ENTER("ha_tina::find_current_row");
/*
@@ -601,6 +606,8 @@ int ha_tina::find_current_row(byte *buf)
local_saved_data_file_length, &eoln_len)) == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
+ /* We must read all columns in case a table is opened for update */
+ read_all= !bitmap_is_clear_all(table->write_set);
/* Avoid asserts in ::store() for columns that are not going to be updated */
org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
error= HA_ERR_CRASHED_ON_USAGE;
@@ -609,37 +616,41 @@ int ha_tina::find_current_row(byte *buf)
for (Field **field=table->field ; *field ; field++)
{
+ char curr_char;
+
buffer.length(0);
- if (curr_offset < end_offset &&
- file_buff->get_value(curr_offset) == '"')
+ if (curr_offset >= end_offset)
+ goto err;
+ curr_char= file_buff->get_value(curr_offset);
+ if (curr_char == '"')
{
      curr_offset++; // Increment past the first quote
- for(;curr_offset < end_offset; curr_offset++)
+ for(; curr_offset < end_offset; curr_offset++)
{
+ curr_char= file_buff->get_value(curr_offset);
// Need to convert line feeds!
- if (file_buff->get_value(curr_offset) == '"' &&
- ((file_buff->get_value(curr_offset + 1) == ',') ||
- (curr_offset == end_offset -1 )))
+ if (curr_char == '"' &&
+ (curr_offset == end_offset - 1 ||
+ file_buff->get_value(curr_offset + 1) == ','))
{
curr_offset+= 2; // Move past the , and the "
break;
}
- if (file_buff->get_value(curr_offset) == '\\' &&
- curr_offset != (end_offset - 1))
+ if (curr_char == '\\' && curr_offset != (end_offset - 1))
{
curr_offset++;
- if (file_buff->get_value(curr_offset) == 'r')
+ curr_char= file_buff->get_value(curr_offset);
+ if (curr_char == 'r')
buffer.append('\r');
- else if (file_buff->get_value(curr_offset) == 'n' )
+        else if (curr_char == 'n')
buffer.append('\n');
- else if ((file_buff->get_value(curr_offset) == '\\') ||
- (file_buff->get_value(curr_offset) == '"'))
- buffer.append(file_buff->get_value(curr_offset));
+ else if (curr_char == '\\' || curr_char == '"')
+ buffer.append(curr_char);
        else /* This could only happen with an externally created file */
{
buffer.append('\\');
- buffer.append(file_buff->get_value(curr_offset));
+ buffer.append(curr_char);
}
}
else // ordinary symbol
@@ -650,36 +661,30 @@ int ha_tina::find_current_row(byte *buf)
*/
if (curr_offset == end_offset - 1)
goto err;
- buffer.append(file_buff->get_value(curr_offset));
+ buffer.append(curr_char);
}
}
}
- else if (my_isdigit(system_charset_info,
- file_buff->get_value(curr_offset)))
+ else
{
- for(;curr_offset < end_offset; curr_offset++)
+ for(; curr_offset < end_offset; curr_offset++)
{
- if (file_buff->get_value(curr_offset) == ',')
+ curr_char= file_buff->get_value(curr_offset);
+ if (curr_char == ',')
{
- curr_offset+= 1; // Move past the ,
+ curr_offset++; // Skip the ,
break;
}
-
- if (my_isdigit(system_charset_info, file_buff->get_value(curr_offset)))
- buffer.append(file_buff->get_value(curr_offset));
- else if (file_buff->get_value(curr_offset) == '.')
- buffer.append(file_buff->get_value(curr_offset));
- else
- goto err;
+ buffer.append(curr_char);
}
}
- else
+
+ if (read_all || bitmap_is_set(table->read_set, (*field)->field_index))
{
- goto err;
+ if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(),
+ CHECK_FIELD_WARN))
+ goto err;
}
-
- if (bitmap_is_set(table->read_set, (*field)->field_index))
- (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
}
next_position= end_offset + eoln_len;
error= 0;
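
A worked example of the rewritten decoding loop, on a hypothetical data file line (not taken from the source):

  /*
    Raw line in the data file:   "a\"b\nc",42<newline>

    Field 1 is quoted: the loop strips the enclosing quotes, maps \" to "
    and \n to a linefeed, then stops at the `",` sequence, yielding a"b<LF>c.
    Field 2 is unquoted: every character up to the ',' or end of line is now
    copied verbatim, yielding 42. (The old code accepted only digits and '.'
    in unquoted fields; the new code copies anything and lets Field::store()
    with CHECK_FIELD_WARN flag bad values.)
  */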
@@ -785,18 +790,6 @@ void ha_tina::update_status()
}
-bool ha_tina::check_if_locking_is_allowed(uint sql_command,
- ulong type, TABLE *table,
- uint count, uint current,
- uint *system_count,
- bool called_by_privileged_thread)
-{
- if (!called_by_privileged_thread)
- return check_if_log_table_locking_is_allowed(sql_command, type, table);
-
- return TRUE;
-}
-
/*
Open a database file. Keep in mind that tables are caches, so
this will not be called for every request. Any sort of positions
@@ -815,6 +808,7 @@ int ha_tina::open(const char *name, int mode, uint open_options)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
+ local_data_file_version= share->data_file_version;
if ((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1)
DBUG_RETURN(0);
@@ -851,7 +845,7 @@ int ha_tina::close(void)
of the file and appends the data. In an error case it really should
just truncate to the original position (this is not done yet).
*/
-int ha_tina::write_row(byte * buf)
+int ha_tina::write_row(uchar * buf)
{
int size;
DBUG_ENTER("ha_tina::write_row");
@@ -871,7 +865,7 @@ int ha_tina::write_row(byte * buf)
DBUG_RETURN(-1);
/* use pwrite, as concurrent reader could have changed the position */
- if (my_write(share->tina_write_filedes, (byte*)buffer.ptr(), size,
+ if (my_write(share->tina_write_filedes, (uchar*)buffer.ptr(), size,
MYF(MY_WME | MY_NABP)))
DBUG_RETURN(-1);
@@ -904,6 +898,7 @@ int ha_tina::open_update_temp_file_if_needed()
0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0)
return 1;
share->update_file_opened= TRUE;
+ temp_file_length= 0;
}
return 0;
}
@@ -916,7 +911,7 @@ int ha_tina::open_update_temp_file_if_needed()
This will be called in a table scan right before the previous ::rnd_next()
call.
*/
-int ha_tina::update_row(const byte * old_data, byte * new_data)
+int ha_tina::update_row(const uchar * old_data, uchar * new_data)
{
int size;
DBUG_ENTER("ha_tina::update_row");
@@ -928,15 +923,23 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
size= encode_quote(new_data);
+ /*
+    During an update we mark each updated record as deleted
+    (see chain_append()) and then write the new one to the temporary data
+    file. At the end of the sequence, rnd_end() appends all unmarked
+    records from the data file to the temporary data file and then
+    renames it. temp_file_length is used to calculate the new data file
+    length.
+ */
if (chain_append())
DBUG_RETURN(-1);
if (open_update_temp_file_if_needed())
DBUG_RETURN(-1);
- if (my_write(update_temp_file, (byte*)buffer.ptr(), size,
+ if (my_write(update_temp_file, (uchar*)buffer.ptr(), size,
MYF(MY_WME | MY_NABP)))
DBUG_RETURN(-1);
+ temp_file_length+= size;
/* UPDATE should never happen on the log tables */
DBUG_ASSERT(!share->is_log_table);
@@ -954,7 +957,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
The table will then be deleted/positioned based on the ORDER (so RANDOM,
DESC, ASC).
*/
-int ha_tina::delete_row(const byte * buf)
+int ha_tina::delete_row(const uchar * buf)
{
DBUG_ENTER("ha_tina::delete_row");
ha_statistic_increment(&SSV::ha_delete_count);
@@ -963,6 +966,11 @@ int ha_tina::delete_row(const byte * buf)
DBUG_RETURN(-1);
stats.records--;
+ /* Update shared info */
+ DBUG_ASSERT(share->rows_recorded);
+ pthread_mutex_lock(&share->mutex);
+ share->rows_recorded--;
+ pthread_mutex_unlock(&share->mutex);
/* DELETE should never happen on the log table */
DBUG_ASSERT(!share->is_log_table);
@@ -971,6 +979,33 @@ int ha_tina::delete_row(const byte * buf)
}
+/**
+ @brief Initialize the data file.
+
+ @details Compare the local version of the data file with the shared one.
+  If they differ, the data file was changed behind our back, and we have
+  to reopen it to make those changes visible.
+  Call @c file_buff->init_buff() at the end to read the beginning of the
+  data file into the buffer.
+
+ @retval 0 OK.
+ @retval 1 There was an error.
+*/
+
+int ha_tina::init_data_file()
+{
+ if (local_data_file_version != share->data_file_version)
+ {
+ local_data_file_version= share->data_file_version;
+ if (my_close(data_file, MYF(0)) ||
+ (data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1)
+ return 1;
+ }
+ file_buff->init_buff(data_file);
+ return 0;
+}
+
+
/*
All table scans call this first.
The order of a table scan is:
@@ -1007,9 +1042,8 @@ int ha_tina::rnd_init(bool scan)
DBUG_ENTER("ha_tina::rnd_init");
/* set buffer to the beginning of the file */
- file_buff->init_buff(data_file);
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ if (share->crashed || init_data_file())
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
current_position= next_position= 0;
stats.records= 0;
@@ -1033,7 +1067,7 @@ int ha_tina::rnd_init(bool scan)
NULL and "". This is ok since this table handler is for spreadsheets and
they don't know about them either :)
*/
-int ha_tina::rnd_next(byte *buf)
+int ha_tina::rnd_next(uchar *buf)
{
int rc;
DBUG_ENTER("ha_tina::rnd_next");
@@ -1065,7 +1099,7 @@ int ha_tina::rnd_next(byte *buf)
  it's just a position. Look at the bdb code if you want to see a case
  where something other than a number is stored.
*/
-void ha_tina::position(const byte *record)
+void ha_tina::position(const uchar *record)
{
DBUG_ENTER("ha_tina::position");
my_store_ptr(ref, ref_length, current_position);
@@ -1078,7 +1112,7 @@ void ha_tina::position(const byte *record)
my_get_ptr() retrieves the data for you.
*/
-int ha_tina::rnd_pos(byte * buf, byte *pos)
+int ha_tina::rnd_pos(uchar * buf, uchar *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
@@ -1174,15 +1208,18 @@ int ha_tina::rnd_end()
while ((file_buffer_start != -1)) // while not end of file
{
bool in_hole= get_write_pos(&write_end, ptr);
+ off_t write_length= write_end - write_begin;
/* if there is something to write, write it */
- if ((write_end - write_begin) &&
- (my_write(update_temp_file,
- (byte*)(file_buff->ptr() +
- (write_begin - file_buff->start())),
- write_end - write_begin, MYF_RW)))
- goto error;
-
+ if (write_length)
+ {
+ if (my_write(update_temp_file,
+ (uchar*) (file_buff->ptr() +
+ (write_begin - file_buff->start())),
+ write_length, MYF_RW))
+ goto error;
+ temp_file_length+= write_length;
+ }
if (in_hole)
{
/* skip hole */
@@ -1230,11 +1267,26 @@ int ha_tina::rnd_end()
if (((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1))
DBUG_RETURN(-1);
/*
+    As we reopened the data file, increase share->data_file_version to
+    force other threads that are waiting on a table lock and have already
+    opened the table to reopen the data file. That makes the latest
+    changes visible to them. Update local_data_file_version as well,
+    since there is no need to reopen the file in the current thread.
+ */
+ share->data_file_version++;
+ local_data_file_version= share->data_file_version;
+ /*
The datafile is consistent at this point and the write filedes is
closed, so nothing worrying will happen to it in case of a crash.
Here we record this fact to the meta-file.
*/
(void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
+ /*
+ Update local_saved_data_file_length with the real length of the
+ data file.
+ */
+ local_saved_data_file_length= temp_file_length;
}
DBUG_RETURN(0);
@@ -1266,7 +1318,7 @@ error:
int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
char repaired_fname[FN_REFLEN];
- byte *buf;
+ uchar *buf;
File repair_file;
int rc;
ha_rows rows_repaired= 0;
@@ -1282,11 +1334,12 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
/* Don't assert in field::val() functions */
table->use_all_columns();
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/* position buffer to the start of the file */
- file_buff->init_buff(data_file);
+ if (init_data_file())
+ DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
/*
Local_saved_data_file_length is initialized during the lock phase.
@@ -1300,6 +1353,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
/* Read the file row-by-row. If everything is ok, repair is not needed. */
while (!(rc= find_current_row(buf)))
{
+ thd_inc_row_count(thd);
rows_repaired++;
current_position= next_position;
}
@@ -1338,7 +1392,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
write_end= min(file_buff->end(), current_position);
if ((write_end - write_begin) &&
- (my_write(repair_file, (byte*)file_buff->ptr(),
+ (my_write(repair_file, (uchar*)file_buff->ptr(),
write_end - write_begin, MYF_RW)))
DBUG_RETURN(-1);
@@ -1392,6 +1446,11 @@ int ha_tina::delete_all_rows()
rc= my_chsize(share->tina_write_filedes, 0, 0, MYF(MY_WME));
stats.records=0;
+ /* Update shared info */
+ pthread_mutex_lock(&share->mutex);
+ share->rows_recorded= 0;
+ pthread_mutex_unlock(&share->mutex);
+ local_saved_data_file_length= 0;
DBUG_RETURN(rc);
}
@@ -1442,17 +1501,18 @@ int ha_tina::create(const char *name, TABLE *table_arg,
int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc= 0;
- byte *buf;
+ uchar *buf;
const char *old_proc_info;
ha_rows count= share->rows_recorded;
DBUG_ENTER("ha_tina::check");
old_proc_info= thd_proc_info(thd, "Checking table");
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/* position buffer to the start of the file */
- file_buff->init_buff(data_file);
+ if (init_data_file())
+ DBUG_RETURN(HA_ERR_CRASHED);
/*
Local_saved_data_file_length is initialized during the lock phase.
@@ -1465,6 +1525,7 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
/* Read the file row-by-row. If everything is ok, repair is not needed. */
while (!(rc= find_current_row(buf)))
{
+ thd_inc_row_count(thd);
count--;
current_position= next_position;
}
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index c096f21fca2..5ce09783b9b 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -49,6 +49,7 @@ typedef struct st_tina_share {
File tina_write_filedes; /* File handler for readers */
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
+ uint data_file_version; /* Version of the data file used */
} TINA_SHARE;
struct tina_set {
@@ -63,7 +64,8 @@ class ha_tina: public handler
off_t current_position; /* Current position in the file during a file scan */
off_t next_position; /* Next position in the file scan */
off_t local_saved_data_file_length; /* save position for reads */
- byte byte_buffer[IO_SIZE];
+ off_t temp_file_length;
+ uchar byte_buffer[IO_SIZE];
Transparent_file *file_buff;
File data_file; /* File handler for readers */
File update_temp_file;
@@ -76,21 +78,23 @@ class ha_tina: public handler
tina_set chain_buffer[DEFAULT_CHAIN_LENGTH];
tina_set *chain;
tina_set *chain_ptr;
- byte chain_alloced;
+ uchar chain_alloced;
uint32 chain_size;
+ uint local_data_file_version; /* Saved version of the data file used */
bool records_is_known;
private:
bool get_write_pos(off_t *end_pos, tina_set *closest_hole);
int open_update_temp_file_if_needed();
int init_tina_writer();
+ int init_data_file();
public:
ha_tina(handlerton *hton, TABLE_SHARE *table_arg);
~ha_tina()
{
if (chain_alloced)
- my_free((gptr)chain, 0);
+ my_free(chain, 0);
if (file_buff)
delete file_buff;
}
@@ -99,7 +103,8 @@ public:
const char **bas_ext() const;
ulonglong table_flags() const
{
- return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT);
+ return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
{
@@ -126,19 +131,14 @@ public:
*/
ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
- virtual bool check_if_locking_is_allowed(uint sql_command,
- ulong type, TABLE *table,
- uint count, uint current,
- uint *system_count,
- bool called_by_logger_thread);
int open(const char *name, int mode, uint open_options);
int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
+ int write_row(uchar * buf);
+ int update_row(const uchar * old_data, uchar * new_data);
+ int delete_row(const uchar * buf);
int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
bool check_and_repair(THD *thd);
int check(THD* thd, HA_CHECK_OPT* check_opt);
bool is_crashed() const;
@@ -146,7 +146,7 @@ public:
int repair(THD* thd, HA_CHECK_OPT* check_opt);
/* This is required for SQL layer to know that we support autorepair */
bool auto_repair() const { return 1; }
- void position(const byte *record);
+ void position(const uchar *record);
int info(uint);
int extra(enum ha_extra_function operation);
int delete_all_rows(void);
@@ -165,8 +165,8 @@ public:
void update_status();
/* The following methods were added just for TINA */
- int encode_quote(byte *buf);
- int find_current_row(byte *buf);
+ int encode_quote(uchar *buf);
+ int find_current_row(uchar *buf);
int chain_append();
};
diff --git a/storage/csv/transparent_file.cc b/storage/csv/transparent_file.cc
index 27cc8c024b4..a200fa6ac36 100644
--- a/storage/csv/transparent_file.cc
+++ b/storage/csv/transparent_file.cc
@@ -22,12 +22,12 @@
Transparent_file::Transparent_file() : lower_bound(0), buff_size(IO_SIZE)
{
- buff= (byte *) my_malloc(buff_size*sizeof(byte), MYF(MY_WME));
+ buff= (uchar *) my_malloc(buff_size*sizeof(uchar), MYF(MY_WME));
}
Transparent_file::~Transparent_file()
{
- my_free((gptr)buff, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*)buff, MYF(MY_ALLOW_ZERO_PTR));
}
void Transparent_file::init_buff(File filedes_arg)
@@ -40,7 +40,7 @@ void Transparent_file::init_buff(File filedes_arg)
upper_bound= my_read(filedes, buff, buff_size, MYF(0));
}
-byte *Transparent_file::ptr()
+uchar *Transparent_file::ptr()
{
return buff;
}
@@ -57,18 +57,18 @@ off_t Transparent_file::end()
off_t Transparent_file::read_next()
{
- off_t bytes_read;
+ size_t bytes_read;
/*
    No need to seek here, as the file managed by the Transparent_file
    class always points at the upper_bound byte
*/
if ((bytes_read= my_read(filedes, buff, buff_size, MYF(0))) == MY_FILE_ERROR)
- return -1;
+ return (off_t) -1;
/* end of file */
if (!bytes_read)
- return -1;
+ return (off_t) -1;
lower_bound= upper_bound;
upper_bound+= bytes_read;
@@ -79,26 +79,24 @@ off_t Transparent_file::read_next()
char Transparent_file::get_value(off_t offset)
{
- off_t bytes_read;
+ size_t bytes_read;
/* check boundaries */
if ((lower_bound <= offset) && (offset < upper_bound))
return buff[offset - lower_bound];
- else
- {
- VOID(my_seek(filedes, offset, MY_SEEK_SET, MYF(0)));
- /* read appropriate portion of the file */
- if ((bytes_read= my_read(filedes, buff, buff_size,
- MYF(0))) == MY_FILE_ERROR)
- return 0;
-
- lower_bound= offset;
- upper_bound= lower_bound + bytes_read;
-
- /* end of file */
- if (upper_bound == offset)
- return 0;
-
- return buff[0];
- }
+
+ VOID(my_seek(filedes, offset, MY_SEEK_SET, MYF(0)));
+ /* read appropriate portion of the file */
+ if ((bytes_read= my_read(filedes, buff, buff_size,
+ MYF(0))) == MY_FILE_ERROR)
+ return 0;
+
+ lower_bound= offset;
+ upper_bound= lower_bound + bytes_read;
+
+ /* end of file */
+ if (upper_bound == offset)
+ return 0;
+
+ return buff[0];
}
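
Transparent_file keeps a single window of buff_size bytes, [lower_bound, upper_bound), over the file: get_value() serves in-window offsets straight from memory and otherwise seeks and refills, returning 0 for offsets past end of file. A hedged usage sketch, assuming data_file is a descriptor obtained earlier with my_open():

  Transparent_file *cache= new Transparent_file();
  cache->init_buff(data_file);       /* window now covers the file head */
  char c= cache->get_value(0);       /* served from the in-memory window */
  char d= cache->get_value(200000);  /* outside the window: seek + refill; */
                                     /* yields 0 if the offset is past EOF */
  delete cache;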
diff --git a/storage/csv/transparent_file.h b/storage/csv/transparent_file.h
index ceb59ec7caf..4c0f4cce7e7 100644
--- a/storage/csv/transparent_file.h
+++ b/storage/csv/transparent_file.h
@@ -21,7 +21,7 @@
class Transparent_file
{
File filedes;
- byte *buff; /* in-memory window to the file or mmaped area */
+ uchar *buff; /* in-memory window to the file or mmaped area */
/* current window sizes */
off_t lower_bound;
off_t upper_bound;
@@ -33,7 +33,7 @@ public:
~Transparent_file();
void init_buff(File filedes_arg);
- byte *ptr();
+ uchar *ptr();
off_t start();
off_t end();
char get_value (off_t offset);
diff --git a/storage/example/CMakeLists.txt b/storage/example/CMakeLists.txt
index f4e8639ef25..99c223f3f85 100644
--- a/storage/example/CMakeLists.txt
+++ b/storage/example/CMakeLists.txt
@@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(example ha_example.cc)
+
+SET(EXAMPLE_SOURCES ha_example.cc)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(example ${EXAMPLE_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index a4cdcafc6d0..b7186dda676 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -79,6 +79,10 @@
ha_example::open() would also have been necessary. Calls to
  ha_example::extra() are hints as to what will be occurring to the request.
+  A longer example, called the "Skeleton Engine", can be found on
+  TangentOrg. It has both an engine and a full build environment
+  for building a pluggable storage engine.
+
Happy coding!<br>
-Brian
*/
@@ -114,11 +118,11 @@ pthread_mutex_t example_mutex;
Function we use in the creation of our hash to get key.
*/
-static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
+static uchar* example_get_key(EXAMPLE_SHARE *share, size_t *length,
my_bool not_used __attribute__((unused)))
{
*length=share->table_name_length;
- return (byte*) share->table_name;
+ return (uchar*) share->table_name;
}
@@ -132,7 +136,6 @@ static int example_init_func(void *p)
(hash_get_key) example_get_key,0,0);
example_hton->state= SHOW_OPTION_YES;
- example_hton->db_type= DB_TYPE_EXAMPLE_DB;
example_hton->create= example_create_handler;
example_hton->flags= HTON_CAN_RECREATE;
@@ -172,7 +175,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
length=(uint) strlen(table_name);
if (!(share=(EXAMPLE_SHARE*) hash_search(&example_open_tables,
- (byte*) table_name,
+ (uchar*) table_name,
length)))
{
if (!(share=(EXAMPLE_SHARE *)
@@ -189,7 +192,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
share->table_name_length=length;
share->table_name=tmp_name;
strmov(share->table_name,table_name);
- if (my_hash_insert(&example_open_tables, (byte*) share))
+ if (my_hash_insert(&example_open_tables, (uchar*) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
@@ -201,7 +204,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free(share, MYF(0));
return NULL;
}
@@ -218,10 +221,10 @@ static int free_share(EXAMPLE_SHARE *share)
pthread_mutex_lock(&example_mutex);
if (!--share->use_count)
{
- hash_delete(&example_open_tables, (byte*) share);
+ hash_delete(&example_open_tables, (uchar*) share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free(share, MYF(0));
}
pthread_mutex_unlock(&example_mutex);
@@ -349,7 +352,7 @@ int ha_example::close(void)
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc
*/
-int ha_example::write_row(byte * buf)
+int ha_example::write_row(uchar *buf)
{
DBUG_ENTER("ha_example::write_row");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -378,7 +381,7 @@ int ha_example::write_row(byte * buf)
@see
sql_select.cc, sql_acl.cc, sql_update.cc and sql_insert.cc
*/
-int ha_example::update_row(const byte * old_data, byte * new_data)
+int ha_example::update_row(const uchar *old_data, uchar *new_data)
{
DBUG_ENTER("ha_example::update_row");
@@ -406,7 +409,7 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
sql_acl.cc, sql_udf.cc, sql_delete.cc, sql_insert.cc and sql_select.cc
*/
-int ha_example::delete_row(const byte * buf)
+int ha_example::delete_row(const uchar *buf)
{
DBUG_ENTER("ha_example::delete_row");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -420,10 +423,10 @@ int ha_example::delete_row(const byte * buf)
index.
*/
-int ha_example::index_read(byte * buf, const byte * key,
- key_part_map keypart_map __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
+int ha_example::index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
{
DBUG_ENTER("ha_example::index_read");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -435,7 +438,7 @@ int ha_example::index_read(byte * buf, const byte * key,
Used to read forward through the index.
*/
-int ha_example::index_next(byte * buf)
+int ha_example::index_next(uchar *buf)
{
DBUG_ENTER("ha_example::index_next");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -447,7 +450,7 @@ int ha_example::index_next(byte * buf)
Used to read backwards through the index.
*/
-int ha_example::index_prev(byte * buf)
+int ha_example::index_prev(uchar *buf)
{
DBUG_ENTER("ha_example::index_prev");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -464,7 +467,7 @@ int ha_example::index_prev(byte * buf)
@see
opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc
*/
-int ha_example::index_first(byte * buf)
+int ha_example::index_first(uchar *buf)
{
DBUG_ENTER("ha_example::index_first");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -481,7 +484,7 @@ int ha_example::index_first(byte * buf)
@see
opt_range.cc, opt_sum.cc, sql_handler.cc and sql_select.cc
*/
-int ha_example::index_last(byte * buf)
+int ha_example::index_last(uchar *buf)
{
DBUG_ENTER("ha_example::index_last");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -528,7 +531,7 @@ int ha_example::rnd_end()
@see
filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc and sql_update.cc
*/
-int ha_example::rnd_next(byte *buf)
+int ha_example::rnd_next(uchar *buf)
{
DBUG_ENTER("ha_example::rnd_next");
DBUG_RETURN(HA_ERR_END_OF_FILE);
@@ -556,7 +559,7 @@ int ha_example::rnd_next(byte *buf)
@see
filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc
*/
-void ha_example::position(const byte *record)
+void ha_example::position(const uchar *record)
{
DBUG_ENTER("ha_example::position");
DBUG_VOID_RETURN;
@@ -576,7 +579,7 @@ void ha_example::position(const byte *record)
@see
filesort.cc, records.cc, sql_insert.cc, sql_select.cc and sql_update.cc
*/
-int ha_example::rnd_pos(byte * buf, byte *pos)
+int ha_example::rnd_pos(uchar *buf, uchar *pos)
{
DBUG_ENTER("ha_example::rnd_pos");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
@@ -723,6 +726,11 @@ int ha_example::external_lock(THD *thd, int lock_type)
Called from lock.cc by get_lock_data().
+ @note
+    In this method one should NEVER rely on table->in_use; it may, in
+    fact, refer to a different thread! (This happens if get_lock_data()
+    is called from the mysql_lock_abort_for_thread() function.)
+
@see
get_lock_data() in lock.cc
*/
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index 9777a478209..ec3987ced5d 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -82,7 +82,12 @@ public:
*/
ulonglong table_flags() const
{
- return 0;
+ /*
+      We declare this engine to be row-capable only, so that there is an
+      engine that can handle nothing but row-based logging. This is used
+      in testing.
+ */
+ return HA_BINLOG_ROW_CAPABLE;
}
/** @brief
@@ -172,50 +177,50 @@ public:
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int write_row(byte * buf);
+ int write_row(uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int update_row(const byte * old_data, byte * new_data);
+ int update_row(const uchar *old_data, uchar *new_data);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int delete_row(const byte * buf);
+ int delete_row(const uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int index_read(byte * buf, const byte * key,
- key_part_map keypart_map, enum ha_rkey_function find_flag);
+ int index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map, enum ha_rkey_function find_flag);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int index_next(byte * buf);
+ int index_next(uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int index_prev(byte * buf);
+ int index_prev(uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int index_first(byte * buf);
+ int index_first(uchar *buf);
/** @brief
We implement this in ha_example.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
*/
- int index_last(byte * buf);
+ int index_last(uchar *buf);
/** @brief
Unlike index_init(), rnd_init() can be called two consecutive times
@@ -227,9 +232,9 @@ public:
*/
int rnd_init(bool scan); //required
int rnd_end();
- int rnd_next(byte *buf); ///< required
- int rnd_pos(byte * buf, byte *pos); ///< required
- void position(const byte *record); ///< required
+ int rnd_next(uchar *buf); ///< required
+ int rnd_pos(uchar *buf, uchar *pos); ///< required
+ void position(const uchar *record); ///< required
int info(uint); ///< required
int extra(enum ha_extra_function operation);
int external_lock(THD *thd, int lock_type); ///< required
diff --git a/storage/example/plug.in b/storage/example/plug.in
index ba35b1ea117..ee6beaac64f 100644
--- a/storage/example/plug.in
+++ b/storage/example/plug.in
@@ -1,3 +1,3 @@
MYSQL_STORAGE_ENGINE(example,, [Example Storage Engine],
- [Skeleton for Storage Engines for developers], [max,max-no-ndb])
+ [Example for Storage Engines for developers], [max,max-no-ndb])
MYSQL_PLUGIN_DYNAMIC(example, [ha_example.la])
diff --git a/storage/federated/CMakeLists.txt b/storage/federated/CMakeLists.txt
index 1f1f4dcd517..62064a633b1 100644
--- a/storage/federated/CMakeLists.txt
+++ b/storage/federated/CMakeLists.txt
@@ -19,4 +19,9 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(federated ha_federated.cc)
+
+SET(FEDERATED_SOURCES ha_federated.cc)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(federated ${FEDERATED_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index aa7184268f5..3fc17e18e76 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -388,6 +388,11 @@
/* Variables for federated share methods */
static HASH federated_open_tables; // To track open tables
pthread_mutex_t federated_mutex; // To init the hash
+static char ident_quote_char= '`'; // Character for quoting
+ // identifiers
+static char value_quote_char= '\''; // Character for quoting
+ // literals
+static const int bulk_padding= 64; // bytes "overhead" in packet
/* Variables used when chopping off trailing characters */
static const uint sizeof_trailing_comma= sizeof(", ") - 1;
@@ -414,11 +419,11 @@ static handler *federated_create_handler(handlerton *hton,
/* Function we use in the creation of our hash to get key */
-static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
- my_bool not_used __attribute__ ((unused)))
+static uchar *federated_get_key(FEDERATED_SHARE *share, size_t *length,
+ my_bool not_used __attribute__ ((unused)))
{
*length= share->share_key_length;
- return (byte*) share->share_key;
+ return (uchar*) share->share_key;
}
/*
@@ -444,6 +449,13 @@ int federated_db_init(void *p)
federated_hton->create= federated_create_handler;
federated_hton->flags= HTON_ALTER_NOT_SUPPORTED | HTON_NO_PARTITION;
+ /*
+ Support for transactions disabled until WL#2952 fixes it.
+ We do it like this to avoid "defined but not used" compiler warnings.
+ */
+ federated_hton->commit= 0;
+ federated_hton->rollback= 0;
+
if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST))
goto error;
if (!hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0,
@@ -477,105 +489,54 @@ int federated_done(void *p)
}
-/*
- Check (in create) whether the tables exists, and that it can be connected to
+/**
+  @brief Append an identifier to the string.
- SYNOPSIS
- check_foreign_data_source()
- share pointer to FEDERATED share
- table_create_flag tells us that ::create is the caller,
- therefore, return CANT_CREATE_FEDERATED_TABLE
+ @param[in,out] string The target string.
+ @param[in] name Identifier name
+ @param[in] length Length of identifier name in bytes
+ @param[in] quote_char Quote char to use for quoting identifier.
- DESCRIPTION
- This method first checks that the connection information that parse url
- has populated into the share will be sufficient to connect to the foreign
- table, and if so, does the foreign table exist.
+ @return Operation Status
+ @retval FALSE OK
+ @retval TRUE There was an error appending to the string.
+
+ @note This function is based upon the append_identifier() function
+ in sql_show.cc except that quoting always occurs.
*/
-static int check_foreign_data_source(FEDERATED_SHARE *share,
- bool table_create_flag)
+static bool append_ident(String *string, const char *name, uint length,
+ const char quote_char)
{
- char escaped_table_name[NAME_LEN*2];
- char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
- uint error_code;
- String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
- MYSQL *mysql;
- DBUG_ENTER("ha_federated::check_foreign_data_source");
-
- /* Zero the length, otherwise the string will have misc chars */
- query.length(0);
+ bool result;
+ uint clen;
+ const char *name_end;
+ DBUG_ENTER("append_ident");
- /* error out if we can't alloc memory for mysql_init(NULL) (per Georg) */
- if (!(mysql= mysql_init(NULL)))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- /* check if we can connect */
- if (!mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
- {
- /*
- we want the correct error message, but it to return
- ER_CANT_CREATE_FEDERATED_TABLE if called by ::create
- */
- error_code= (table_create_flag ?
- ER_CANT_CREATE_FEDERATED_TABLE :
- ER_CONNECT_TO_FOREIGN_DATA_SOURCE);
-
- my_sprintf(error_buffer,
- (error_buffer,
- "database: '%s' username: '%s' hostname: '%s'",
- share->database, share->username, share->hostname));
-
- my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), error_buffer);
- goto error;
- }
- else
+ if (quote_char)
{
- int escaped_table_name_length= 0;
- /*
- Since we do not support transactions at this version, we can let the
- client API silently reconnect. For future versions, we will need more
- logic to deal with transactions
- */
- mysql->reconnect= 1;
- /*
- Note: I am not using INORMATION_SCHEMA because this needs to work with
- versions prior to 5.0
-
- if we can connect, then make sure the table exists
+ string->reserve(length * 2 + 2);
+ if ((result= string->append(&quote_char, 1, system_charset_info)))
+ goto err;
- the query will be: SELECT * FROM `tablename` WHERE 1=0
- */
- query.append(STRING_WITH_LEN("SELECT * FROM `"));
- escaped_table_name_length=
- escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name,
- sizeof(escaped_table_name),
- share->table_name,
- share->table_name_length);
- query.append(escaped_table_name, escaped_table_name_length);
- query.append(STRING_WITH_LEN("` WHERE 1=0"));
-
- if (mysql_real_query(mysql, query.ptr(), query.length()))
+ for (name_end= name+length; name < name_end; name+= clen)
{
- error_code= table_create_flag ?
- ER_CANT_CREATE_FEDERATED_TABLE : ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST;
- my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
- mysql_errno(mysql), mysql_error(mysql)));
-
- my_error(error_code, MYF(0), error_buffer);
- goto error;
+ uchar c= *(uchar *) name;
+ if (!(clen= my_mbcharlen(system_charset_info, c)))
+ clen= 1;
+ if (clen == 1 && c == (uchar) quote_char &&
+ (result= string->append(&quote_char, 1, system_charset_info)))
+ goto err;
+ if ((result= string->append(name, clen, string->charset())))
+ goto err;
}
+ result= string->append(&quote_char, 1, system_charset_info);
}
- error_code=0;
+ else
+ result= string->append(name, length, system_charset_info);
-error:
- mysql_close(mysql);
- DBUG_RETURN(error_code);
+err:
+ DBUG_RETURN(result);
}
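
The quoting rule is the standard one: wrap the identifier in quote_char and double any embedded quote_char, copying multi-byte characters whole under the guidance of my_mbcharlen(). As a self-contained illustration of the same rule (a hypothetical stand-in, not the function itself):

  #include <string>

  /* Mirrors append_ident()'s quoting for single-byte charsets. */
  static std::string quote_ident(const std::string &name, char quote_char)
  {
    std::string out(1, quote_char);
    for (std::string::size_type i= 0; i < name.size(); i++)
    {
      if (name[i] == quote_char)
        out+= quote_char;             /* double embedded quote characters */
      out+= name[i];
    }
    out+= quote_char;
    return out;
  }

  /* quote_ident("my`table", '`') yields:  `my``table`  */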
@@ -725,8 +686,8 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
share->port= 0;
share->socket= 0;
DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
- DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length));
- DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length,
+ DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length));
+ DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length,
table->s->connect_string.str));
share->connection_string= strmake_root(mem_root, table->s->connect_string.str,
table->s->connect_string.length);
@@ -907,6 +868,7 @@ ha_federated::ha_federated(handlerton *hton,
mysql(0), stored_result(0)
{
trx_next= 0;
+ bzero(&bulk_insert, sizeof(bulk_insert));
}
@@ -930,7 +892,7 @@ ha_federated::ha_federated(handlerton *hton,
0 After fields have had field values stored from record
*/
-uint ha_federated::convert_row_to_internal_format(byte *record,
+uint ha_federated::convert_row_to_internal_format(uchar *record,
MYSQL_ROW row,
MYSQL_RES *result)
{
@@ -969,16 +931,15 @@ uint ha_federated::convert_row_to_internal_format(byte *record,
static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
{
DBUG_ENTER("emit_key_part_name");
- if (to->append(STRING_WITH_LEN("`")) ||
- to->append(part->field->field_name) ||
- to->append(STRING_WITH_LEN("`")))
+ if (append_ident(to, part->field->field_name,
+ strlen(part->field->field_name), ident_quote_char))
DBUG_RETURN(1); // Out of memory
DBUG_RETURN(0);
}
static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
bool needs_quotes, bool is_like,
- const byte *ptr, uint len)
+ const uchar *ptr, uint len)
{
Field *field= part->field;
DBUG_ENTER("emit_key_part_element");
@@ -1019,7 +980,7 @@ static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
char strbuff[MAX_FIELD_WIDTH];
String str(strbuff, sizeof(strbuff), part->field->charset()), *res;
- res= field->val_str(&str, (char *)ptr);
+ res= field->val_str(&str, ptr);
if (field->result_type() == STRING_RESULT)
{
@@ -1286,7 +1247,7 @@ bool ha_federated::create_where_from_key(String *to,
{
bool both_not_null=
(start_key != NULL && end_key != NULL) ? TRUE : FALSE;
- const byte *ptr;
+ const uchar *ptr;
uint remainder, length;
char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
@@ -1325,7 +1286,7 @@ bool ha_federated::create_where_from_key(String *to,
uint store_length= key_part->store_length;
uint part_length= min(store_length, length);
needs_quotes= field->str_needs_quotes();
- DBUG_DUMP("key, start of loop", (char *) ptr, length);
+ DBUG_DUMP("key, start of loop", ptr, length);
if (key_part->null_bit)
{
@@ -1507,7 +1468,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
/* TODO: change tmp_share.scheme to LEX_STRING object */
if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
- (byte*) tmp_share.share_key,
+ (uchar*) tmp_share.share_key,
tmp_share.
share_key_length)))
{
@@ -1515,20 +1476,20 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.append(STRING_WITH_LEN("SELECT "));
for (field= table->field; *field; field++)
{
- query.append(STRING_WITH_LEN("`"));
- query.append((*field)->field_name);
- query.append(STRING_WITH_LEN("`, "));
+ append_ident(&query, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
+ query.append(STRING_WITH_LEN(", "));
}
/* chops off trailing comma */
query.length(query.length() - sizeof_trailing_comma);
- query.append(STRING_WITH_LEN(" FROM `"));
- query.append(tmp_share.table_name, tmp_share.table_name_length);
- query.append(STRING_WITH_LEN("`"));
- DBUG_PRINT("info", ("calling alloc_root"));
+ query.append(STRING_WITH_LEN(" FROM "));
+
+ append_ident(&query, tmp_share.table_name,
+ tmp_share.table_name_length, ident_quote_char);
if (!(share= (FEDERATED_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) ||
- !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length())))
+ !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length() + 1)))
goto error;
share->use_count= 0;
@@ -1537,7 +1498,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
DBUG_PRINT("info",
("share->select_query %s", share->select_query));
- if (my_hash_insert(&federated_open_tables, (byte*) share))
+ if (my_hash_insert(&federated_open_tables, (uchar*) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
@@ -1571,7 +1532,7 @@ static int free_share(FEDERATED_SHARE *share)
pthread_mutex_lock(&federated_mutex);
if (!--share->use_count)
{
- hash_delete(&federated_open_tables, (byte*) share);
+ hash_delete(&federated_open_tables, (uchar*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
free_root(&mem_root, MYF(0));
@@ -1631,44 +1592,15 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
thr_lock_data_init(&share->lock, &lock, NULL);
- /* Connect to foreign database mysql_real_connect() */
- mysql= mysql_init(0);
-
- /*
- BUG# 17044 Federated Storage Engine is not UTF8 clean
- Add set names to whatever charset the table is at open
- of table
- */
- /* this sets the csname like 'set names utf8' */
- mysql_options(mysql,MYSQL_SET_CHARSET_NAME,
- this->table->s->table_charset->csname);
-
- DBUG_PRINT("info", ("calling mysql_real_connect hostname %s user %s",
- share->hostname, share->username));
- if (!mysql || !mysql_real_connect(mysql,
- share->hostname,
- share->username,
- share->password,
- share->database,
- share->port,
- share->socket, 0))
- {
- free_share(share);
- DBUG_RETURN(stash_remote_error());
- }
- /*
- Since we do not support transactions at this version, we can let the client
- API silently reconnect. For future versions, we will need more logic to
- deal with transactions
- */
-
- mysql->reconnect= 1;
+ DBUG_ASSERT(mysql == NULL);
ref_length= (table->s->primary_key != MAX_KEY ?
table->key_info[table->s->primary_key].key_length :
table->s->reclength);
DBUG_PRINT("info", ("ref_length: %u", ref_length));
+ reset();
+
DBUG_RETURN(0);
}
@@ -1696,8 +1628,8 @@ int ha_federated::close(void)
stored_result= 0;
}
/* Disconnect from mysql */
- if (mysql) // QQ is this really needed
- mysql_close(mysql);
+ mysql_close(mysql);
+ mysql= NULL;
retval= free_share(share);
DBUG_RETURN(retval);
@@ -1741,85 +1673,106 @@ static inline uint field_in_record_is_null(TABLE *table,
}
-/*
- write_row() inserts a row. No extra() hint is given currently if a bulk load
- is happeneding. buf() is a byte array of data. You can use the field
- information to extract the data from the native byte array type.
- Example of this would be:
- for (Field **field=table->field ; *field ; field++)
- {
- ...
- }
-
- Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
- sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+/**
+ @brief Construct the INSERT statement.
+
+  @details This method constructs the INSERT statement and appends it to
+  the supplied query string buffer.
+
+ @return
+ @retval FALSE No error
+ @retval TRUE Failure
*/
-int ha_federated::write_row(byte *buf)
+bool ha_federated::append_stmt_insert(String *query)
{
- /*
- I need a bool again, in 5.0, I used table->s->fields to accomplish this.
- This worked as a flag that says there are fields with values or not.
- In 5.1, this value doesn't work the same, and I end up with the code
- truncating open parenthesis:
-
- the statement "INSERT INTO t1 VALUES ()" ends up being first built
- in two strings
- "INSERT INTO t1 ("
- and
- " VALUES ("
-
- If there are fields with values, they get appended, with commas, and
- the last loop, a trailing comma is there
-
- "INSERT INTO t1 ( col1, col2, colN, "
-
- " VALUES ( 'val1', 'val2', 'valN', "
-
- Then, if there are fields, it should decrement the string by ", " length.
-
- "INSERT INTO t1 ( col1, col2, colN"
- " VALUES ( 'val1', 'val2', 'valN'"
+ char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ Field **field;
+ uint tmp_length;
+ bool added_field= FALSE;
- Then it adds a close paren to both - if there are fields
+ /* The main insert query string */
+ String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
+ DBUG_ENTER("ha_federated::append_stmt_insert");
- "INSERT INTO t1 ( col1, col2, colN)"
- " VALUES ( 'val1', 'val2', 'valN')"
+ insert_string.length(0);
- Then appends both together
- "INSERT INTO t1 ( col1, col2, colN) VALUES ( 'val1', 'val2', 'valN')"
+ if (replace_duplicates)
+ insert_string.append(STRING_WITH_LEN("REPLACE INTO "));
+ else if (ignore_duplicates && !insert_dup_update)
+ insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
+ else
+ insert_string.append(STRING_WITH_LEN("INSERT INTO "));
+ append_ident(&insert_string, share->table_name, share->table_name_length,
+ ident_quote_char);
+ tmp_length= insert_string.length();
+ insert_string.append(STRING_WITH_LEN(" ("));
- So... the problem, is if you have the original statement:
+ /*
+    loop through the field pointer array, adding any fields in the
+    write set to the column list
+ */
+ for (field= table->field; *field; field++)
+ {
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
+ {
+ /* append the field name */
+ append_ident(&insert_string, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
- "INSERT INTO t1 VALUES ()"
+      /* append a comma after each field name */
+      /*
+        unfortunately, we can't use a check of *(field + 1) to make the
+        following appends conditional, as we don't know whether the next
+        field is in the write set
+ */
+ insert_string.append(STRING_WITH_LEN(", "));
+ added_field= TRUE;
+ }
+ }
- Which is legitimate, but if the code thinks there are fields
+ if (added_field)
+ {
+ /* Remove trailing comma. */
+ insert_string.length(insert_string.length() - sizeof_trailing_comma);
+ insert_string.append(STRING_WITH_LEN(") "));
+ }
+ else
+ {
+ /* If there were no fields, we don't want to add a closing paren. */
+ insert_string.length(tmp_length);
+ }
- "INSERT INTO t1 ("
- " VALUES ( "
+ insert_string.append(STRING_WITH_LEN(" VALUES "));
- If the field flag is set, but there are no commas, reduces the
- string by strlen(", ")
+ DBUG_RETURN(query->append(insert_string));
+}
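
What append_stmt_insert() produces for a hypothetical table t1 with columns a and b in the write set (illustrative output only):

  /*
    REPLACE INTO `t1` (`a`, `b`)  VALUES          -- replace_duplicates
    INSERT IGNORE INTO `t1` (`a`, `b`)  VALUES    -- ignore, no dup-update
    INSERT INTO `t1` (`a`, `b`)  VALUES           -- otherwise

    If no column is in the write set, the string is truncated back to the
    table name and the "(...)" column list is omitted entirely.
  */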
- "INSERT INTO t1 "
- " VALUES "
- Then adds the close parenthesis
+/*
+  write_row() inserts a row. No extra() hint is given currently if a bulk
+  load is happening. buf() is a byte array of data. You can use the field
+ information to extract the data from the native byte array type.
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
- "INSERT INTO t1 )"
- " VALUES )"
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+*/
- So, I have to use a bool as before, set in the loop where fields and commas
- are appended to the string
- */
- my_bool commas_added= FALSE;
- char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+int ha_federated::write_row(uchar *buf)
+{
char values_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
Field **field;
+ uint tmp_length;
+ int error= 0;
+ bool use_bulk_insert;
+ bool auto_increment_update_required= (table->next_number_field != NULL);
- /* The main insert query string */
- String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
/* The string containing the values to be added to the insert */
String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
/* The actual value of the field, to be added to the values_string */
@@ -1830,22 +1783,26 @@ int ha_federated::write_row(byte *buf)
DBUG_ENTER("ha_federated::write_row");
values_string.length(0);
- insert_string.length(0);
insert_field_value_string.length(0);
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
+ ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
/*
start both our field and field values strings
+ We must disable multi-row insert for "INSERT...ON DUPLICATE KEY UPDATE"
+ Ignore duplicates is always true when insert_dup_update is true.
+ When replace_duplicates == TRUE, we can safely enable multi-row insert.
+    When performing a multi-row insert, we only collect the column values
+    for the row. The start of the statement is only created when the
+    first row is copied into the bulk_insert string.
*/
- insert_string.append(STRING_WITH_LEN("INSERT INTO `"));
- insert_string.append(share->table_name, share->table_name_length);
- insert_string.append('`');
- insert_string.append(STRING_WITH_LEN(" ("));
+ if (!(use_bulk_insert= bulk_insert.str &&
+ (!insert_dup_update || replace_duplicates)))
+ append_stmt_insert(&values_string);
- values_string.append(STRING_WITH_LEN(" VALUES "));
values_string.append(STRING_WITH_LEN(" ("));
+ tmp_length= values_string.length();
/*
loop through the field pointer array, add any fields to both the values
@@ -1855,7 +1812,6 @@ int ha_federated::write_row(byte *buf)
{
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
- commas_added= TRUE;
if ((*field)->is_null())
values_string.append(STRING_WITH_LEN(" NULL "));
else
@@ -1863,15 +1819,13 @@ int ha_federated::write_row(byte *buf)
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&insert_field_value_string);
if (needs_quote)
- values_string.append('\'');
+ values_string.append(value_quote_char);
insert_field_value_string.print(&values_string);
if (needs_quote)
- values_string.append('\'');
+ values_string.append(value_quote_char);
insert_field_value_string.length(0);
}
- /* append the field name */
- insert_string.append((*field)->field_name);
/* append commas between both fields and fieldnames */
/*
@@ -1879,7 +1833,6 @@ int ha_federated::write_row(byte *buf)
make the following appends conditional as we don't know if the
next field is in the write set
*/
- insert_string.append(STRING_WITH_LEN(", "));
values_string.append(STRING_WITH_LEN(", "));
}
}
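
[Editor's note — illustrative sketch, not part of the patch. The loop above
appends the ", " separator unconditionally and trims it afterwards, because
the write set makes a conditional append awkward. The same pattern standalone
in C++; std::string stands in for the server's String class and every name
here is illustrative:

    #include <string>
    #include <utility>
    #include <vector>

    /* Build a "( v1, 'v2', ... ) " fragment: quote only values that need
       it, append the separator unconditionally, then chop the trailing
       comma. */
    static std::string build_values(
        const std::vector<std::pair<std::string, bool> > &fields)
    {
      std::string out(" (");
      const std::size_t opened= out.size();
      for (std::size_t i= 0; i < fields.size(); i++)
      {
        if (fields[i].second)              /* needs_quote */
          out+= "'" + fields[i].first + "'";
        else
          out+= fields[i].first;
        out+= ", ";                        /* unconditional, as above */
      }
      if (out.size() > opened)             /* any field appended? */
        out.erase(out.size() - 2);         /* sizeof_trailing_comma */
      out+= ") ";
      return out;
    }
]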
@@ -1890,26 +1843,52 @@ int ha_federated::write_row(byte *buf)
AND, we don't want to chop off the last char '('
insert will be "INSERT INTO t1 VALUES ();"
*/
- if (commas_added)
+ if (values_string.length() > tmp_length)
{
- insert_string.length(insert_string.length() - sizeof_trailing_comma);
- /* chops off leading commas */
+ /* chops off trailing comma */
values_string.length(values_string.length() - sizeof_trailing_comma);
- insert_string.append(STRING_WITH_LEN(") "));
- }
- else
- {
- /* chops off trailing ) */
- insert_string.length(insert_string.length() - sizeof_trailing_closeparen);
}
-
/* we always want to append this, even if there aren't any fields */
values_string.append(STRING_WITH_LEN(") "));
- /* add the values */
- insert_string.append(values_string);
+ if (use_bulk_insert)
+ {
+ /*
+ Send the current bulk insert out if appending the current row would
+ cause the statement to overflow the packet size, otherwise set
+ auto_increment_update_required to FALSE as no query was executed.
+ */
+ if (bulk_insert.length + values_string.length() + bulk_padding >
+ mysql->net.max_packet_size && bulk_insert.length)
+ {
+ error= real_query(bulk_insert.str, bulk_insert.length);
+ bulk_insert.length= 0;
+ }
+ else
+ auto_increment_update_required= FALSE;
+
+ if (bulk_insert.length == 0)
+ {
+ char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ String insert_string(insert_buffer, sizeof(insert_buffer),
+ &my_charset_bin);
+ insert_string.length(0);
+ append_stmt_insert(&insert_string);
+ dynstr_append_mem(&bulk_insert, insert_string.ptr(),
+ insert_string.length());
+ }
- if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length()))
+ else
+ dynstr_append_mem(&bulk_insert, ",", 1);
+ dynstr_append_mem(&bulk_insert, values_string.ptr(),
+ values_string.length());
+ }
+ else
+ {
+ error= real_query(values_string.ptr(), values_string.length());
+ }
+
+ if (error)
{
DBUG_RETURN(stash_remote_error());
}
@@ -1917,12 +1896,91 @@ int ha_federated::write_row(byte *buf)
If the table we've just written a record to contains an auto_increment
field, then store the last_insert_id() value from the foreign server
*/
- if (table->next_number_field)
+ if (auto_increment_update_required)
+ {
update_auto_increment();
+ /* mysql_insert() uses this for protocol return value */
+ table->next_number_field->store(stats.auto_increment_value, 1);
+ }
+
DBUG_RETURN(0);
}
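
[Editor's note — illustrative sketch, not part of the patch. The bulk path
above accumulates rows in bulk_insert and flushes with real_query() only when
the next row would overflow the remote's maximum packet size. The same
technique standalone, all names hypothetical:

    #include <string>
    #include <vector>

    /* Append a row to the pending batch, flushing first when the batch
       plus the new row would exceed the packet limit. */
    struct BulkBuffer
    {
      std::string pending;             /* plays the role of bulk_insert */
      std::size_t max_packet;          /* remote packet-size limit */
      std::vector<std::string> sent;   /* stands in for real_query() */

      void add_row(const std::string &stmt_head, const std::string &row)
      {
        if (!pending.empty() &&
            pending.size() + row.size() + 1 > max_packet)
          flush();
        if (pending.empty())
          pending= stmt_head;          /* "INSERT INTO t (...) VALUES " */
        else
          pending+= ',';
        pending+= row;
      }
      void flush()
      {
        if (!pending.empty())
          sent.push_back(pending);     /* real code: send to the server */
        pending.clear();
      }
    };
]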
+
+/**
+ @brief Prepares the storage engine for bulk inserts.
+
+ @param[in] rows estimated number of rows in bulk insert
+ or 0 if unknown.
+
+ @details Initializes memory structures required for bulk insert.
+*/
+
+void ha_federated::start_bulk_insert(ha_rows rows)
+{
+ uint page_size;
+ DBUG_ENTER("ha_federated::start_bulk_insert");
+
+ dynstr_free(&bulk_insert);
+
+ /**
+ We don't bother with bulk-insert semantics when the estimated rows == 1.
+ The rows value will be 0 if the server does not know how many rows
+ would be inserted. This can occur when performing INSERT...SELECT.
+ */
+
+ if (rows == 1)
+ DBUG_VOID_RETURN;
+
+ /*
+ Make sure we have an open connection so that we know the
+ maximum packet size.
+ */
+ if (!mysql && real_connect())
+ DBUG_VOID_RETURN;
+
+ page_size= (uint) my_getpagesize();
+
+ if (init_dynamic_string(&bulk_insert, NULL, page_size, page_size))
+ DBUG_VOID_RETURN;
+
+ bulk_insert.length= 0;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ @brief End bulk insert.
+
+ @details This method will send any remaining rows to the remote server.
+ Finally, it will deinitialize the bulk insert data structure.
+
+ @return Operation status
+ @retval 0 No error
+ @retval != 0 Error occurred at remote server. Also sets my_errno.
+*/
+
+int ha_federated::end_bulk_insert()
+{
+ int error= 0;
+ DBUG_ENTER("ha_federated::end_bulk_insert");
+
+ if (bulk_insert.str && bulk_insert.length)
+ {
+ if (real_query(bulk_insert.str, bulk_insert.length))
+ error= stash_remote_error();
+ else
+ if (table->next_number_field)
+ update_auto_increment();
+ }
+
+ dynstr_free(&bulk_insert);
+
+ DBUG_RETURN(my_errno= error);
+}
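
[Editor's note — illustrative sketch, not part of the patch. Reusing the
BulkBuffer sketch above, the hook pairing the server relies on looks like:

    BulkBuffer b;
    b.max_packet= 16UL * 1024 * 1024;  /* mirrors net.max_packet_size */
    /* write_row(), once per row: rows accumulate and auto-flush */
    b.add_row("INSERT INTO t (a) VALUES ", "(1)");
    b.add_row("INSERT INTO t (a) VALUES ", "(2)");
    /* end_bulk_insert(): send whatever remains */
    b.flush();
]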
+
+
/*
ha_federated::update_auto_increment
@@ -1936,8 +1994,9 @@ void ha_federated::update_auto_increment(void)
THD *thd= current_thd;
DBUG_ENTER("ha_federated::update_auto_increment");
+ ha_federated::info(HA_STATUS_AUTO);
thd->first_successful_insert_id_in_cur_stmt=
- mysql->last_used_con->insert_id;
+ stats.auto_increment_value;
DBUG_PRINT("info",("last_insert_id: %ld", (long) stats.auto_increment_value));
DBUG_VOID_RETURN;
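
[Editor's note — illustrative sketch, not part of the patch. Outside the
server, the remote auto-increment value the handler mirrors into
stats.auto_increment_value is available through the standard client API:

    #include <mysql.h>

    /* Per-connection value set by the last INSERT on the remote side. */
    static unsigned long long remote_insert_id(MYSQL *conn)
    {
      return mysql_insert_id(conn);
    }
]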
@@ -1952,11 +2011,11 @@ int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt)
query.length(0);
query.set_charset(system_charset_info);
- query.append(STRING_WITH_LEN("OPTIMIZE TABLE `"));
- query.append(share->table_name, share->table_name_length);
- query.append(STRING_WITH_LEN("`"));
+ query.append(STRING_WITH_LEN("OPTIMIZE TABLE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
- if (mysql_real_query(mysql, query.ptr(), query.length()))
+ if (real_query(query.ptr(), query.length()))
{
DBUG_RETURN(stash_remote_error());
}
@@ -1974,9 +2033,9 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
query.length(0);
query.set_charset(system_charset_info);
- query.append(STRING_WITH_LEN("REPAIR TABLE `"));
- query.append(share->table_name, share->table_name_length);
- query.append(STRING_WITH_LEN("`"));
+ query.append(STRING_WITH_LEN("REPAIR TABLE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
if (check_opt->flags & T_QUICK)
query.append(STRING_WITH_LEN(" QUICK"));
if (check_opt->flags & T_EXTEND)
@@ -1984,7 +2043,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
if (check_opt->sql_flags & TT_USEFRM)
query.append(STRING_WITH_LEN(" USE_FRM"));
- if (mysql_real_query(mysql, query.ptr(), query.length()))
+ if (real_query(query.ptr(), query.length()))
{
DBUG_RETURN(stash_remote_error());
}
@@ -2010,7 +2069,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
*/
-int ha_federated::update_row(const byte *old_data, byte *new_data)
+int ha_federated::update_row(const uchar *old_data, uchar *new_data)
{
/*
This used to control how the query was built. If there was a
@@ -2044,7 +2103,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
String where_string(where_buffer,
sizeof(where_buffer),
&my_charset_bin);
- byte *record= table->record[0];
+ uchar *record= table->record[0];
DBUG_ENTER("ha_federated::update_row");
/*
set string lengths to 0 to avoid misc chars in string
@@ -2053,9 +2112,13 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.length(0);
where_string.length(0);
- update_string.append(STRING_WITH_LEN("UPDATE `"));
- update_string.append(share->table_name);
- update_string.append(STRING_WITH_LEN("` SET "));
+ if (ignore_duplicates)
+ update_string.append(STRING_WITH_LEN("UPDATE IGNORE "));
+ else
+ update_string.append(STRING_WITH_LEN("UPDATE "));
+ append_ident(&update_string, share->table_name,
+ share->table_name_length, ident_quote_char);
+ update_string.append(STRING_WITH_LEN(" SET "));
/*
In this loop, we want to match column names to values being inserted
@@ -2071,7 +2134,9 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
{
if (bitmap_is_set(table->write_set, (*field)->field_index))
{
- update_string.append((*field)->field_name);
+ uint field_name_length= strlen((*field)->field_name);
+ append_ident(&update_string, (*field)->field_name, field_name_length,
+ ident_quote_char);
update_string.append(STRING_WITH_LEN(" = "));
if ((*field)->is_null())
@@ -2083,10 +2148,10 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
if (needs_quote)
- update_string.append('\'');
+ update_string.append(value_quote_char);
field_value.print(&update_string);
if (needs_quote)
- update_string.append('\'');
+ update_string.append(value_quote_char);
field_value.length(0);
tmp_restore_column_map(table->read_set, old_map);
}
@@ -2095,7 +2160,9 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
if (bitmap_is_set(table->read_set, (*field)->field_index))
{
- where_string.append((*field)->field_name);
+ uint field_name_length= strlen((*field)->field_name);
+ append_ident(&where_string, (*field)->field_name, field_name_length,
+ ident_quote_char);
if (field_in_record_is_null(table, *field, (char*) old_data))
where_string.append(STRING_WITH_LEN(" IS NULL "));
else
@@ -2103,12 +2170,12 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
bool needs_quote= (*field)->str_needs_quotes();
where_string.append(STRING_WITH_LEN(" = "));
(*field)->val_str(&field_value,
- (char*) (old_data + (*field)->offset(record)));
+ (old_data + (*field)->offset(record)));
if (needs_quote)
- where_string.append('\'');
+ where_string.append(value_quote_char);
field_value.print(&where_string);
if (needs_quote)
- where_string.append('\'');
+ where_string.append(value_quote_char);
field_value.length(0);
}
where_string.append(STRING_WITH_LEN(" AND "));
@@ -2133,7 +2200,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
if (!has_a_primary_key)
update_string.append(STRING_WITH_LEN(" LIMIT 1"));
- if (mysql_real_query(mysql, update_string.ptr(), update_string.length()))
+ if (real_query(update_string.ptr(), update_string.length()))
{
DBUG_RETURN(stash_remote_error());
}
@@ -2155,7 +2222,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
calls.
*/
-int ha_federated::delete_row(const byte *buf)
+int ha_federated::delete_row(const uchar *buf)
{
char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char data_buffer[FEDERATED_QUERY_BUFFER_SIZE];
@@ -2165,9 +2232,10 @@ int ha_federated::delete_row(const byte *buf)
DBUG_ENTER("ha_federated::delete_row");
delete_string.length(0);
- delete_string.append(STRING_WITH_LEN("DELETE FROM `"));
- delete_string.append(share->table_name);
- delete_string.append(STRING_WITH_LEN("` WHERE "));
+ delete_string.append(STRING_WITH_LEN("DELETE FROM "));
+ append_ident(&delete_string, share->table_name,
+ share->table_name_length, ident_quote_char);
+ delete_string.append(STRING_WITH_LEN(" WHERE "));
for (Field **field= table->field; *field; field++)
{
@@ -2175,8 +2243,9 @@ int ha_federated::delete_row(const byte *buf)
found++;
if (bitmap_is_set(table->read_set, cur_field->field_index))
{
+ append_ident(&delete_string, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
data_string.length(0);
- delete_string.append(cur_field->field_name);
if (cur_field->is_null())
{
delete_string.append(STRING_WITH_LEN(" IS NULL "));
@@ -2187,10 +2256,10 @@ int ha_federated::delete_row(const byte *buf)
delete_string.append(STRING_WITH_LEN(" = "));
cur_field->val_str(&data_string);
if (needs_quote)
- delete_string.append('\'');
+ delete_string.append(value_quote_char);
data_string.print(&delete_string);
if (needs_quote)
- delete_string.append('\'');
+ delete_string.append(value_quote_char);
}
delete_string.append(STRING_WITH_LEN(" AND "));
}
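
[Editor's note — illustrative sketch, not part of the patch. The WHERE
construction above emits one predicate per readable column, uses IS NULL for
NULL fields, and (in code outside this hunk) trims the trailing " AND ". A
standalone sketch; NULL column values are represented as null pointers and
real escaping is omitted:

    #include <string>
    #include <utility>
    #include <vector>

    static std::string build_where(
        const std::vector<std::pair<std::string, const char*> > &cols)
    {
      std::string where(" WHERE ");
      const std::size_t opened= where.size();
      for (std::size_t i= 0; i < cols.size(); i++)
      {
        where+= "`" + cols[i].first + "`";
        if (cols[i].second == 0)
          where+= " IS NULL ";
        else
          where+= std::string(" = '") + cols[i].second + "' ";
        where+= " AND ";                 /* unconditional, trimmed below */
      }
      if (where.size() > opened)         /* any predicate appended? */
        where.erase(where.size() - 5);   /* chop trailing " AND " */
      return where;
    }
]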
@@ -2204,7 +2273,7 @@ int ha_federated::delete_row(const byte *buf)
delete_string.append(STRING_WITH_LEN(" LIMIT 1"));
DBUG_PRINT("info",
("Delete sql: %s", delete_string.c_ptr_quick()));
- if (mysql_real_query(mysql, delete_string.ptr(), delete_string.length()))
+ if (real_query(delete_string.ptr(), delete_string.length()))
{
DBUG_RETURN(stash_remote_error());
}
@@ -2225,7 +2294,7 @@ int ha_federated::delete_row(const byte *buf)
a WHERE clause on a non-primary key index, simply calls index_read_idx.
*/
-int ha_federated::index_read(byte *buf, const byte *key,
+int ha_federated::index_read(uchar *buf, const uchar *key,
uint key_len, ha_rkey_function find_flag)
{
DBUG_ENTER("ha_federated::index_read");
@@ -2251,7 +2320,7 @@ int ha_federated::index_read(byte *buf, const byte *key,
returns. We need to be callable from ha_rnd_pos()
*/
-int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
+int ha_federated::index_read_idx(uchar *buf, uint index, const uchar *key,
uint key_len, enum ha_rkey_function find_flag)
{
int retval;
@@ -2277,8 +2346,8 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
table->status == STATUS_NOT_FOUND
*/
-int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
- const byte *key,
+int ha_federated::index_read_idx_with_result_set(uchar *buf, uint index,
+ const uchar *key,
uint key_len,
ha_rkey_function find_flag,
MYSQL_RES **result)
@@ -2299,8 +2368,7 @@ int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
*result= 0; // In case of errors
index_string.length(0);
sql_query.length(0);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
sql_query.append(share->select_query);
@@ -2313,7 +2381,7 @@ int ha_federated::index_read_idx_with_result_set(byte *buf, uint index,
NULL, 0, 0);
sql_query.append(index_string);
- if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
+ if (real_query(sql_query.ptr(), sql_query.length()))
{
my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
mysql_errno(mysql), mysql_error(mysql)));
@@ -2379,7 +2447,7 @@ int ha_federated::read_range_first(const key_range *start_key,
mysql_free_result(stored_result);
stored_result= 0;
}
- if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
+ if (real_query(sql_query.ptr(), sql_query.length()))
{
retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
goto error;
@@ -2411,11 +2479,10 @@ int ha_federated::read_range_next()
/* Used to read forward through the index. */
-int ha_federated::index_next(byte *buf)
+int ha_federated::index_next(uchar *buf)
{
DBUG_ENTER("ha_federated::index_next");
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
DBUG_RETURN(read_next(buf, stored_result));
}
@@ -2479,9 +2546,7 @@ int ha_federated::rnd_init(bool scan)
stored_result= 0;
}
- if (mysql_real_query(mysql,
- share->select_query,
- strlen(share->select_query)))
+ if (real_query(share->select_query, strlen(share->select_query)))
goto error;
stored_result= mysql_store_result(mysql);
@@ -2525,7 +2590,7 @@ int ha_federated::index_end(void)
sql_table.cc, and sql_update.cc.
*/
-int ha_federated::rnd_next(byte *buf)
+int ha_federated::rnd_next(uchar *buf)
{
DBUG_ENTER("ha_federated::rnd_next");
@@ -2562,7 +2627,7 @@ int ha_federated::rnd_next(byte *buf)
0 no error
*/
-int ha_federated::read_next(byte *buf, MYSQL_RES *result)
+int ha_federated::read_next(uchar *buf, MYSQL_RES *result)
{
int retval;
MYSQL_ROW row;
@@ -2591,11 +2656,11 @@ int ha_federated::read_next(byte *buf, MYSQL_RES *result)
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
-void ha_federated::position(const byte *record)
+void ha_federated::position(const uchar *record)
{
DBUG_ENTER("ha_federated::position");
if (table->s->primary_key != MAX_KEY)
- key_copy(ref, (byte *)record, table->key_info + table->s->primary_key,
+ key_copy(ref, (uchar *)record, table->key_info + table->s->primary_key,
ref_length);
else
memcpy(ref, record, ref_length);
@@ -2612,12 +2677,11 @@ void ha_federated::position(const byte *record)
Called from filesort.cc, records.cc, sql_insert.cc, sql_select.cc, and sql_update.cc.
*/
-int ha_federated::rnd_pos(byte *buf, byte *pos)
+int ha_federated::rnd_pos(uchar *buf, uchar *pos)
{
int result;
DBUG_ENTER("ha_federated::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
if (table->s->primary_key != MAX_KEY)
{
/* We have a primary key, so use index_read_idx to find row */
@@ -2683,7 +2747,6 @@ int ha_federated::info(uint flag)
{
char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
char status_buf[FEDERATED_QUERY_BUFFER_SIZE];
- char escaped_table_name[FEDERATED_QUERY_BUFFER_SIZE];
int error;
uint error_code;
MYSQL_RES *result= 0;
@@ -2696,16 +2759,11 @@ int ha_federated::info(uint flag)
if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
{
status_query_string.length(0);
- status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE '"));
- escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
- sizeof(escaped_table_name),
- share->table_name,
- share->table_name_length);
- status_query_string.append(escaped_table_name);
- status_query_string.append(STRING_WITH_LEN("'"));
-
- if (mysql_real_query(mysql, status_query_string.ptr(),
- status_query_string.length()))
+ status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE "));
+ append_ident(&status_query_string, share->table_name,
+ share->table_name_length, value_quote_char);
+
+ if (real_query(status_query_string.ptr(), status_query_string.length()))
goto error;
status_query_string.length(0);
@@ -2737,15 +2795,15 @@ int ha_federated::info(uint flag)
stats.records= (ha_rows) my_strtoll10(row[4], (char**) 0,
&error);
if (row[5] != NULL)
- stats.mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+ stats.mean_rec_length= (ulong) my_strtoll10(row[5], (char**) 0, &error);
stats.data_file_length= stats.records * stats.mean_rec_length;
if (row[12] != NULL)
- stats.update_time= (ha_rows) my_strtoll10(row[12], (char**) 0,
+ stats.update_time= (time_t) my_strtoll10(row[12], (char**) 0,
&error);
if (row[13] != NULL)
- stats.check_time= (ha_rows) my_strtoll10(row[13], (char**) 0,
+ stats.check_time= (time_t) my_strtoll10(row[13], (char**) 0,
&error);
}
/*
@@ -2757,22 +2815,90 @@ int ha_federated::info(uint flag)
}
- if (result)
- mysql_free_result(result);
+ if (flag & HA_STATUS_AUTO)
+ stats.auto_increment_value= mysql->last_used_con->insert_id;
+
+ mysql_free_result(result);
DBUG_RETURN(0);
error:
- if (result)
- mysql_free_result(result);
-
- my_sprintf(error_buffer, (error_buffer, ": %d : %s",
- mysql_errno(mysql), mysql_error(mysql)));
- my_error(error_code, MYF(0), error_buffer);
+ mysql_free_result(result);
+ if (mysql)
+ {
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
+ }
+ else
+ if (remote_error_number != -1 /* error already reported */)
+ {
+ error_code= remote_error_number;
+ my_error(error_code, MYF(0), ER(error_code));
+ }
DBUG_RETURN(error_code);
}
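
[Editor's note — illustrative sketch, not part of the patch. The column
indexes info() reads from a SHOW TABLE STATUS row, shown against the plain
client API with error handling trimmed:

    #include <mysql.h>
    #include <cstdlib>
    #include <cstring>

    static void read_status(MYSQL *conn, const char *query)
    {
      if (mysql_real_query(conn, query, (unsigned long) strlen(query)))
        return;
      MYSQL_RES *res= mysql_store_result(conn);
      if (!res)
        return;
      MYSQL_ROW row= mysql_fetch_row(res);
      if (row)
      {
        /* row[4]=Rows, row[5]=Avg_row_length; row[12]/row[13] are the
           Update_time/Check_time columns parsed above. */
        unsigned long long rows= row[4] ? strtoull(row[4], 0, 10) : 0;
        unsigned long avg_len=   row[5] ? strtoul(row[5], 0, 10)  : 0;
        (void) rows; (void) avg_len;
      }
      mysql_free_result(res);
    }
]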
+/**
+ @brief Handles extra signals from MySQL server
+
+ @param[in] operation Hint for storage engine
+
+ @return Operation Status
+ @retval 0 OK
+ */
+int ha_federated::extra(ha_extra_function operation)
+{
+ DBUG_ENTER("ha_federated::extra");
+ switch (operation) {
+ case HA_EXTRA_IGNORE_DUP_KEY:
+ ignore_duplicates= TRUE;
+ break;
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ insert_dup_update= FALSE;
+ ignore_duplicates= FALSE;
+ break;
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ replace_duplicates= TRUE;
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ /*
+ We use this flag to ensure that we do not create an "INSERT IGNORE"
+ statement when inserting new rows into the remote table.
+ */
+ replace_duplicates= FALSE;
+ break;
+ case HA_EXTRA_INSERT_WITH_UPDATE:
+ insert_dup_update= TRUE;
+ break;
+ default:
+ /* do nothing */
+ DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Reset state of file to after 'open'.
+
+ @details This function is called after every statement for all tables
+ used by that statement.
+
+ @return Operation status
+ @retval 0 OK
+*/
+
+int ha_federated::reset(void)
+{
+ insert_dup_update= FALSE;
+ ignore_duplicates= FALSE;
+ replace_duplicates= FALSE;
+ return 0;
+}
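
[Editor's note — illustrative mapping, not part of the patch, and stated as
this editor's understanding rather than verified behaviour: the server is
expected to signal INSERT IGNORE via HA_EXTRA_IGNORE_DUP_KEY, REPLACE via
HA_EXTRA_WRITE_CAN_REPLACE, and INSERT ... ON DUPLICATE KEY UPDATE via
HA_EXTRA_INSERT_WITH_UPDATE, with reset() clearing all three flags at end of
statement. As a tiny state sketch:

    struct DupState
    {
      bool ignore_duplicates, replace_duplicates, insert_dup_update;
      DupState() : ignore_duplicates(false), replace_duplicates(false),
                   insert_dup_update(false) {}
      void reset()
      { ignore_duplicates= replace_duplicates= insert_dup_update= false; }
    };
]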
+
+
/*
Used to delete all rows in a table. Both for cases of truncate and
for cases where the optimizer realizes that all rows will be
@@ -2794,14 +2920,14 @@ int ha_federated::delete_all_rows()
query.length(0);
query.set_charset(system_charset_info);
- query.append(STRING_WITH_LEN("TRUNCATE `"));
- query.append(share->table_name);
- query.append(STRING_WITH_LEN("`"));
+ query.append(STRING_WITH_LEN("TRUNCATE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
/*
TRUNCATE won't return anything in mysql_affected_rows
*/
- if (mysql_real_query(mysql, query.ptr(), query.length()))
+ if (real_query(query.ptr(), query.length()))
{
DBUG_RETURN(stash_remote_error());
}
@@ -2891,19 +3017,125 @@ int ha_federated::create(const char *name, TABLE *table_arg,
FEDERATED_SHARE tmp_share; // Only a temporary share, to test the url
DBUG_ENTER("ha_federated::create");
- if (!(retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1)))
- retval= check_foreign_data_source(&tmp_share, 1);
+ retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1);
DBUG_RETURN(retval);
}
+int ha_federated::real_connect()
+{
+ char buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ String sql_query(buffer, sizeof(buffer), &my_charset_bin);
+ DBUG_ENTER("ha_federated::real_connect");
+
+ /*
+ Bug#25679
+ Ensure that we do not hold the LOCK_open mutex while attempting
+ to establish a Federated connection, to guard against a trivial
+ Denial of Service scenario.
+ */
+ safe_mutex_assert_not_owner(&LOCK_open);
+
+ DBUG_ASSERT(mysql == NULL);
+
+ if (!(mysql= mysql_init(NULL)))
+ {
+ remote_error_number= HA_ERR_OUT_OF_MEM;
+ DBUG_RETURN(-1);
+ }
+
+ /*
+ BUG# 17044 Federated Storage Engine is not UTF8 clean
+ Add a SET NAMES for whatever character set the table uses, at
+ table open time
+ */
+ /* this sets the csname like 'set names utf8' */
+ mysql_options(mysql,MYSQL_SET_CHARSET_NAME,
+ this->table->s->table_charset->csname);
+
+ sql_query.length(0);
+
+ if (!mysql_real_connect(mysql,
+ share->hostname,
+ share->username,
+ share->password,
+ share->database,
+ share->port,
+ share->socket, 0))
+ {
+ stash_remote_error();
+ mysql_close(mysql);
+ mysql= NULL;
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), remote_error_buf);
+ remote_error_number= -1;
+ DBUG_RETURN(-1);
+ }
+
+ /*
+ We have established a connection; let's try a simple dummy query just
+ to check that the table and expected columns are present.
+ */
+ sql_query.append(share->select_query);
+ sql_query.append(STRING_WITH_LEN(" WHERE 1=0"));
+ if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
+ {
+ sql_query.length(0);
+ sql_query.append("error: ");
+ sql_query.qs_append(mysql_errno(mysql));
+ sql_query.append(" '");
+ sql_query.append(mysql_error(mysql));
+ sql_query.append("'");
+ mysql_close(mysql);
+ mysql= NULL;
+ my_error(ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST, MYF(0), sql_query.ptr());
+ remote_error_number= -1;
+ DBUG_RETURN(-1);
+ }
+
+ /* Just throw away the result; no rows anyway, but we must keep the protocol in sync */
+ mysql_free_result(mysql_store_result(mysql));
+
+ /*
+ Since we do not support transactions in this version, we can let the client
+ API silently reconnect. Future versions will need more logic to
+ deal with transactions.
+ */
+
+ mysql->reconnect= 1;
+ DBUG_RETURN(0);
+}
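
[Editor's note — illustrative sketch, not part of the patch. The
connect-then-probe sequence above, reduced to the plain client API; host,
credentials, and table name are placeholders:

    #include <mysql.h>
    #include <cstdio>
    #include <cstring>

    static MYSQL *connect_and_probe(void)
    {
      MYSQL *conn= mysql_init(NULL);
      if (!conn)
        return NULL;
      mysql_options(conn, MYSQL_SET_CHARSET_NAME, "utf8");
      if (!mysql_real_connect(conn, "remote-host", "user", "pass",
                              "db", 3306, NULL, 0))
      {
        mysql_close(conn);
        return NULL;
      }
      /* Cheap shape check: matches no rows, but fails if the table or a
         selected column is missing. */
      const char *probe= "SELECT * FROM t WHERE 1=0";
      if (mysql_real_query(conn, probe, (unsigned long) strlen(probe)))
      {
        fprintf(stderr, "probe failed: %u '%s'\n",
                mysql_errno(conn), mysql_error(conn));
        mysql_close(conn);
        return NULL;
      }
      mysql_free_result(mysql_store_result(conn)); /* stay in sync */
      return conn;
    }
]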
+
+
+int ha_federated::real_query(const char *query, uint length)
+{
+ int rc= 0;
+ DBUG_ENTER("ha_federated::real_query");
+
+ if (!mysql && (rc= real_connect()))
+ goto end;
+
+ if (!query || !length)
+ goto end;
+
+ rc= mysql_real_query(mysql, query, length);
+
+end:
+ DBUG_RETURN(rc);
+}
+
+
int ha_federated::stash_remote_error()
{
DBUG_ENTER("ha_federated::stash_remote_error()");
+ if (!mysql)
+ DBUG_RETURN(remote_error_number);
remote_error_number= mysql_errno(mysql);
strmake(remote_error_buf, mysql_error(mysql), sizeof(remote_error_buf)-1);
+ if (remote_error_number == ER_DUP_ENTRY ||
+ remote_error_number == ER_DUP_KEY)
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
DBUG_RETURN(HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM);
}
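
[Editor's note — illustrative sketch, not part of the patch. The translation
above lets the SQL layer run its normal duplicate-key handling (IGNORE,
ON DUPLICATE KEY UPDATE) against remote errors; the numeric values below are
the usual MySQL codes but should be treated as assumptions:

    static int map_remote_errno(unsigned int remote_errno)
    {
      if (remote_errno == 1062 ||   /* ER_DUP_ENTRY */
          remote_errno == 1022)     /* ER_DUP_KEY */
        return 121;                 /* HA_ERR_FOUND_DUPP_KEY */
      return -1;                    /* engine-specific "remote error" */
    }
]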
@@ -2929,11 +3161,16 @@ bool ha_federated::get_error_message(int error, String* buf)
int ha_federated::external_lock(THD *thd, int lock_type)
{
int error= 0;
- ha_federated *trx= (ha_federated *)thd->ha_data[ht->slot];
DBUG_ENTER("ha_federated::external_lock");
+ /*
+ Support for transactions disabled until WL#2952 fixes it.
+ */
+#ifdef XXX_SUPERCEDED_BY_WL2952
if (lock_type != F_UNLCK)
{
+ ha_federated *trx= (ha_federated *)thd_get_ha_data(thd, ht);
+
DBUG_PRINT("info",("federated not lock F_UNLCK"));
if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
{
@@ -2963,7 +3200,7 @@ int ha_federated::external_lock(THD *thd, int lock_type)
DBUG_PRINT("info", ("error setting autocommit FALSE: %d", error));
DBUG_RETURN(error);
}
- thd->ha_data[ht->slot]= this;
+ thd_set_ha_data(thd, ht, this);
trans_register_ha(thd, TRUE, ht);
/*
Send a lock table to the remote end.
@@ -2985,14 +3222,15 @@ int ha_federated::external_lock(THD *thd, int lock_type)
}
}
}
- DBUG_RETURN(0);
+#endif /* XXX_SUPERCEDED_BY_WL2952 */
+ DBUG_RETURN(error);
}
static int federated_commit(handlerton *hton, THD *thd, bool all)
{
int return_val= 0;
- ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot];
+ ha_federated *trx= (ha_federated *) thd_get_ha_data(thd, hton);
DBUG_ENTER("federated_commit");
if (all)
@@ -3007,7 +3245,7 @@ static int federated_commit(handlerton *hton, THD *thd, bool all)
if (error && !return_val)
return_val= error;
}
- thd->ha_data[hton->slot]= NULL;
+ thd_set_ha_data(thd, hton, NULL);
}
DBUG_PRINT("info", ("error val: %d", return_val));
@@ -3018,7 +3256,7 @@ static int federated_commit(handlerton *hton, THD *thd, bool all)
static int federated_rollback(handlerton *hton, THD *thd, bool all)
{
int return_val= 0;
- ha_federated *trx= (ha_federated *)thd->ha_data[hton->slot];
+ ha_federated *trx= (ha_federated *)thd_get_ha_data(thd, hton);
DBUG_ENTER("federated_rollback");
if (all)
@@ -3033,7 +3271,7 @@ static int federated_rollback(handlerton *hton, THD *thd, bool all)
if (error && !return_val)
return_val= error;
}
- thd->ha_data[hton->slot]= NULL;
+ thd_set_ha_data(thd, hton, NULL);
}
DBUG_PRINT("info", ("error val: %d", return_val));
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index 4d2eefdd986..40bcf9cc402 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -88,13 +88,16 @@ class ha_federated: public handler
MYSQL_ROW_OFFSET current_position; // Current position used by ::position()
int remote_error_number;
char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE];
+ bool ignore_duplicates, replace_duplicates;
+ bool insert_dup_update;
+ DYNAMIC_STRING bulk_insert;
private:
/*
return 0 on success
return errorcode otherwise
*/
- uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row,
+ uint convert_row_to_internal_format(uchar *buf, MYSQL_ROW row,
MYSQL_RES *result);
bool create_where_from_key(String *to, KEY *key_info,
const key_range *start_key,
@@ -102,6 +105,16 @@ private:
bool records_in_range, bool eq_range);
int stash_remote_error();
+ bool append_stmt_insert(String *query);
+
+ int read_next(uchar *buf, MYSQL_RES *result);
+ int index_read_idx_with_result_set(uchar *buf, uint index,
+ const uchar *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ MYSQL_RES **result);
+ int real_query(const char *query, uint length);
+ int real_connect();
public:
ha_federated(handlerton *hton, TABLE_SHARE *table_arg);
~ha_federated() {}
@@ -128,7 +141,9 @@ public:
/* fix server to be able to get remote server table flags */
return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED
| HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
+ HA_NO_TRANSACTIONS /* until fixed by WL#2952 */ |
HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY);
}
/*
@@ -150,6 +165,7 @@ public:
uint max_supported_keys() const { return MAX_KEY; }
uint max_supported_key_parts() const { return MAX_REF_PARTS; }
uint max_supported_key_length() const { return FEDERATED_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() const { return FEDERATED_MAX_KEY_LENGTH; }
/*
Called in test_quick_select to determine if indexes should be used.
Normally, we need to know number of blocks . For federated we need to
@@ -188,15 +204,17 @@ public:
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
- int write_row(byte *buf);
- int update_row(const byte *old_data, byte *new_data);
- int delete_row(const byte *buf);
+ void start_bulk_insert(ha_rows rows);
+ int end_bulk_insert();
+ int write_row(uchar *buf);
+ int update_row(const uchar *old_data, uchar *new_data);
+ int delete_row(const uchar *buf);
int index_init(uint keynr, bool sorted);
- int index_read(byte *buf, const byte *key,
+ int index_read(uchar *buf, const uchar *key,
uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte *buf, uint idx, const byte *key,
+ int index_read_idx(uchar *buf, uint idx, const uchar *key,
uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte *buf);
+ int index_next(uchar *buf);
int index_end();
int read_range_first(const key_range *start_key,
const key_range *end_key,
@@ -212,10 +230,11 @@ public:
*/
int rnd_init(bool scan); //required
int rnd_end();
- int rnd_next(byte *buf); //required
- int rnd_pos(byte *buf, byte *pos); //required
- void position(const byte *record); //required
+ int rnd_next(uchar *buf); //required
+ int rnd_pos(uchar *buf, uchar *pos); //required
+ void position(const uchar *record); //required
int info(uint); //required
+ int extra(ha_extra_function operation);
void update_auto_increment(void);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
@@ -230,18 +249,12 @@ public:
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); //required
- virtual bool get_error_message(int error, String *buf);
+ bool get_error_message(int error, String *buf);
int external_lock(THD *thd, int lock_type);
int connection_commit();
int connection_rollback();
int connection_autocommit(bool state);
int execute_simple_query(const char *query, int len);
-
- int read_next(byte *buf, MYSQL_RES *result);
- int index_read_idx_with_result_set(byte *buf, uint index,
- const byte *key,
- uint key_len,
- ha_rkey_function find_flag,
- MYSQL_RES **result);
+ int reset(void);
};
diff --git a/storage/heap/CMakeLists.txt b/storage/heap/CMakeLists.txt
index 39953684b8f..fd3ce149b2c 100644..100755
--- a/storage/heap/CMakeLists.txt
+++ b/storage/heap/CMakeLists.txt
@@ -20,8 +20,13 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(heap _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c
+
+SET(HEAP_SOURCES _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c
ha_heap.cc
hp_delete.c hp_extra.c hp_hash.c hp_info.c hp_open.c hp_panic.c
hp_rename.c hp_rfirst.c hp_rkey.c hp_rlast.c hp_rnext.c hp_rprev.c
hp_rrnd.c hp_rsame.c hp_scan.c hp_static.c hp_update.c hp_write.c)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(heap ${HEAP_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/heap/_check.c b/storage/heap/_check.c
index 05f12dade0d..08b6da62ae1 100644
--- a/storage/heap/_check.c
+++ b/storage/heap/_check.c
@@ -167,7 +167,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records,
HP_KEYDEF *keydef= info->s->keydef + keynr;
int error= 0;
ulong found= 0;
- byte *key, *recpos;
+ uchar *key, *recpos;
uint key_length;
uint not_used[2];
@@ -176,7 +176,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records,
{
do
{
- memcpy(&recpos, key + (*keydef->get_key_length)(keydef,key), sizeof(byte*));
+ memcpy(&recpos, key + (*keydef->get_key_length)(keydef,key), sizeof(uchar*));
key_length= hp_rb_make_key(keydef, info->recbuf, recpos, 0);
if (ha_key_cmp(keydef->seg, (uchar*) info->recbuf, (uchar*) key,
key_length, SEARCH_FIND | SEARCH_SAME, not_used))
diff --git a/storage/heap/_rectest.c b/storage/heap/_rectest.c
index 2fd2d39bed7..068fedf719c 100644
--- a/storage/heap/_rectest.c
+++ b/storage/heap/_rectest.c
@@ -18,7 +18,7 @@
#include "heapdef.h"
-int hp_rectest(register HP_INFO *info, register const byte *old)
+int hp_rectest(register HP_INFO *info, register const uchar *old)
{
DBUG_ENTER("hp_rectest");
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 8c378f7334f..601d4612dda 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -22,7 +22,7 @@
#include "mysql_priv.h"
#include <mysql/plugin.h>
#include "ha_heap.h"
-
+#include "heapdef.h"
static handler *heap_create_handler(handlerton *hton,
TABLE_SHARE *table,
@@ -61,8 +61,8 @@ static handler *heap_create_handler(handlerton *hton,
*****************************************************************************/
ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
- :handler(hton, table_arg), file(0), records_changed(0),
- key_stat_version(0)
+ :handler(hton, table_arg), file(0), records_changed(0), internal_table(0),
+ key_stat_version(0)
{}
@@ -90,13 +90,25 @@ const char **ha_heap::bas_ext() const
int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
- if (!(file= heap_open(name, mode)) && my_errno == ENOENT)
+ if ((test_if_locked & HA_OPEN_INTERNAL_TABLE) ||
+ !(file= heap_open(name, mode)) && my_errno == ENOENT)
{
HA_CREATE_INFO create_info;
+ internal_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
bzero(&create_info, sizeof(create_info));
+ file= 0;
if (!create(name, table, &create_info))
{
- file= heap_open(name, mode);
+ file= internal_table ?
+ heap_open_from_share(internal_share, mode) :
+ heap_open_from_share_and_register(internal_share, mode);
+ if (!file)
+ {
+ /* Couldn't open table; Remove the newly created table */
+ pthread_mutex_lock(&THR_LOCK_heap);
+ hp_free(internal_share);
+ pthread_mutex_unlock(&THR_LOCK_heap);
+ }
implicit_emptied= 1;
}
}
@@ -120,7 +132,27 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
int ha_heap::close(void)
{
- return heap_close(file);
+ return internal_table ? hp_close(file) : heap_close(file);
+}
+
+
+/*
+ Create a copy of this table
+
+ DESCRIPTION
+ Do the same as the default implementation, but use file->s->name instead
+ of table->s->path. This is needed on Windows, where the clone() call sees
+ a '/'-delimited path in table->s->path while ha_heap::open() was called
+ with a '\'-delimited path.
+*/
+
+handler *ha_heap::clone(MEM_ROOT *mem_root)
+{
+ handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
+ if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,
+ HA_OPEN_IGNORE_IF_LOCKED))
+ return new_handler;
+ return NULL; /* purecov: inspected */
}
@@ -165,7 +197,7 @@ void ha_heap::update_key_stats()
else
{
ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
- uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
+ uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2;
if (no_records < 2)
no_records= 2;
key->rec_per_key[key->key_parts-1]= no_records;
@@ -178,10 +210,10 @@ void ha_heap::update_key_stats()
}
-int ha_heap::write_row(byte * buf)
+int ha_heap::write_row(uchar * buf)
{
int res;
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
@@ -202,10 +234,10 @@ int ha_heap::write_row(byte * buf)
return res;
}
-int ha_heap::update_row(const byte * old_data, byte * new_data)
+int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
int res;
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
res= heap_update(file,old_data,new_data);
@@ -221,10 +253,10 @@ int ha_heap::update_row(const byte * old_data, byte * new_data)
return res;
}
-int ha_heap::delete_row(const byte * buf)
+int ha_heap::delete_row(const uchar * buf)
{
int res;
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_delete_count);
res= heap_delete(file,buf);
if (!res && table->s->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
@@ -238,74 +270,69 @@ int ha_heap::delete_row(const byte * buf)
return res;
}
-int ha_heap::index_read(byte * buf, const byte * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_heap::index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
}
-int ha_heap::index_read_last(byte *buf, const byte *key, key_part_map keypart_map)
+int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error= heap_rkey(file, buf, active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status= error ? STATUS_NOT_FOUND : 0;
return error;
}
-int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error = heap_rkey(file, buf, index, key, keypart_map, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
}
-int ha_heap::index_next(byte * buf)
+int ha_heap::index_next(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_heap::index_prev(byte * buf)
+int ha_heap::index_prev(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_prev_count);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_heap::index_first(byte * buf)
+int ha_heap::index_first(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_first_count);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_heap::index_last(byte * buf)
+int ha_heap::index_last(uchar * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_last_count);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -316,28 +343,26 @@ int ha_heap::rnd_init(bool scan)
return scan ? heap_scan_init(file) : 0;
}
-int ha_heap::rnd_next(byte *buf)
+int ha_heap::rnd_next(uchar *buf)
{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=heap_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_heap::rnd_pos(byte * buf, byte *pos)
+int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
int error;
HEAP_PTR heap_position;
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR));
error=heap_rrnd(file, buf, heap_position);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-void ha_heap::position(const byte *record)
+void ha_heap::position(const uchar *record)
{
*(HEAP_PTR*) ref= heap_position(file); // Ref is aligned
}
@@ -531,7 +556,7 @@ int ha_heap::delete_table(const char *name)
void ha_heap::drop_table(const char *name)
{
- heap_drop_table(file);
+ file->s->delete_on_close= 1;
close();
}
@@ -670,16 +695,16 @@ int ha_heap::create(const char *name, TABLE *table_arg,
create_info->auto_increment_value - 1 : 0);
hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
hp_create_info.with_auto_increment= found_real_auto_increment;
+ hp_create_info.internal_table= internal_table;
max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
error= heap_create(name,
keys, keydef, share->reclength,
(ulong) ((share->max_rows < max_rows &&
share->max_rows) ?
share->max_rows : max_rows),
- (ulong) share->min_rows, &hp_create_info);
- my_free((gptr) keydef, MYF(0));
- if (file)
- info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
+ (ulong) share->min_rows, &hp_create_info, &internal_share);
+ my_free((uchar*) keydef, MYF(0));
+ DBUG_ASSERT(file == 0);
return (error);
}
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index a2d531fc515..5c5ad43658e 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -25,13 +25,16 @@
class ha_heap: public handler
{
HP_INFO *file;
+ HP_SHARE *internal_share;
key_map btree_keys;
/* number of records changed since last statistics update */
uint records_changed;
uint key_stat_version;
+ my_bool internal_table;
public:
ha_heap(handlerton *hton, TABLE_SHARE *table);
~ha_heap() {}
+ handler *clone(MEM_ROOT *mem_root);
const char *table_type() const
{
return (table->in_use->variables.sql_mode & MODE_MYSQL323) ?
@@ -48,14 +51,15 @@ public:
ulonglong table_flags() const
{
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
- HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
- HA_ONLY_WHOLE_INDEX);
+ HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
+ HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR);
}
const key_map *keys_to_use_for_scanning() { return &btree_keys; }
uint max_supported_keys() const { return MAX_KEY; }
@@ -68,26 +72,27 @@ public:
int open(const char *name, int mode, uint test_if_locked);
int close(void);
void set_keys_for_scanning(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
+ int write_row(uchar * buf);
+ int update_row(const uchar * old_data, uchar * new_data);
+ int delete_row(const uchar * buf);
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
- int index_read(byte * buf, const byte * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_last(byte *buf, const byte *key, key_part_map keypart_map);
- int index_read_idx(byte * buf, uint index, const byte * key,
- key_part_map keypart_map, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
+ int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
+ int index_read_idx_map(uchar * buf, uint index, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_next(uchar * buf);
+ int index_prev(uchar * buf);
+ int index_first(uchar * buf);
+ int index_last(uchar * buf);
int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ void position(const uchar *record);
int info(uint);
int extra(enum ha_extra_function operation);
int reset();
@@ -105,11 +110,9 @@ public:
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
- int cmp_ref(const byte *ref1, const byte *ref2)
+ int cmp_ref(const uchar *ref1, const uchar *ref2)
{
- HEAP_PTR ptr1=*(HEAP_PTR*)ref1;
- HEAP_PTR ptr2=*(HEAP_PTR*)ref2;
- return ptr1 < ptr2? -1 : (ptr1 > ptr2? 1 : 0);
+ return memcmp(ref1, ref2, sizeof(HEAP_PTR));
}
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
private:
diff --git a/storage/heap/heapdef.h b/storage/heap/heapdef.h
index b52a6c60ac6..3fc94062303 100644
--- a/storage/heap/heapdef.h
+++ b/storage/heap/heapdef.h
@@ -16,6 +16,7 @@
/* This file is included in all heap-files */
#include <my_base.h> /* This includes global */
+C_MODE_START
#ifdef THREAD
#include <my_pthread.h>
#endif
@@ -48,7 +49,7 @@ if (!(info->update & HA_STATE_AKTIV))\
typedef struct st_hp_hash_info
{
struct st_hp_hash_info *next_key;
- byte *ptr_to_rec;
+ uchar *ptr_to_rec;
} HASH_INFO;
typedef struct {
@@ -60,42 +61,42 @@ typedef struct {
/* Prototypes for intern functions */
extern HP_SHARE *hp_find_named_heap(const char *name);
-extern int hp_rectest(HP_INFO *info,const byte *old);
-extern byte *hp_find_block(HP_BLOCK *info,ulong pos);
-extern int hp_get_new_block(HP_BLOCK *info, ulong* alloc_length);
+extern int hp_rectest(HP_INFO *info,const uchar *old);
+extern uchar *hp_find_block(HP_BLOCK *info,ulong pos);
+extern int hp_get_new_block(HP_BLOCK *info, size_t* alloc_length);
extern void hp_free(HP_SHARE *info);
-extern byte *hp_free_level(HP_BLOCK *block,uint level,HP_PTRS *pos,
- byte *last_pos);
+extern uchar *hp_free_level(HP_BLOCK *block,uint level,HP_PTRS *pos,
+ uchar *last_pos);
extern int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo,
- const byte *record, byte *recpos);
+ const uchar *record, uchar *recpos);
extern int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo,
- const byte *record, byte *recpos);
+ const uchar *record, uchar *recpos);
extern int hp_rb_delete_key(HP_INFO *info,HP_KEYDEF *keyinfo,
- const byte *record,byte *recpos,int flag);
+ const uchar *record,uchar *recpos,int flag);
extern int hp_delete_key(HP_INFO *info,HP_KEYDEF *keyinfo,
- const byte *record,byte *recpos,int flag);
+ const uchar *record,uchar *recpos,int flag);
extern HASH_INFO *_heap_find_hash(HP_BLOCK *block,ulong pos);
-extern byte *hp_search(HP_INFO *info,HP_KEYDEF *keyinfo,const byte *key,
+extern uchar *hp_search(HP_INFO *info,HP_KEYDEF *keyinfo,const uchar *key,
uint nextflag);
-extern byte *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo,
- const byte *key, HASH_INFO *pos);
-extern ulong hp_hashnr(HP_KEYDEF *keyinfo,const byte *key);
-extern ulong hp_rec_hashnr(HP_KEYDEF *keyinfo,const byte *rec);
+extern uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo,
+ const uchar *key, HASH_INFO *pos);
+extern ulong hp_hashnr(HP_KEYDEF *keyinfo,const uchar *key);
+extern ulong hp_rec_hashnr(HP_KEYDEF *keyinfo,const uchar *rec);
extern ulong hp_mask(ulong hashnr,ulong buffmax,ulong maxlength);
extern void hp_movelink(HASH_INFO *pos,HASH_INFO *next_link,
HASH_INFO *newlink);
-extern int hp_rec_key_cmp(HP_KEYDEF *keydef,const byte *rec1,
- const byte *rec2,
+extern int hp_rec_key_cmp(HP_KEYDEF *keydef,const uchar *rec1,
+ const uchar *rec2,
my_bool diff_if_only_endspace_difference);
-extern int hp_key_cmp(HP_KEYDEF *keydef,const byte *rec,
- const byte *key);
-extern void hp_make_key(HP_KEYDEF *keydef,byte *key,const byte *rec);
-extern uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
- const byte *rec, byte *recpos);
-extern uint hp_rb_key_length(HP_KEYDEF *keydef, const byte *key);
-extern uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key);
-extern uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key);
-extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const byte *record);
+extern int hp_key_cmp(HP_KEYDEF *keydef,const uchar *rec,
+ const uchar *key);
+extern void hp_make_key(HP_KEYDEF *keydef,uchar *key,const uchar *rec);
+extern uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key,
+ const uchar *rec, uchar *recpos);
+extern uint hp_rb_key_length(HP_KEYDEF *keydef, const uchar *key);
+extern uint hp_rb_null_key_length(HP_KEYDEF *keydef, const uchar *key);
+extern uint hp_rb_var_key_length(HP_KEYDEF *keydef, const uchar *key);
+extern my_bool hp_if_null_in_key(HP_KEYDEF *keyinfo, const uchar *record);
extern int hp_close(register HP_INFO *info);
extern void hp_clear(HP_SHARE *info);
extern void hp_clear_keys(HP_SHARE *info);
@@ -107,3 +108,4 @@ extern pthread_mutex_t THR_LOCK_heap;
#define pthread_mutex_lock(A)
#define pthread_mutex_unlock(A)
#endif
+C_MODE_END
diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c
index 85219380287..c622a9e52f8 100644
--- a/storage/heap/hp_block.c
+++ b/storage/heap/hp_block.c
@@ -26,7 +26,7 @@
{p_0, p_1, ...} serve as indexes to descend the blocks tree.
*/
-byte *hp_find_block(HP_BLOCK *block, ulong pos)
+uchar *hp_find_block(HP_BLOCK *block, ulong pos)
{
reg1 int i;
reg3 HP_PTRS *ptr; /* block base ptr */
@@ -36,12 +36,13 @@ byte *hp_find_block(HP_BLOCK *block, ulong pos)
ptr=(HP_PTRS*)ptr->blocks[pos/block->level_info[i].records_under_level];
pos%=block->level_info[i].records_under_level;
}
- return (byte*) ptr+ pos*block->recbuffer;
+ return (uchar*) ptr+ pos*block->recbuffer;
}
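
[Editor's note — illustrative sketch, not part of the patch. The descent
above divides by records_under_level to pick a child slot and carries the
remainder down to the leaf; a hypothetical standalone mirror:

    struct Level
    {
      unsigned long records_under_level;  /* records per child subtree */
    };

    static unsigned char *find_record(const Level *level_info, int levels,
                                      void *root, unsigned long pos,
                                      unsigned long recbuffer)
    {
      void *ptr= root;                    /* block base pointer */
      for (int i= levels - 1; i > 0; i--)
      {
        ptr= ((void **) ptr)[pos / level_info[i].records_under_level];
        pos%= level_info[i].records_under_level;
      }
      return (unsigned char *) ptr + pos * recbuffer;
    }
]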
/*
Get one new block-of-records. Alloc ptr to block if needed
+
SYNOPSIS
hp_get_new_block()
block HP_BLOCK tree-like block
@@ -53,7 +54,7 @@ byte *hp_find_block(HP_BLOCK *block, ulong pos)
1 Out of memory
*/
-int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length)
+int hp_get_new_block(HP_BLOCK *block, size_t *alloc_length)
{
reg1 uint i,j;
HP_PTRS *root;
@@ -101,13 +102,13 @@ int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length)
/* Occupy the free slot we've found at level i */
block->level_info[i].last_blocks->
blocks[HP_PTRS_IN_NOD - block->level_info[i].free_ptrs_in_block--]=
- (byte*) root;
+ (uchar*) root;
/* Add a block subtree with each node having one left-most child */
for (j=i-1 ; j >0 ; j--)
{
block->level_info[j].last_blocks= root++;
- block->level_info[j].last_blocks->blocks[0]=(byte*) root;
+ block->level_info[j].last_blocks->blocks[0]=(uchar*) root;
block->level_info[j].free_ptrs_in_block=HP_PTRS_IN_NOD-1;
}
@@ -124,27 +125,27 @@ int hp_get_new_block(HP_BLOCK *block, ulong *alloc_length)
/* free all blocks under level */
-byte *hp_free_level(HP_BLOCK *block, uint level, HP_PTRS *pos, byte *last_pos)
+uchar *hp_free_level(HP_BLOCK *block, uint level, HP_PTRS *pos, uchar *last_pos)
{
int i,max_pos;
- byte *next_ptr;
+ uchar *next_ptr;
if (level == 1)
- next_ptr=(byte*) pos+block->recbuffer;
+ next_ptr=(uchar*) pos+block->recbuffer;
else
{
max_pos= (block->level_info[level-1].last_blocks == pos) ?
HP_PTRS_IN_NOD - block->level_info[level-1].free_ptrs_in_block :
HP_PTRS_IN_NOD;
- next_ptr=(byte*) (pos+1);
+ next_ptr=(uchar*) (pos+1);
for (i=0 ; i < max_pos ; i++)
next_ptr=hp_free_level(block,level-1,
(HP_PTRS*) pos->blocks[i],next_ptr);
}
- if ((byte*) pos != last_pos)
+ if ((uchar*) pos != last_pos)
{
- my_free((gptr) pos,MYF(0));
+ my_free((uchar*) pos,MYF(0));
return last_pos;
}
return next_ptr; /* next memory position */
diff --git a/storage/heap/hp_clear.c b/storage/heap/hp_clear.c
index 2d8b8b394d5..babfcbd6f41 100644
--- a/storage/heap/hp_clear.c
+++ b/storage/heap/hp_clear.c
@@ -32,7 +32,7 @@ void hp_clear(HP_SHARE *info)
if (info->block.levels)
VOID(hp_free_level(&info->block,info->block.levels,info->block.root,
- (byte*) 0));
+ (uchar*) 0));
info->block.levels=0;
hp_clear_keys(info);
info->records= info->deleted= 0;
@@ -94,7 +94,7 @@ void hp_clear_keys(HP_SHARE *info)
{
HP_BLOCK *block= &keyinfo->block;
if (block->levels)
- VOID(hp_free_level(block,block->levels,block->root,(byte*) 0));
+ VOID(hp_free_level(block,block->levels,block->root,(uchar*) 0));
block->levels=0;
block->last_allocated=0;
keyinfo->hash_buckets= 0;
diff --git a/storage/heap/hp_close.c b/storage/heap/hp_close.c
index 5f6fc3249b5..d571815980c 100644
--- a/storage/heap/hp_close.c
+++ b/storage/heap/hp_close.c
@@ -42,9 +42,10 @@ int hp_close(register HP_INFO *info)
}
#endif
info->s->changed=0;
- heap_open_list=list_delete(heap_open_list,&info->open_list);
+ if (info->open_list.data)
+ heap_open_list=list_delete(heap_open_list,&info->open_list);
if (!--info->s->open_count && info->s->delete_on_close)
hp_free(info->s); /* Table was deleted */
- my_free((gptr) info,MYF(0));
+ my_free((uchar*) info,MYF(0));
DBUG_RETURN(error);
}
diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c
index 4e1347966b9..b6814fc1614 100644
--- a/storage/heap/hp_create.c
+++ b/storage/heap/hp_create.c
@@ -19,33 +19,37 @@ static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2);
static void init_block(HP_BLOCK *block,uint reclength,ulong min_records,
ulong max_records);
+/* Create a heap table */
+
int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
uint reclength, ulong max_records, ulong min_records,
- HP_CREATE_INFO *create_info)
+ HP_CREATE_INFO *create_info, HP_SHARE **res)
{
uint i, j, key_segs, max_length, length;
- HP_SHARE *share;
+ HP_SHARE *share= 0;
HA_KEYSEG *keyseg;
-
DBUG_ENTER("heap_create");
- pthread_mutex_lock(&THR_LOCK_heap);
- if ((share= hp_find_named_heap(name)) && share->open_count == 0)
+ if (!create_info->internal_table)
{
- hp_free(share);
- share= NULL;
- }
-
+ pthread_mutex_lock(&THR_LOCK_heap);
+ if ((share= hp_find_named_heap(name)) && share->open_count == 0)
+ {
+ hp_free(share);
+ share= 0;
+ }
+ }
+
if (!share)
{
HP_KEYDEF *keyinfo;
DBUG_PRINT("info",("Initializing new table"));
/*
- We have to store sometimes byte* del_link in records,
- so the record length should be at least sizeof(byte*)
+ We have to store sometimes uchar* del_link in records,
+ so the record length should be at least sizeof(uchar*)
*/
- set_if_bigger(reclength, sizeof (byte*));
+ set_if_bigger(reclength, sizeof (uchar*));
for (i= key_segs= max_length= 0, keyinfo= keydef; i < keys; i++, keyinfo++)
{
@@ -112,7 +116,7 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
}
keyinfo->length= length;
length+= keyinfo->rb_tree.size_of_element +
- ((keyinfo->algorithm == HA_KEY_ALG_BTREE) ? sizeof(byte*) : 0);
+ ((keyinfo->algorithm == HA_KEY_ALG_BTREE) ? sizeof(uchar*) : 0);
if (length > max_length)
max_length= length;
key_segs+= keyinfo->keysegs;
@@ -131,10 +135,7 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
keys*sizeof(HP_KEYDEF)+
key_segs*sizeof(HA_KEYSEG),
MYF(MY_ZEROFILL))))
- {
- pthread_mutex_unlock(&THR_LOCK_heap);
- DBUG_RETURN(1);
- }
+ goto err;
share->keydef= (HP_KEYDEF*) (share + 1);
share->key_stat_version= 1;
keyseg= (HA_KEYSEG*) (share->keydef + keys);
@@ -152,12 +153,12 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
{
/* additional HA_KEYTYPE_END keyseg */
keyseg->type= HA_KEYTYPE_END;
- keyseg->length= sizeof(byte*);
+ keyseg->length= sizeof(uchar*);
keyseg->flag= 0;
keyseg->null_bit= 0;
keyseg++;
- init_tree(&keyinfo->rb_tree, 0, 0, sizeof(byte*),
+ init_tree(&keyinfo->rb_tree, 0, 0, sizeof(uchar*),
(qsort_cmp2)keys_compare, 1, NULL, NULL);
keyinfo->delete_key= hp_rb_delete_key;
keyinfo->write_key= hp_rb_write_key;
@@ -188,21 +189,34 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
/* Must be allocated separately for rename to work */
if (!(share->name= my_strdup(name,MYF(0))))
{
- my_free((gptr) share,MYF(0));
- pthread_mutex_unlock(&THR_LOCK_heap);
- DBUG_RETURN(1);
+ my_free((uchar*) share,MYF(0));
+ goto err;
}
#ifdef THREAD
thr_lock_init(&share->lock);
VOID(pthread_mutex_init(&share->intern_lock,MY_MUTEX_INIT_FAST));
#endif
- share->open_list.data= (void*) share;
- heap_share_list= list_add(heap_share_list,&share->open_list);
+ if (!create_info->internal_table)
+ {
+ share->open_list.data= (void*) share;
+ heap_share_list= list_add(heap_share_list,&share->open_list);
+ }
+ else
+ share->delete_on_close= 1;
}
- pthread_mutex_unlock(&THR_LOCK_heap);
+ if (!create_info->internal_table)
+ pthread_mutex_unlock(&THR_LOCK_heap);
+
+ *res= share;
DBUG_RETURN(0);
+
+err:
+ if (!create_info->internal_table)
+ pthread_mutex_unlock(&THR_LOCK_heap);
+ DBUG_RETURN(1);
} /* heap_create */
+
static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2)
{
uint not_used[2];
@@ -218,7 +232,7 @@ static void init_block(HP_BLOCK *block, uint reclength, ulong min_records,
max_records= max(min_records,max_records);
if (!max_records)
    max_records= 1000;			/* As good a guess as anything */
- recbuffer= (uint) (reclength + sizeof(byte**) - 1) & ~(sizeof(byte**) - 1);
+ recbuffer= (uint) (reclength + sizeof(uchar**) - 1) & ~(sizeof(uchar**) - 1);
records_in_block= max_records / 10;
if (records_in_block < 10 && max_records)
records_in_block= 10;
@@ -279,13 +293,14 @@ void heap_drop_table(HP_INFO *info)
void hp_free(HP_SHARE *share)
{
- heap_share_list= list_delete(heap_share_list, &share->open_list);
+ if (share->open_list.data) /* If not internal table */
+ heap_share_list= list_delete(heap_share_list, &share->open_list);
hp_clear(share); /* Remove blocks from memory */
#ifdef THREAD
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->intern_lock));
#endif
- my_free((gptr) share->name, MYF(0));
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share->name, MYF(0));
+ my_free((uchar*) share, MYF(0));
return;
}
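
heap_create() now hands the share back through an out-parameter instead of
forcing a later lookup by name. A sketch of the new calling convention,
mirroring the test updates later in this patch (values are illustrative):

    HP_SHARE *share;
    HP_INFO *file;
    HP_CREATE_INFO create_info;
    bzero((uchar*) &create_info, sizeof(create_info));
    if (heap_create("test1", 1, keyinfo, 30, 0L, 0L, &create_info, &share))
      return 1;                                   /* create failed */
    /* Registered open, as hp_test2.c does for its second table: */
    file= heap_open_from_share_and_register(share, 2);
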
diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c
index 637e5f1a497..1dd79a42e0b 100644
--- a/storage/heap/hp_delete.c
+++ b/storage/heap/hp_delete.c
@@ -17,9 +17,9 @@
#include "heapdef.h"
-int heap_delete(HP_INFO *info, const byte *record)
+int heap_delete(HP_INFO *info, const uchar *record)
{
- byte *pos;
+ uchar *pos;
HP_SHARE *share=info->s;
HP_KEYDEF *keydef, *end, *p_lastinx;
DBUG_ENTER("heap_delete");
@@ -43,7 +43,7 @@ int heap_delete(HP_INFO *info, const byte *record)
}
info->update=HA_STATE_DELETED;
- *((byte**) pos)=share->del_link;
+ *((uchar**) pos)=share->del_link;
share->del_link=pos;
pos[share->reclength]=0; /* Record deleted */
share->deleted++;
@@ -65,7 +65,7 @@ err:
*/
int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
- const byte *record, byte *recpos, int flag)
+ const uchar *record, uchar *recpos, int flag)
{
heap_rb_param custom_arg;
uint old_allocated;
@@ -105,7 +105,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
*/
int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
- const byte *record, byte *recpos, int flag)
+ const uchar *record, uchar *recpos, int flag)
{
ulong blength,pos2,pos_hashnr,lastpos_hashnr;
HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr;
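
The del_link manipulation above is an intrusive free list: the first
sizeof(uchar*) bytes of a deleted record hold the link, which is exactly why
heap_create() clamps reclength to at least sizeof(uchar*). A push/pop sketch
of the pattern (the pop side appears in hp_write.c below):

    *((uchar**) pos)= share->del_link;   /* push: record body stores link */
    share->del_link= pos;
    pos[share->reclength]= 0;            /* trailing status byte: deleted */

    pos= share->del_link;                /* pop: reuse most recent hole */
    share->del_link= *((uchar**) pos);
    share->deleted--;
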
diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c
index fbf3e541372..aaaa0fe833f 100644
--- a/storage/heap/hp_hash.c
+++ b/storage/heap/hp_hash.c
@@ -97,7 +97,7 @@ ha_rows hp_rb_records_in_range(HP_INFO *info, int inx, key_range *min_key,
/* Sets info->current_ptr to found record */
/* next_flag: Search=0, next=1, prev =2, same =3 */
-byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key,
+uchar *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key,
uint nextflag)
{
reg1 HASH_INFO *pos,*prev_ptr;
@@ -175,7 +175,7 @@ byte *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key,
since last read !
*/
-byte *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *key,
+uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key,
HASH_INFO *pos)
{
DBUG_ENTER("hp_search_next");
@@ -238,7 +238,7 @@ void hp_movelink(HASH_INFO *pos, HASH_INFO *next_link, HASH_INFO *newlink)
/* Calc hashvalue for a key */
-ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
+ulong hp_hashnr(register HP_KEYDEF *keydef, register const uchar *key)
{
/*register*/
ulong nr=1, nr2=4;
@@ -304,7 +304,7 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
/* Calc hashvalue for a key in a record */
-ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
+ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec)
{
ulong nr=1, nr2=4;
HA_KEYSEG *seg,*endseg;
@@ -377,9 +377,15 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
* far, and works well on both numbers and strings.
*/
-ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
+ulong hp_hashnr(register HP_KEYDEF *keydef, register const uchar *key)
{
- register ulong nr=0;
+ /*
+    Note: if a key combines numeric and text columns, this hash
+    most likely won't work well. Making text columns work with
+    NEW_HASH_FUNCTION also requires changes in strings/ctype-xxx.c.
+ */
+ ulong nr= 1, nr2= 4;
HA_KEYSEG *seg,*endseg;
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
@@ -401,14 +407,15 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
- seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL);
+ seg->charset->coll->hash_sort(seg->charset, pos, ((uchar*)key)-pos,
+ &nr, &nr2);
}
else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */
{
uint pack_length= 2; /* Key packing is constant */
uint length= uint2korr(pos);
- seg->charset->hash_sort(seg->charset, pos+pack_length, length, &nr,
- NULL);
+ seg->charset->coll->hash_sort(seg->charset, pos+pack_length, length,
+ &nr, &nr2);
key+= pack_length;
}
else
@@ -426,9 +433,9 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
/* Calc hashvalue for a key in a record */
-ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
+ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec)
{
- register ulong nr=0;
+ ulong nr= 1, nr2= 4;
HA_KEYSEG *seg,*endseg;
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
@@ -444,14 +451,16 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
}
if (seg->type == HA_KEYTYPE_TEXT)
{
- seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL);
+ uint char_length= seg->length; /* TODO: fix to use my_charpos() */
+ seg->charset->coll->hash_sort(seg->charset, pos, char_length,
+ &nr, &nr2);
}
else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */
{
uint pack_length= seg->bit_start;
uint length= (pack_length == 1 ? (uint) *(uchar*) pos : uint2korr(pos));
- seg->charset->hash_sort(seg->charset, pos+pack_length,
- length, &nr, NULL);
+ seg->charset->coll->hash_sort(seg->charset, pos+pack_length,
+ length, &nr, &nr2);
}
else
{
@@ -490,7 +499,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec)
    <> 0 Key differs
*/
-int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2,
+int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2,
my_bool diff_if_only_endspace_difference)
{
HA_KEYSEG *seg,*endseg;
@@ -577,7 +586,7 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2,
/* Compare a key in a record to a whole key */
-int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
+int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key)
{
HA_KEYSEG *seg,*endseg;
@@ -661,7 +670,7 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
/* Copy a key from a record to a keybuffer */
-void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec)
+void hp_make_key(HP_KEYDEF *keydef, uchar *key, const uchar *rec)
{
HA_KEYSEG *seg,*endseg;
@@ -693,10 +702,10 @@ void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec)
} while(0)
-uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
- const byte *rec, byte *recpos)
+uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key,
+ const uchar *rec, uchar *recpos)
{
- byte *start_key= key;
+ uchar *start_key= key;
HA_KEYSEG *seg, *endseg;
for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++)
@@ -710,7 +719,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
if (seg->flag & HA_SWAP_KEY)
{
uint length= seg->length;
- byte *pos= (byte*) rec + seg->start;
+ uchar *pos= (uchar*) rec + seg->start;
#ifdef HAVE_ISNAN
if (seg->type == HA_KEYTYPE_FLOAT)
@@ -759,7 +768,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
set_if_smaller(length,tmp_length);
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ memcpy((uchar*) key,(uchar*) pos,(size_t) char_length);
key+= char_length;
continue;
}
@@ -778,7 +787,7 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key,
memcpy(key, rec + seg->start, (size_t) char_length);
key+= seg->length;
}
- memcpy(key, &recpos, sizeof(byte*));
+ memcpy(key, &recpos, sizeof(uchar*));
return (uint) (key - start_key);
}
@@ -802,7 +811,7 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old,
if (seg->flag & HA_SWAP_KEY)
{
uint length= seg->length;
- byte *pos= (byte*) old + length;
+ uchar *pos= (uchar*) old + length;
while (length--)
{
@@ -822,7 +831,7 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old,
set_if_smaller(length,tmp_length); /* Safety */
FIX_LENGTH(cs, old, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key, old,(size_t) char_length);
+ memcpy((uchar*) key, old,(size_t) char_length);
key+= char_length;
continue;
}
@@ -844,15 +853,15 @@ uint hp_rb_pack_key(HP_KEYDEF *keydef, uchar *key, const uchar *old,
uint hp_rb_key_length(HP_KEYDEF *keydef,
- const byte *key __attribute__((unused)))
+ const uchar *key __attribute__((unused)))
{
return keydef->length;
}
-uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key)
+uint hp_rb_null_key_length(HP_KEYDEF *keydef, const uchar *key)
{
- const byte *start_key= key;
+ const uchar *start_key= key;
HA_KEYSEG *seg, *endseg;
for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++)
@@ -865,9 +874,9 @@ uint hp_rb_null_key_length(HP_KEYDEF *keydef, const byte *key)
}
-uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key)
+uint hp_rb_var_key_length(HP_KEYDEF *keydef, const uchar *key)
{
- const byte *start_key= key;
+ const uchar *start_key= key;
HA_KEYSEG *seg, *endseg;
for (seg= keydef->seg, endseg= seg + keydef->keysegs; seg < endseg; seg++)
@@ -892,7 +901,7 @@ uint hp_rb_var_key_length(HP_KEYDEF *keydef, const byte *key)
0 otherwise
*/
-my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const byte *record)
+my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const uchar *record)
{
HA_KEYSEG *seg,*endseg;
for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++)
@@ -918,7 +927,7 @@ my_bool hp_if_null_in_key(HP_KEYDEF *keydef, const byte *record)
less than zero.
*/
-void heap_update_auto_increment(HP_INFO *info, const byte *record)
+void heap_update_auto_increment(HP_INFO *info, const uchar *record)
{
ulonglong value= 0; /* Store unsigned values here */
longlong s_value= 0; /* Store signed values here */
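
The recurring change in this file is that collation hashing now goes through
coll->hash_sort() with both accumulators, so text segments advance the same
(nr, nr2) state as the byte-wise loops. A sketch for one text segment, with
seg, pos and length as in the surrounding code:

    ulong nr= 1, nr2= 4;                  /* same seeds as hp_hashnr() */
    CHARSET_INFO *cs= seg->charset;
    /* Collation-aware: strings that compare equal hash equal */
    cs->coll->hash_sort(cs, pos, length, &nr, &nr2);
    /* nr holds the hash once every segment has been folded in */
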
diff --git a/storage/heap/hp_info.c b/storage/heap/hp_info.c
index 2c58604eed1..ea78c53fd40 100644
--- a/storage/heap/hp_info.c
+++ b/storage/heap/hp_info.c
@@ -18,7 +18,7 @@
#include "heapdef.h"
-byte *heap_position(HP_INFO *info)
+uchar *heap_position(HP_INFO *info)
{
return ((info->update & HA_STATE_AKTIV) ? info->current_ptr :
(HEAP_PTR) 0);
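
heap_position() only returns a usable address while HA_STATE_AKTIV is set,
i.e. right after a successful read. A usage sketch in the style of the tests:

    uchar *pos= 0;
    if (!heap_rkey(file, record, 0, key, 6, HA_READ_KEY_EXACT))
      pos= heap_position(file);   /* later: heap_rrnd(file, record, pos) */
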
diff --git a/storage/heap/hp_open.c b/storage/heap/hp_open.c
index 02a8d4f95ca..4d5ec6e27ac 100644
--- a/storage/heap/hp_open.c
+++ b/storage/heap/hp_open.c
@@ -22,43 +22,34 @@
#include "my_sys.h"
-HP_INFO *heap_open(const char *name, int mode)
+/*
+ Open heap table based on HP_SHARE structure
+
+ NOTE
+ This doesn't register the table in the open table list.
+*/
+
+HP_INFO *heap_open_from_share(HP_SHARE *share, int mode)
{
HP_INFO *info;
- HP_SHARE *share;
+ DBUG_ENTER("heap_open_from_share");
- DBUG_ENTER("heap_open");
- pthread_mutex_lock(&THR_LOCK_heap);
- if (!(share= hp_find_named_heap(name)))
- {
- my_errno= ENOENT;
- pthread_mutex_unlock(&THR_LOCK_heap);
- DBUG_RETURN(0);
- }
if (!(info= (HP_INFO*) my_malloc((uint) sizeof(HP_INFO) +
2 * share->max_key_length,
MYF(MY_ZEROFILL))))
{
- pthread_mutex_unlock(&THR_LOCK_heap);
DBUG_RETURN(0);
}
share->open_count++;
#ifdef THREAD
thr_lock_data_init(&share->lock,&info->lock,NULL);
#endif
- info->open_list.data= (void*) info;
- heap_open_list= list_add(heap_open_list,&info->open_list);
- pthread_mutex_unlock(&THR_LOCK_heap);
-
info->s= share;
- info->lastkey= (byte*) (info + 1);
- info->recbuf= (byte*) (info->lastkey + share->max_key_length);
+ info->lastkey= (uchar*) (info + 1);
+ info->recbuf= (uchar*) (info->lastkey + share->max_key_length);
info->mode= mode;
info->current_record= (ulong) ~0L; /* No current record */
- info->current_ptr= 0;
- info->current_hash_ptr= 0;
info->lastinx= info->errkey= -1;
- info->update= 0;
#ifndef DBUG_OFF
info->opt_flag= READ_CHECK_USED; /* Check when changing */
#endif
@@ -68,7 +59,59 @@ HP_INFO *heap_open(const char *name, int mode)
DBUG_RETURN(info);
}
- /* map name to a heap-nr. If name isn't found return 0 */
+
+/*
+ Open heap table based on HP_SHARE structure and register it
+*/
+
+HP_INFO *heap_open_from_share_and_register(HP_SHARE *share, int mode)
+{
+ HP_INFO *info;
+ DBUG_ENTER("heap_open_from_share_and_register");
+
+ pthread_mutex_lock(&THR_LOCK_heap);
+ if ((info= heap_open_from_share(share, mode)))
+ {
+ info->open_list.data= (void*) info;
+ heap_open_list= list_add(heap_open_list,&info->open_list);
+ }
+ pthread_mutex_unlock(&THR_LOCK_heap);
+ DBUG_RETURN(info);
+}
+
+
+/*
+ Open heap table based on name
+
+ NOTE
+  This registers the table in the open table list, so that it can be
+  found by future heap_open() calls.
+*/
+
+HP_INFO *heap_open(const char *name, int mode)
+{
+ HP_INFO *info;
+ HP_SHARE *share;
+ DBUG_ENTER("heap_open");
+
+ pthread_mutex_lock(&THR_LOCK_heap);
+ if (!(share= hp_find_named_heap(name)))
+ {
+ my_errno= ENOENT;
+ pthread_mutex_unlock(&THR_LOCK_heap);
+ DBUG_RETURN(0);
+ }
+ if ((info= heap_open_from_share(share, mode)))
+ {
+ info->open_list.data= (void*) info;
+ heap_open_list= list_add(heap_open_list,&info->open_list);
+ }
+ pthread_mutex_unlock(&THR_LOCK_heap);
+ DBUG_RETURN(info);
+}
+
+
+/* Map name to a heap number. If the name isn't found, return 0 */
HP_SHARE *hp_find_named_heap(const char *name)
{
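
The open path is now layered, and the entry point to use depends on whether
the table must be discoverable by name later. A sketch of the choice, given a
share obtained from heap_create():

    HP_INFO *info;
    if (create_info.internal_table)
      info= heap_open_from_share(share, mode);               /* anonymous */
    else
      info= heap_open_from_share_and_register(share, mode);  /* on open list */
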
diff --git a/storage/heap/hp_rfirst.c b/storage/heap/hp_rfirst.c
index d1842949421..48c1e625bd8 100644
--- a/storage/heap/hp_rfirst.c
+++ b/storage/heap/hp_rfirst.c
@@ -17,7 +17,7 @@
/* Read first record with the current key */
-int heap_rfirst(HP_INFO *info, byte *record, int inx)
+int heap_rfirst(HP_INFO *info, uchar *record, int inx)
{
HP_SHARE *share = info->s;
HP_KEYDEF *keyinfo = share->keydef + inx;
@@ -26,13 +26,13 @@ int heap_rfirst(HP_INFO *info, byte *record, int inx)
info->lastinx= inx;
if (keyinfo->algorithm == HA_KEY_ALG_BTREE)
{
- byte *pos;
+ uchar *pos;
if ((pos = tree_search_edge(&keyinfo->rb_tree, info->parents,
&info->last_pos, offsetof(TREE_ELEMENT, left))))
{
memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos),
- sizeof(byte*));
+ sizeof(uchar*));
info->current_ptr = pos;
memcpy(record, pos, (size_t)share->reclength);
info->update = HA_STATE_AKTIV;
diff --git a/storage/heap/hp_rkey.c b/storage/heap/hp_rkey.c
index ced81985f99..6eeac6acd7b 100644
--- a/storage/heap/hp_rkey.c
+++ b/storage/heap/hp_rkey.c
@@ -15,10 +15,10 @@
#include "heapdef.h"
-int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key,
+int heap_rkey(HP_INFO *info, uchar *record, int inx, const uchar *key,
key_part_map keypart_map, enum ha_rkey_function find_flag)
{
- byte *pos;
+ uchar *pos;
HP_SHARE *share= info->s;
HP_KEYDEF *keyinfo= share->keydef + inx;
DBUG_ENTER("heap_rkey");
@@ -53,7 +53,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key,
info->update= 0;
DBUG_RETURN(my_errno= HA_ERR_KEY_NOT_FOUND);
}
- memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), sizeof(byte*));
+ memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos), sizeof(uchar*));
info->current_ptr= pos;
}
else
@@ -74,7 +74,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key,
/* Quick find of record */
-gptr heap_find(HP_INFO *info, int inx, const byte *key)
+uchar* heap_find(HP_INFO *info, int inx, const uchar *key)
{
return hp_search(info, info->s->keydef + inx, key, 0);
}
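
In the B-tree branches a stored key is followed by the address of its row,
which is what the repeated memcpy idiom recovers. A sketch, where key_ptr is
an illustrative name for the tree element found by the search:

    uchar *rec;
    /* key bytes first, then sizeof(uchar*) bytes of record address */
    memcpy(&rec, key_ptr + (*keyinfo->get_key_length)(keyinfo, key_ptr),
           sizeof(uchar*));
    memcpy(record, rec, (size_t) share->reclength);
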
diff --git a/storage/heap/hp_rlast.c b/storage/heap/hp_rlast.c
index b72e815147f..45ad7c21f49 100644
--- a/storage/heap/hp_rlast.c
+++ b/storage/heap/hp_rlast.c
@@ -18,7 +18,7 @@
/* Read first record with the current key */
-int heap_rlast(HP_INFO *info, byte *record, int inx)
+int heap_rlast(HP_INFO *info, uchar *record, int inx)
{
HP_SHARE *share= info->s;
HP_KEYDEF *keyinfo= share->keydef + inx;
@@ -27,13 +27,13 @@ int heap_rlast(HP_INFO *info, byte *record, int inx)
info->lastinx= inx;
if (keyinfo->algorithm == HA_KEY_ALG_BTREE)
{
- byte *pos;
+ uchar *pos;
if ((pos = tree_search_edge(&keyinfo->rb_tree, info->parents,
&info->last_pos, offsetof(TREE_ELEMENT, right))))
{
memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos),
- sizeof(byte*));
+ sizeof(uchar*));
info->current_ptr = pos;
memcpy(record, pos, (size_t)share->reclength);
info->update = HA_STATE_AKTIV;
diff --git a/storage/heap/hp_rnext.c b/storage/heap/hp_rnext.c
index 3b436fe87aa..262754e9e64 100644
--- a/storage/heap/hp_rnext.c
+++ b/storage/heap/hp_rnext.c
@@ -17,9 +17,9 @@
/* Read next record with the same key */
-int heap_rnext(HP_INFO *info, byte *record)
+int heap_rnext(HP_INFO *info, uchar *record)
{
- byte *pos;
+ uchar *pos;
HP_SHARE *share=info->s;
HP_KEYDEF *keyinfo;
DBUG_ENTER("heap_rnext");
@@ -47,7 +47,7 @@ int heap_rnext(HP_INFO *info, byte *record)
if (pos)
{
memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos),
- sizeof(byte*));
+ sizeof(uchar*));
info->current_ptr = pos;
}
else
diff --git a/storage/heap/hp_rprev.c b/storage/heap/hp_rprev.c
index bfdd2f9d47a..63bfffffba9 100644
--- a/storage/heap/hp_rprev.c
+++ b/storage/heap/hp_rprev.c
@@ -18,9 +18,9 @@
/* Read prev record for key */
-int heap_rprev(HP_INFO *info, byte *record)
+int heap_rprev(HP_INFO *info, uchar *record)
{
- byte *pos;
+ uchar *pos;
HP_SHARE *share=info->s;
HP_KEYDEF *keyinfo;
DBUG_ENTER("heap_rprev");
@@ -47,7 +47,7 @@ int heap_rprev(HP_INFO *info, byte *record)
if (pos)
{
memcpy(&pos, pos + (*keyinfo->get_key_length)(keyinfo, pos),
- sizeof(byte*));
+ sizeof(uchar*));
info->current_ptr = pos;
}
else
diff --git a/storage/heap/hp_rrnd.c b/storage/heap/hp_rrnd.c
index ad0190cc00c..3ac23d293f2 100644
--- a/storage/heap/hp_rrnd.c
+++ b/storage/heap/hp_rrnd.c
@@ -24,7 +24,7 @@
HA_ERR_END_OF_FILE = EOF.
*/
-int heap_rrnd(register HP_INFO *info, byte *record, byte *pos)
+int heap_rrnd(register HP_INFO *info, uchar *record, uchar *pos)
{
HP_SHARE *share=info->s;
DBUG_ENTER("heap_rrnd");
@@ -59,7 +59,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos)
HA_ERR_END_OF_FILE = EOF.
*/
-int heap_rrnd_old(register HP_INFO *info, byte *record, ulong pos)
+int heap_rrnd_old(register HP_INFO *info, uchar *record, ulong pos)
{
HP_SHARE *share=info->s;
DBUG_ENTER("heap_rrnd");
diff --git a/storage/heap/hp_rsame.c b/storage/heap/hp_rsame.c
index 10513f91726..1a3724672b6 100644
--- a/storage/heap/hp_rsame.c
+++ b/storage/heap/hp_rsame.c
@@ -25,7 +25,7 @@
HA_ERR_KEY_NOT_FOUND = Record not found with key
*/
-int heap_rsame(register HP_INFO *info, byte *record, int inx)
+int heap_rsame(register HP_INFO *info, uchar *record, int inx)
{
HP_SHARE *share=info->s;
DBUG_ENTER("heap_rsame");
diff --git a/storage/heap/hp_scan.c b/storage/heap/hp_scan.c
index 4249ac4148a..e8913e92c86 100644
--- a/storage/heap/hp_scan.c
+++ b/storage/heap/hp_scan.c
@@ -34,7 +34,7 @@ int heap_scan_init(register HP_INFO *info)
DBUG_RETURN(0);
}
-int heap_scan(register HP_INFO *info, byte *record)
+int heap_scan(register HP_INFO *info, uchar *record)
{
HP_SHARE *share=info->s;
ulong pos;
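
Scans report deleted slots separately from end-of-file, so callers must skip
HA_ERR_RECORD_DELETED rather than stop, as the bundled tests do. Loop sketch:

    int error;
    heap_scan_init(file);
    while ((error= heap_scan(file, record)) != HA_ERR_END_OF_FILE)
    {
      if (error == HA_ERR_RECORD_DELETED)
        continue;                 /* hole left behind by heap_delete() */
      if (error)
        break;                    /* genuine failure */
      /* ... process record ... */
    }
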
diff --git a/storage/heap/hp_test1.c b/storage/heap/hp_test1.c
index 31c9b8f2f30..b1b55098a78 100644
--- a/storage/heap/hp_test1.c
+++ b/storage/heap/hp_test1.c
@@ -32,11 +32,12 @@ int main(int argc, char **argv)
{
int i,j,error,deleted;
HP_INFO *file;
- char record[128],key[32];
+ uchar record[128],key[32];
const char *filename;
HP_KEYDEF keyinfo[10];
HA_KEYSEG keyseg[4];
HP_CREATE_INFO hp_create_info;
+ HP_SHARE *tmp_share;
MY_INIT(argv[0]);
filename= "test1";
@@ -52,23 +53,24 @@ int main(int argc, char **argv)
keyinfo[0].seg[0].start=1;
keyinfo[0].seg[0].length=6;
keyinfo[0].seg[0].charset= &my_charset_latin1;
+ keyinfo[0].seg[0].null_bit= 0;
keyinfo[0].flag = HA_NOSAME;
deleted=0;
- bzero((gptr) flags,sizeof(flags));
+ bzero((uchar*) flags,sizeof(flags));
printf("- Creating heap-file\n");
if (heap_create(filename,1,keyinfo,30,(ulong) flag*100000L,10L,
- &hp_create_info) ||
+ &hp_create_info, &tmp_share) ||
!(file= heap_open(filename, 2)))
goto err;
printf("- Writing records:s\n");
- strmov(record," ..... key ");
+ strmov((char*) record," ..... key ");
for (i=49 ; i>=1 ; i-=2 )
{
j=i%25 +1;
- sprintf(key,"%6d",j);
+ sprintf((char*) key,"%6d",j);
bmove(record+1,key,6);
error=heap_write(file,record);
if (heap_check_heap(file,0))
@@ -90,18 +92,18 @@ int main(int argc, char **argv)
for (i=1 ; i<=10 ; i++)
{
if (i == remove_ant) { VOID(heap_close(file)) ; return (0) ; }
- sprintf(key,"%6d",(j=(int) ((rand() & 32767)/32767.*25)));
+ sprintf((char*) key,"%6d",(j=(int) ((rand() & 32767)/32767.*25)));
if ((error = heap_rkey(file,record,0,key,6,HA_READ_KEY_EXACT)))
{
if (verbose || (flags[j] == 1 ||
(error && my_errno != HA_ERR_KEY_NOT_FOUND)))
- printf("key: %s rkey: %3d my_errno: %3d\n",key,error,my_errno);
+ printf("key: %s rkey: %3d my_errno: %3d\n",(char*) key,error,my_errno);
}
else
{
error=heap_delete(file,record);
if (error || verbose)
- printf("key: %s delete: %d my_errno: %d\n",key,error,my_errno);
+ printf("key: %s delete: %d my_errno: %d\n",(char*) key,error,my_errno);
flags[j]=0;
if (! error)
deleted++;
@@ -116,7 +118,7 @@ int main(int argc, char **argv)
printf("- Reading records with key\n");
for (i=1 ; i<=25 ; i++)
{
- sprintf(key,"%6d",i);
+ sprintf((char*) key,"%6d",i);
bmove(record+1,key,6);
my_errno=0;
error=heap_rkey(file,record,0,key,6,HA_READ_KEY_EXACT);
@@ -125,7 +127,7 @@ int main(int argc, char **argv)
(error && (flags[i] != 0 || my_errno != HA_ERR_KEY_NOT_FOUND)))
{
printf("key: %s rkey: %3d my_errno: %3d record: %s\n",
- key,error,my_errno,record+1);
+ (char*) key,error,my_errno,record+1);
}
}
@@ -148,7 +150,7 @@ int main(int argc, char **argv)
if (verbose || (error != 0 && error != HA_ERR_RECORD_DELETED))
{
printf("pos: %2d ni_rrnd: %3d my_errno: %3d record: %s\n",
- i-1,error,my_errno,record+1);
+ i-1,error,my_errno,(char*) record+1);
}
}
}
diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c
index dcca5fb44b9..e57a554e5d9 100644
--- a/storage/heap/hp_test2.c
+++ b/storage/heap/hp_test2.c
@@ -42,8 +42,8 @@ static my_bool key3[MAX_RECORDS];
static int reclength=39;
-static int calc_check(byte *buf,uint length);
-static void make_record(char *record, uint n1, uint n2, uint n3,
+static int calc_check(uchar *buf,uint length);
+static void make_record(uchar *record, uint n1, uint n2, uint n3,
const char *mark, uint count);
/* Main program */
@@ -56,9 +56,10 @@ int main(int argc, char *argv[])
int error;
ulong pos;
unsigned long key_check;
- char record[128],record2[128],record3[128],key[10];
+ uchar record[128],record2[128],record3[128],key[10];
const char *filename,*filename2;
HP_INFO *file,*file2;
+ HP_SHARE *tmp_share;
HP_KEYDEF keyinfo[MAX_KEYS];
HA_KEYSEG keyseg[MAX_KEYS*5];
HEAP_PTR position;
@@ -126,13 +127,13 @@ int main(int argc, char *argv[])
printf("- Creating heap-file\n");
if (heap_create(filename,keys,keyinfo,reclength,(ulong) flag*100000L,
- (ulong) recant/2, &hp_create_info) ||
+ (ulong) recant/2, &hp_create_info, &tmp_share) ||
!(file= heap_open(filename, 2)))
goto err;
signal(SIGINT,endprog);
printf("- Writing records:s\n");
- strmov(record," ..... key");
+ strmov((char*) record," ..... key");
for (i=0 ; i < recant ; i++)
{
@@ -178,10 +179,10 @@ int main(int argc, char *argv[])
for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
if (j != 0)
{
- sprintf(key,"%6d",j);
+ sprintf((char*) key,"%6d",j);
if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT))
{
- printf("can't find key1: \"%s\"\n",key);
+ printf("can't find key1: \"%s\"\n",(char*) key);
goto err;
}
#ifdef NOT_USED
@@ -191,13 +192,13 @@ int main(int argc, char *argv[])
#endif
if (heap_delete(file,record))
{
- printf("error: %d; can't delete record: \"%s\"\n", my_errno,record);
+ printf("error: %d; can't delete record: \"%s\"\n", my_errno,(char*) record);
goto err;
}
opt_delete++;
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
- key_check-=atoi(record);
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
+ key_check-=atoi((char*) record);
if (testflag == 2 && heap_check_heap(file,0))
{
puts("Heap keys crashed");
@@ -238,10 +239,10 @@ int main(int argc, char *argv[])
for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
if (!key1[j])
continue;
- sprintf(key,"%6d",j);
+ sprintf((char*) key,"%6d",j);
if (heap_rkey(file,record,0,key,6, HA_READ_KEY_EXACT))
{
- printf("can't find key1: \"%s\"\n",key);
+ printf("can't find key1: \"%s\"\n",(char*) key);
goto err;
}
}
@@ -250,19 +251,20 @@ int main(int argc, char *argv[])
if (my_errno != HA_ERR_FOUND_DUPP_KEY || key3[n3] == 0)
{
printf("error: %d; can't update:\nFrom: \"%s\"\nTo: \"%s\"\n",
- my_errno,record,record2);
+ my_errno,(char*) record, (char*) record2);
goto err;
}
if (verbose)
- printf("Double key when tried to update:\nFrom: \"%s\"\nTo: \"%s\"\n",record,record2);
+ printf("Double key when tried to update:\nFrom: \"%s\"\nTo: \"%s\"\n",
+ (char*) record, (char*) record2);
}
else
{
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
key1[n1]++; key3[n3]=1;
update++;
- key_check=key_check-atoi(record)+n1;
+ key_check=key_check-atoi((char*) record)+n1;
}
if (testflag == 3 && heap_check_heap(file,0))
{
@@ -280,7 +282,7 @@ int main(int argc, char *argv[])
for (i=999, dupp_keys=found_key=0 ; i>0 ; i--)
{
if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; }
- sprintf(key,"%6d",found_key);
+ sprintf((char*) key,"%6d",found_key);
}
if (dupp_keys > 3)
@@ -293,9 +295,9 @@ int main(int argc, char *argv[])
goto err;
if (heap_rnext(file,record3)) goto err;
if (heap_delete(file,record3)) goto err;
- key_check-=atoi(record3);
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
+ key_check-=atoi((char*) record3);
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
opt_delete++;
ant=2;
while ((error=heap_rnext(file,record3)) == 0 ||
@@ -320,16 +322,16 @@ int main(int argc, char *argv[])
if (heap_rlast(file,record3,0)) goto err;
if (heap_delete(file,record3)) goto err;
- key_check-=atoi(record3);
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
+ key_check-=atoi((char*) record3);
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
opt_delete++;
if (heap_rprev(file,record3) || heap_rprev(file,record3))
goto err;
if (heap_delete(file,record3)) goto err;
- key_check-=atoi(record3);
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
+ key_check-=atoi((char*) record3);
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
opt_delete++;
ant=3;
while ((error=heap_rprev(file,record3)) == 0 ||
@@ -364,10 +366,10 @@ int main(int argc, char *argv[])
if (error)
goto err;
if (heap_delete(file,record3)) goto err;
- key_check-=atoi(record3);
+ key_check-=atoi((char*) record3);
opt_delete++;
- key1[atoi(record+keyinfo[0].seg[0].start)]--;
- key3[atoi(record+keyinfo[2].seg[0].start)]=0;
+ key1[atoi((char*) record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) record+keyinfo[2].seg[0].start)]=0;
ant=0;
while ((error=heap_scan(file,record3)) == 0 ||
error == HA_ERR_RECORD_DELETED)
@@ -509,7 +511,7 @@ int main(int argc, char *argv[])
for (i=999, dupp_keys=found_key=0 ; i>0 ; i--)
{
if (key1[i] > dupp_keys) { dupp_keys=key1[i]; found_key=i; }
- sprintf(key,"%6d",found_key);
+ sprintf((char*) key,"%6d",found_key);
}
printf("- Read through all keys with first-next-last-prev\n");
ant=0;
@@ -562,8 +564,9 @@ int main(int argc, char *argv[])
heap_close(file2);
printf("- Creating output heap-file 2\n");
- if (heap_create(filename2,1,keyinfo,reclength,0L,0L,&hp_create_info) ||
- !(file2= heap_open(filename2, 2)))
+ if (heap_create(filename2, 1, keyinfo, reclength, 0L, 0L, &hp_create_info,
+ &tmp_share) ||
+ !(file2= heap_open_from_share_and_register(tmp_share, 2)))
goto err;
printf("- Copying and removing records\n");
@@ -575,7 +578,7 @@ int main(int argc, char *argv[])
{
if (heap_write(file2,record))
goto err;
- key_check-=atoi(record);
+ key_check-=atoi((char*) record);
write_count++;
if (heap_delete(file,record))
goto err;
@@ -674,7 +677,7 @@ static sig_handler endprog(int sig_number __attribute__((unused)))
}
}
-static int calc_check(byte *buf, uint length)
+static int calc_check(uchar *buf, uint length)
{
int check=0;
while (length--)
@@ -682,11 +685,11 @@ static int calc_check(byte *buf, uint length)
return check;
}
-static void make_record(char *record, uint n1, uint n2, uint n3,
+static void make_record(uchar *record, uint n1, uint n2, uint n3,
const char *mark, uint count)
{
bfill(record,reclength,' ');
- sprintf(record,"%6d:%4d:%8d:%3.3s: %4d",
+ sprintf((char*) record,"%6d:%4d:%8d:%3.3s: %4d",
n1,n2,n3,mark,count);
record[37]='A'; /* Store A in null key */
record[38]=1; /* set as null */
diff --git a/storage/heap/hp_update.c b/storage/heap/hp_update.c
index e7314e3d38c..11dca974ad4 100644
--- a/storage/heap/hp_update.c
+++ b/storage/heap/hp_update.c
@@ -17,10 +17,10 @@
#include "heapdef.h"
-int heap_update(HP_INFO *info, const byte *old, const byte *heap_new)
+int heap_update(HP_INFO *info, const uchar *old, const uchar *heap_new)
{
HP_KEYDEF *keydef, *end, *p_lastinx;
- byte *pos;
+ uchar *pos;
bool auto_key_changed= 0;
HP_SHARE *share= info->s;
DBUG_ENTER("heap_update");
diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c
index 19215fcf017..2abef2d9b43 100644
--- a/storage/heap/hp_write.c
+++ b/storage/heap/hp_write.c
@@ -25,14 +25,14 @@
#define HIGHFIND 4
#define HIGHUSED 8
-static byte *next_free_record_pos(HP_SHARE *info);
+static uchar *next_free_record_pos(HP_SHARE *info);
static HASH_INFO *hp_find_free_hash(HP_SHARE *info, HP_BLOCK *block,
ulong records);
-int heap_write(HP_INFO *info, const byte *record)
+int heap_write(HP_INFO *info, const uchar *record)
{
HP_KEYDEF *keydef, *end;
- byte *pos;
+ uchar *pos;
HP_SHARE *share=info->s;
DBUG_ENTER("heap_write");
#ifndef DBUG_OFF
@@ -88,7 +88,7 @@ err:
}
share->deleted++;
- *((byte**) pos)=share->del_link;
+ *((uchar**) pos)=share->del_link;
share->del_link=pos;
pos[share->reclength]=0; /* Record deleted */
@@ -99,8 +99,8 @@ err:
Write a key to rb_tree-index
*/
-int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *record,
- byte *recpos)
+int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *record,
+ uchar *recpos)
{
heap_rb_param custom_arg;
uint old_allocated;
@@ -130,17 +130,17 @@ int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const byte *record,
/* Find where to place new record */
-static byte *next_free_record_pos(HP_SHARE *info)
+static uchar *next_free_record_pos(HP_SHARE *info)
{
int block_pos;
- byte *pos;
- ulong length;
+ uchar *pos;
+ size_t length;
DBUG_ENTER("next_free_record_pos");
if (info->del_link)
{
pos=info->del_link;
- info->del_link= *((byte**) pos);
+ info->del_link= *((uchar**) pos);
info->deleted--;
DBUG_PRINT("exit",("Used old position: 0x%lx",(long) pos));
DBUG_RETURN(pos);
@@ -158,9 +158,9 @@ static byte *next_free_record_pos(HP_SHARE *info)
info->data_length+=length;
}
DBUG_PRINT("exit",("Used new position: 0x%lx",
- (long) ((byte*) info->block.level_info[0].last_blocks+
+ (long) ((uchar*) info->block.level_info[0].last_blocks+
block_pos * info->block.recbuffer)));
- DBUG_RETURN((byte*) info->block.level_info[0].last_blocks+
+ DBUG_RETURN((uchar*) info->block.level_info[0].last_blocks+
block_pos*info->block.recbuffer);
}
@@ -191,12 +191,12 @@ static byte *next_free_record_pos(HP_SHARE *info)
*/
int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo,
- const byte *record, byte *recpos)
+ const uchar *record, uchar *recpos)
{
HP_SHARE *share = info->s;
int flag;
ulong halfbuff,hashnr,first_index;
- byte *ptr_to_rec,*ptr_to_rec2;
+ uchar *ptr_to_rec,*ptr_to_rec2;
HASH_INFO *empty,*gpos,*gpos2,*pos;
DBUG_ENTER("hp_write_key");
@@ -390,7 +390,7 @@ static HASH_INFO *hp_find_free_hash(HP_SHARE *info,
HP_BLOCK *block, ulong records)
{
uint block_pos;
- ulong length;
+ size_t length;
if (records < block->last_allocated)
return hp_find_hash(block,records);
@@ -401,6 +401,6 @@ static HASH_INFO *hp_find_free_hash(HP_SHARE *info,
info->index_length+=length;
}
block->last_allocated=records+1;
- return((HASH_INFO*) ((byte*) block->level_info[0].last_blocks+
+ return((HASH_INFO*) ((uchar*) block->level_info[0].last_blocks+
block_pos*block->recbuffer));
}
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index 873a73be0ec..47b1a566cd8 100644..100755
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -15,15 +15,16 @@
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
-ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -DWIN32 -D_LIB)
+ADD_DEFINITIONS(-DMYSQL_SERVER -D_WIN32 -D_LIB)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
- include
- handler
+ ${CMAKE_SOURCE_DIR}/storage/innobase/include
+ ${CMAKE_SOURCE_DIR}/storage/innobase/handler
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
+
+SET(INNOBASE_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
buf/buf0buf.c buf/buf0flu.c buf/buf0lru.c buf/buf0rea.c
data/data0data.c data/data0type.c
dict/dict0boot.c dict/dict0crea.c dict/dict0dict.c dict/dict0load.c dict/dict0mem.c
@@ -54,3 +55,7 @@ ADD_LIBRARY(innobase btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
trx/trx0purge.c trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c
usr/usr0sess.c
ut/ut0byte.c ut/ut0dbg.c ut/ut0mem.c ut/ut0rnd.c ut/ut0ut.c ut/ut0vec.c ut/ut0list.c ut/ut0wqueue.c)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(innobase ${INNOBASE_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/innobase/Makefile.am b/storage/innobase/Makefile.am
index 62c0f8e817c..30e056d68fb 100644
--- a/storage/innobase/Makefile.am
+++ b/storage/innobase/Makefile.am
@@ -25,102 +25,149 @@ INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \
-I$(top_srcdir)/sql \
-I$(srcdir)
-AUTOMAKE_OPTIONS = foreign
-TAR = gtar
+DEFS = @DEFS@
-noinst_HEADERS =
-SUBDIRS = os ut btr buf data dict dyn eval fil fsp fut \
- ha ibuf lock log mach mem mtr page \
- handler \
- pars que read rem row srv sync thr trx usr
+noinst_HEADERS = include/btr0btr.h include/btr0btr.ic \
+ include/btr0cur.h include/btr0cur.ic \
+ include/btr0pcur.h include/btr0pcur.ic \
+ include/btr0sea.h include/btr0sea.ic \
+ include/btr0types.h include/buf0buf.h \
+ include/buf0buf.ic include/buf0flu.h \
+ include/buf0flu.ic include/buf0lru.h \
+ include/buf0lru.ic include/buf0rea.h \
+ include/buf0types.h include/data0data.h \
+ include/data0data.ic include/data0type.h \
+ include/data0type.ic include/data0types.h \
+ include/db0err.h include/dict0boot.h \
+ include/dict0boot.ic include/dict0crea.h \
+ include/dict0crea.ic include/dict0dict.h \
+ include/dict0dict.ic include/dict0load.h \
+ include/dict0load.ic include/dict0mem.h \
+ include/dict0mem.ic include/dict0types.h \
+ include/dyn0dyn.h include/dyn0dyn.ic \
+ include/eval0eval.h include/eval0eval.ic \
+ include/eval0proc.h include/eval0proc.ic \
+ include/fil0fil.h include/fsp0fsp.h \
+ include/fsp0fsp.ic include/fut0fut.h \
+ include/fut0fut.ic include/fut0lst.h \
+ include/fut0lst.ic include/ha0ha.h \
+ include/ha0ha.ic include/hash0hash.h \
+ include/hash0hash.ic include/ibuf0ibuf.h \
+ include/ibuf0ibuf.ic include/ibuf0types.h \
+ include/lock0iter.h \
+ include/lock0lock.h include/lock0lock.ic \
+ include/lock0priv.h include/lock0priv.ic \
+ include/lock0types.h include/log0log.h \
+ include/log0log.ic include/log0recv.h \
+ include/log0recv.ic include/mach0data.h \
+ include/mach0data.ic include/mem0dbg.h \
+ include/mem0dbg.ic mem/mem0dbg.c \
+ include/mem0mem.h include/mem0mem.ic \
+ include/mem0pool.h include/mem0pool.ic \
+ include/mtr0log.h include/mtr0log.ic \
+ include/mtr0mtr.h include/mtr0mtr.ic \
+ include/mtr0types.h include/os0file.h \
+ include/os0proc.h include/os0proc.ic \
+ include/os0sync.h include/os0sync.ic \
+ include/os0thread.h include/os0thread.ic \
+ include/page0cur.h include/page0cur.ic \
+ include/page0page.h include/page0page.ic \
+ include/page0types.h include/pars0grm.h \
+ include/pars0opt.h include/pars0opt.ic \
+ include/pars0pars.h include/pars0pars.ic \
+ include/pars0sym.h include/pars0sym.ic \
+ include/pars0types.h include/que0que.h \
+ include/que0que.ic include/que0types.h \
+ include/read0read.h include/read0read.ic \
+ include/read0types.h include/rem0cmp.h \
+ include/rem0cmp.ic include/rem0rec.h \
+ include/rem0rec.ic include/rem0types.h \
+ include/row0ins.h include/row0ins.ic \
+ include/row0mysql.h include/row0mysql.ic \
+ include/row0purge.h include/row0purge.ic \
+ include/row0row.h include/row0row.ic \
+ include/row0sel.h include/row0sel.ic \
+ include/row0types.h include/row0uins.h \
+ include/row0uins.ic include/row0umod.h \
+ include/row0umod.ic include/row0undo.h \
+ include/row0undo.ic include/row0upd.h \
+ include/row0upd.ic include/row0vers.h \
+ include/row0vers.ic include/srv0que.h \
+ include/srv0srv.h include/srv0srv.ic \
+ include/srv0start.h include/sync0arr.h \
+ include/sync0arr.ic include/sync0rw.h \
+ include/sync0rw.ic include/sync0sync.h \
+ include/sync0sync.ic include/sync0types.h \
+ include/thr0loc.h include/thr0loc.ic \
+ include/trx0purge.h include/trx0purge.ic \
+ include/trx0rec.h include/trx0rec.ic \
+ include/trx0roll.h include/trx0roll.ic \
+ include/trx0rseg.h include/trx0rseg.ic \
+ include/trx0sys.h include/trx0sys.ic \
+ include/trx0trx.h include/trx0trx.ic \
+ include/trx0types.h include/trx0undo.h \
+ include/trx0undo.ic include/trx0xa.h \
+ include/univ.i include/usr0sess.h \
+ include/usr0sess.ic include/usr0types.h \
+ include/ut0byte.h include/ut0byte.ic \
+ include/ut0dbg.h include/ut0lst.h \
+ include/ut0mem.h include/ut0mem.ic \
+ include/ut0rnd.h include/ut0rnd.ic \
+ include/ut0sort.h include/ut0ut.h \
+ include/ut0ut.ic include/ut0vec.h \
+ include/ut0vec.ic include/ut0list.h \
+ include/ut0list.ic include/ut0wqueue.h \
+ include/ha_prototypes.h handler/ha_innodb.h
-EXTRA_DIST = include/btr0btr.h include/btr0btr.ic include/btr0cur.h include/btr0cur.ic \
- include/btr0pcur.h include/btr0pcur.ic include/btr0sea.h include/btr0sea.ic \
- include/btr0types.h \
- include/buf0buf.h include/buf0buf.ic include/buf0flu.h include/buf0flu.ic \
- include/buf0lru.h include/buf0lru.ic include/buf0rea.h include/buf0types.h \
- include/data0data.h include/data0data.ic include/data0type.h include/data0type.ic \
- include/data0types.h include/db0err.h \
- include/dict0boot.h include/dict0boot.ic include/dict0crea.h include/dict0crea.ic \
- include/dict0dict.h include/dict0dict.ic include/dict0load.h include/dict0load.ic \
- include/dict0mem.h include/dict0mem.ic include/dict0types.h \
- include/dyn0dyn.h include/dyn0dyn.ic \
- include/eval0eval.h include/eval0eval.ic include/eval0proc.h include/eval0proc.ic \
- include/fil0fil.h include/fsp0fsp.h include/fsp0fsp.ic \
- include/fut0fut.h include/fut0fut.ic include/fut0lst.h include/fut0lst.ic \
- include/ha0ha.h include/ha0ha.ic include/hash0hash.h include/hash0hash.ic \
- include/ibuf0ibuf.h include/ibuf0ibuf.ic include/ibuf0types.h \
- include/lock0lock.h include/lock0lock.ic include/lock0types.h \
- include/log0log.h include/log0log.ic include/log0recv.h include/log0recv.ic \
- include/mach0data.h include/mach0data.ic include/mem0dbg.h include/mem0dbg.ic \
- include/mem0mem.h include/mem0mem.ic include/mem0pool.h include/mem0pool.ic \
- include/mtr0log.h include/mtr0log.ic include/mtr0mtr.h include/mtr0mtr.ic \
- include/mtr0types.h include/os0file.h \
- include/os0proc.h include/os0proc.ic include/os0sync.h include/os0sync.ic \
- include/os0thread.h include/os0thread.ic \
- include/page0cur.h include/page0cur.ic include/page0page.h include/page0page.ic \
- include/page0types.h \
- include/pars0grm.h include/pars0opt.h include/pars0opt.ic \
- include/pars0pars.h include/pars0pars.ic include/pars0sym.h include/pars0sym.ic \
- include/pars0types.h \
- include/que0que.h include/que0que.ic include/que0types.h \
- include/read0read.h include/read0read.ic include/read0types.h \
- include/rem0cmp.h include/rem0cmp.ic include/rem0rec.h include/rem0rec.ic \
- include/rem0types.h \
- include/row0ins.h include/row0ins.ic include/row0mysql.h include/row0mysql.ic \
- include/row0purge.h include/row0purge.ic include/row0row.h include/row0row.ic \
- include/row0sel.h include/row0sel.ic include/row0types.h \
- include/row0uins.h include/row0uins.ic include/row0umod.h include/row0umod.ic \
- include/row0undo.h include/row0undo.ic include/row0upd.h include/row0upd.ic \
- include/row0vers.h include/row0vers.ic \
- include/srv0que.h include/srv0srv.h include/srv0srv.ic include/srv0start.h \
- include/sync0arr.h include/sync0arr.ic include/sync0rw.h include/sync0rw.ic \
- include/sync0sync.h include/sync0sync.ic include/sync0types.h \
- include/thr0loc.h include/thr0loc.ic \
- include/trx0purge.h include/trx0purge.ic include/trx0rec.h include/trx0rec.ic \
- include/trx0roll.h include/trx0roll.ic include/trx0rseg.h include/trx0rseg.ic \
- include/trx0sys.h include/trx0sys.ic include/trx0trx.h include/trx0trx.ic \
- include/trx0types.h include/trx0undo.h include/trx0undo.ic include/trx0xa.h \
- include/univ.i include/usr0sess.h include/usr0sess.ic include/usr0types.h \
- include/ut0byte.h include/ut0byte.ic include/ut0dbg.h include/ut0lst.h \
- include/ut0mem.h include/ut0mem.ic include/ut0rnd.h include/ut0rnd.ic \
- handler/ha_innodb.h \
- include/ut0sort.h include/ut0ut.h include/ut0ut.ic include/ut0vec.h include/ut0vec.ic include/ha_prototypes.h \
- include/ut0list.h include/ut0list.ic \
- include/ut0wqueue.h \
- pars/make_bison.sh pars/make_flex.sh \
- pars/pars0grm.y pars/pars0lex.l \
- CMakeLists.txt plug.in
+EXTRA_LIBRARIES = libinnobase.a
+noinst_LIBRARIES = @plugin_innobase_static_target@
+libinnobase_a_SOURCES = btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c \
+ btr/btr0sea.c buf/buf0buf.c buf/buf0flu.c \
+ buf/buf0lru.c buf/buf0rea.c data/data0data.c \
+ data/data0type.c dict/dict0boot.c \
+ dict/dict0crea.c dict/dict0dict.c \
+ dict/dict0load.c dict/dict0mem.c dyn/dyn0dyn.c \
+ eval/eval0eval.c eval/eval0proc.c \
+ fil/fil0fil.c fsp/fsp0fsp.c fut/fut0fut.c \
+ fut/fut0lst.c ha/ha0ha.c ha/hash0hash.c \
+ ibuf/ibuf0ibuf.c lock/lock0iter.c \
+ lock/lock0lock.c \
+ log/log0log.c log/log0recv.c mach/mach0data.c \
+ mem/mem0mem.c mem/mem0pool.c mtr/mtr0log.c \
+ mtr/mtr0mtr.c os/os0file.c os/os0proc.c \
+ os/os0sync.c os/os0thread.c page/page0cur.c \
+ page/page0page.c pars/lexyy.c pars/pars0grm.c \
+ pars/pars0opt.c pars/pars0pars.c \
+ pars/pars0sym.c que/que0que.c read/read0read.c \
+ rem/rem0cmp.c rem/rem0rec.c row/row0ins.c \
+ row/row0mysql.c row/row0purge.c row/row0row.c \
+ row/row0sel.c row/row0uins.c row/row0umod.c \
+ row/row0undo.c row/row0upd.c row/row0vers.c \
+ srv/srv0que.c srv/srv0srv.c srv/srv0start.c \
+ sync/sync0arr.c sync/sync0rw.c \
+ sync/sync0sync.c thr/thr0loc.c trx/trx0purge.c \
+ trx/trx0rec.c trx/trx0roll.c trx/trx0rseg.c \
+ trx/trx0sys.c trx/trx0trx.c trx/trx0undo.c \
+ usr/usr0sess.c ut/ut0byte.c ut/ut0dbg.c \
+ ut/ut0list.c ut/ut0mem.c ut/ut0rnd.c \
+ ut/ut0ut.c ut/ut0vec.c ut/ut0wqueue.c \
+ handler/ha_innodb.cc
-noinst_LIBRARIES = libinnobase.a
-libinnobase_a_LIBADD = usr/libusr.a srv/libsrv.a dict/libdict.a \
- que/libque.a srv/libsrv.a ibuf/libibuf.a \
- row/librow.a pars/libpars.a btr/libbtr.a \
- trx/libtrx.a read/libread.a usr/libusr.a \
- buf/libbuf.a ibuf/libibuf.a eval/libeval.a \
- log/liblog.a fsp/libfsp.a fut/libfut.a \
- fil/libfil.a lock/liblock.a mtr/libmtr.a \
- page/libpage.a rem/librem.a thr/libthr.a \
- sync/libsync.a data/libdata.a mach/libmach.a \
- ha/libha.a dyn/libdyn.a mem/libmem.a \
- handler/libhandler.a \
- ut/libut.a os/libos.a ut/libut.a
-libinnobase_a_SOURCES =
+libinnobase_a_CXXFLAGS= $(AM_CFLAGS)
+libinnobase_a_CFLAGS = $(AM_CFLAGS)
+EXTRA_LTLIBRARIES = ha_innodb.la
+pkglib_LTLIBRARIES = @plugin_innobase_shared_target@
-libinnobase.a: $(libinnobase_a_LIBADD)
- -rm -f $@
- if test "$(host_os)" = "netware" ; \
- then \
- $(libinnobase_a_AR) $@ $(libinnobase_a_LIBADD) ; \
- else \
- for arc in $(libinnobase_a_LIBADD); do \
- arpath=`echo $$arc|sed 's|[^/]*$$||'`; \
- $(AR) t $$arc|sed "s|^|$$arpath|"; \
- done | sort -u | xargs $(AR) cq $@ ; \
- $(RANLIB) $@ ; \
- fi
+ha_innodb_la_LDFLAGS = -module -rpath $(MYSQLLIBdir)
+ha_innodb_la_CXXFLAGS= $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_innodb_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_innodb_la_SOURCES = $(libinnobase_a_SOURCES)
+
+EXTRA_DIST = CMakeLists.txt plug.in \
+ pars/make_bison.sh pars/make_flex.sh \
+ pars/pars0grm.y pars/pars0lex.l
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/innobase/btr/Makefile.am b/storage/innobase/btr/Makefile.am
deleted file mode 100644
index 6b09b289cdc..00000000000
--- a/storage/innobase/btr/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libbtr.a
-
-libbtr_a_SOURCES = btr0btr.c btr0cur.c btr0pcur.c btr0sea.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c
index 50a349e78d6..6e8b43aeb8d 100644
--- a/storage/innobase/btr/btr0btr.c
+++ b/storage/innobase/btr/btr0btr.c
@@ -2606,8 +2606,11 @@ btr_index_rec_validate(
rec_get_nth_field(rec, offsets, i, &len);
- /* Note that prefix indexes are not fixed size even when
- their type is CHAR. */
+ /* Note that if fixed_size != 0, it equals the
+ length of a fixed-size column in the clustered index.
+		A prefix index of the column has a fixed, but different,
+		length. When fixed_size == 0, prefix_len is the maximum
+ length of the prefix index column. */
if ((dict_index_get_nth_field(index, i)->prefix_len == 0
&& len != UNIV_SQL_NULL && fixed_size
diff --git a/storage/innobase/buf/Makefile.am b/storage/innobase/buf/Makefile.am
deleted file mode 100644
index 946d5a2e5c2..00000000000
--- a/storage/innobase/buf/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libbuf.a
-
-libbuf_a_SOURCES = buf0buf.c buf0flu.c buf0lru.c buf0rea.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c
index c847b8db9e2..469d3ac05d7 100644
--- a/storage/innobase/buf/buf0buf.c
+++ b/storage/innobase/buf/buf0buf.c
@@ -903,8 +903,7 @@ buf_block_make_young(
	/* Note that we read freed_page_clock values without holding any mutex:
this is allowed since the result is used only in heuristics */
- if (buf_pool->freed_page_clock >= block->freed_page_clock
- + 1 + (buf_pool->curr_size / 4)) {
+ if (buf_block_peek_if_too_old(block)) {
mutex_enter(&buf_pool->mutex);
/* There has been freeing activity in the LRU list:
@@ -1648,6 +1647,15 @@ buf_page_init(
block->lock_hash_val = lock_rec_hash(space, offset);
+#ifdef UNIV_DEBUG_VALGRIND
+ if (!space) {
+ /* Silence valid Valgrind warnings about uninitialized
+ data being written to data files. There are some unused
+ bytes on some pages that InnoDB does not initialize. */
+ UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
+ }
+#endif /* UNIV_DEBUG_VALGRIND */
+
/* Insert into the hash table of file pages */
if (buf_page_hash_get(space, offset)) {
diff --git a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c
index 1e27144bdbf..7b49a7641af 100644
--- a/storage/innobase/buf/buf0lru.c
+++ b/storage/innobase/buf/buf0lru.c
@@ -244,7 +244,15 @@ buf_LRU_search_and_free_block(
frame at all */
if (block->frame) {
+ /* The page was declared uninitialized
+ by buf_LRU_block_remove_hashed_page().
+ We need to flag the contents of the
+ page valid (which it still is) in
+ order to avoid bogus Valgrind
+ warnings. */
+ UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
btr_search_drop_page_hash_index(block->frame);
+ UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE);
}
ut_a(block->buf_fix_count == 0);
@@ -449,6 +457,7 @@ loop:
mutex_enter(&block->mutex);
block->state = BUF_BLOCK_READY_FOR_USE;
+ UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
mutex_exit(&block->mutex);
@@ -864,6 +873,7 @@ buf_LRU_block_free_non_file_page(
block->state = BUF_BLOCK_NOT_USED;
+ UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
#ifdef UNIV_DEBUG
/* Wipe contents of page to reveal possible stale pointers to it */
memset(block->frame, '\0', UNIV_PAGE_SIZE);
@@ -871,6 +881,8 @@ buf_LRU_block_free_non_file_page(
UT_LIST_ADD_FIRST(free, buf_pool->free, block);
block->in_free_list = TRUE;
+ UNIV_MEM_FREE(block->frame, UNIV_PAGE_SIZE);
+
if (srv_use_awe && block->frame) {
/* Add to the list of mapped pages */
@@ -939,6 +951,7 @@ buf_LRU_block_remove_hashed_page(
buf_page_address_fold(block->space, block->offset),
block);
+ UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE);
block->state = BUF_BLOCK_REMOVE_HASH;
}
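
The UNIV_MEM_* annotations added in buf0buf.c and buf0lru.c track the logical
state of buffer frames for Valgrind. A sketch of how such macros can map onto
memcheck client requests (an assumption for illustration; the real
definitions live in univ.i and compile to nothing without Valgrind support):

    #include <valgrind/memcheck.h>
    #define UNIV_MEM_VALID(addr, size)   VALGRIND_MAKE_MEM_DEFINED(addr, size)
    #define UNIV_MEM_INVALID(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
    #define UNIV_MEM_ALLOC(addr, size)   VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
    #define UNIV_MEM_FREE(addr, size)    VALGRIND_MAKE_MEM_NOACCESS(addr, size)
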
diff --git a/storage/innobase/data/Makefile.am b/storage/innobase/data/Makefile.am
deleted file mode 100644
index 6f9407d40e5..00000000000
--- a/storage/innobase/data/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libdata.a
-
-libdata_a_SOURCES = data0data.c data0type.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/data/data0data.c b/storage/innobase/data/data0data.c
index fc4494d991a..0f03de4ca9d 100644
--- a/storage/innobase/data/data0data.c
+++ b/storage/innobase/data/data0data.c
@@ -18,6 +18,8 @@ Created 5/30/1994 Heikki Tuuri
#include "dict0dict.h"
#include "btr0cur.h"
+#include <ctype.h>
+
#ifdef UNIV_DEBUG
byte data_error; /* data pointers of tuple fields are initialized
to point here for error checking */
diff --git a/storage/innobase/data/data0type.c b/storage/innobase/data/data0type.c
index 77779d185cf..305000d7c0a 100644
--- a/storage/innobase/data/data0type.c
+++ b/storage/innobase/data/data0type.c
@@ -190,7 +190,8 @@ dtype_validate(
dtype_t* type) /* in: type struct to validate */
{
ut_a(type);
- ut_a((type->mtype >= DATA_VARCHAR) && (type->mtype <= DATA_MYSQL));
+ ut_a(type->mtype >= DATA_VARCHAR);
+ ut_a(type->mtype <= DATA_MYSQL);
if (type->mtype == DATA_SYS) {
ut_a((type->prtype & DATA_MYSQL_TYPE_MASK) < DATA_N_SYS_COLS);
diff --git a/storage/innobase/dict/Makefile.am b/storage/innobase/dict/Makefile.am
deleted file mode 100644
index 15cacca6f58..00000000000
--- a/storage/innobase/dict/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libdict.a
-
-libdict_a_SOURCES = dict0boot.c dict0crea.c dict0dict.c dict0load.c\
- dict0mem.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
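
The dict0boot.c changes below thread an explicit memory heap through
dictionary bootstrap: one heap serves all system tables and is recycled
between them. A lifecycle sketch of the pattern, as shown in the diff that
follows:

    mem_heap_t *heap= mem_heap_create(450);    /* initial block size */
    /* per table: column metadata is allocated from the heap */
    dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0);
    dict_table_add_to_cache(table, heap);
    mem_heap_empty(heap);                      /* recycle for next table */
    /* after the last table: */
    mem_heap_free(heap);
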
diff --git a/storage/innobase/dict/dict0boot.c b/storage/innobase/dict/dict0boot.c
index f8849008854..5f9aaf71e18 100644
--- a/storage/innobase/dict/dict0boot.c
+++ b/storage/innobase/dict/dict0boot.c
@@ -211,6 +211,7 @@ dict_boot(void)
dict_table_t* table;
dict_index_t* index;
dict_hdr_t* dict_hdr;
+ mem_heap_t* heap;
mtr_t mtr;
mtr_start(&mtr);
@@ -218,6 +219,8 @@ dict_boot(void)
/* Create the hash tables etc. */
dict_init();
+ heap = mem_heap_create(450);
+
mutex_enter(&(dict_sys->mutex));
/* Get the dictionary header */
@@ -244,19 +247,20 @@ dict_boot(void)
/*-------------------------*/
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0);
- dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "N_COLS", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "MIX_ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "MIX_LEN", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "CLUSTER_NAME", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "N_COLS", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "MIX_ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "MIX_LEN", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "CLUSTER_NAME", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "SPACE", DATA_INT, 0, 4);
table->id = DICT_TABLES_ID;
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
dict_sys->sys_tables = table;
+ mem_heap_empty(heap);
index = dict_mem_index_create("SYS_TABLES", "CLUST_IND",
DICT_HDR_SPACE,
@@ -283,18 +287,19 @@ dict_boot(void)
/*-------------------------*/
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0);
- dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "MTYPE", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "PRTYPE", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "LEN", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "PREC", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "MTYPE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "PRTYPE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "LEN", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "PREC", DATA_INT, 0, 4);
table->id = DICT_COLUMNS_ID;
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
dict_sys->sys_columns = table;
+ mem_heap_empty(heap);
index = dict_mem_index_create("SYS_COLUMNS", "CLUST_IND",
DICT_HDR_SPACE,
@@ -311,13 +316,13 @@ dict_boot(void)
/*-------------------------*/
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0);
- dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "N_FIELDS", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "TYPE", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "SPACE", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "PAGE_NO", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "N_FIELDS", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "SPACE", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "PAGE_NO", DATA_INT, 0, 4);
/* The '+ 2' below comes from the 2 system fields */
#if DICT_SYS_INDEXES_PAGE_NO_FIELD != 6 + 2
@@ -331,8 +336,9 @@ dict_boot(void)
#endif
table->id = DICT_INDEXES_ID;
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
dict_sys->sys_indexes = table;
+ mem_heap_empty(heap);
index = dict_mem_index_create("SYS_INDEXES", "CLUST_IND",
DICT_HDR_SPACE,
@@ -349,13 +355,14 @@ dict_boot(void)
/*-------------------------*/
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0);
- dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4);
- dict_mem_table_add_col(table, "COL_NAME", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "INDEX_ID", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4);
+ dict_mem_table_add_col(table, heap, "COL_NAME", DATA_BINARY, 0, 0);
table->id = DICT_FIELDS_ID;
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
dict_sys->sys_fields = table;
+ mem_heap_free(heap);
index = dict_mem_index_create("SYS_FIELDS", "CLUST_IND",
DICT_HDR_SPACE,
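The heap threaded through dict_boot() above follows a create / fill / empty / ... / free lifecycle: one temporary heap (450 bytes) serves all four system-table constructions, mem_heap_empty() resets it after each dict_table_add_to_cache() so per-table allocations do not pile up, and mem_heap_free() releases it once after SYS_FIELDS. A minimal bump-allocator sketch of that reuse pattern (the heap_* helpers are illustrative, not the InnoDB mem_heap API):

    #include <stdlib.h>

    typedef struct { char *buf; size_t used, cap; } heap_t;

    static heap_t *heap_create(size_t cap)
    {
        heap_t *h = malloc(sizeof *h);
        h->buf = malloc(cap);
        h->used = 0;
        h->cap = cap;
        return h;
    }

    /* bump-pointer allocation; no capacity check, illustration only */
    static void *heap_alloc(heap_t *h, size_t n)
    {
        void *p = h->buf + h->used;
        h->used += n;
        return p;
    }

    static void heap_empty(heap_t *h) { h->used = 0; }  /* reuse buffer */
    static void heap_free(heap_t *h)  { free(h->buf); free(h); }

    static void boot_dictionary(void)
    {
        heap_t *heap = heap_create(450);

        (void) heap_alloc(heap, 16);  /* e.g. a column-name copy */
        heap_empty(heap);             /* reclaim between tables */
        (void) heap_alloc(heap, 16);  /* next table's allocations */
        heap_empty(heap);

        heap_free(heap);              /* released once, at the end */
    }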
diff --git a/storage/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c
index e060d45768e..4116230347d 100644
--- a/storage/innobase/dict/dict0crea.c
+++ b/storage/innobase/dict/dict0crea.c
@@ -960,7 +960,7 @@ dict_create_table_step(
if (node->state == TABLE_ADD_TO_CACHE) {
- dict_table_add_to_cache(node->table);
+ dict_table_add_to_cache(node->table, node->heap);
err = DB_SUCCESS;
}
@@ -1195,7 +1195,8 @@ dict_create_or_check_foreign_constraint_tables(void)
fprintf(stderr, "InnoDB: error %lu in creation\n",
(ulong) error);
- ut_a(error == DB_OUT_OF_FILE_SPACE);
+ ut_a(error == DB_OUT_OF_FILE_SPACE
+ || error == DB_TOO_MANY_CONCURRENT_TRXS);
fprintf(stderr,
"InnoDB: creation failed\n"
diff --git a/storage/innobase/dict/dict0dict.c b/storage/innobase/dict/dict0dict.c
index f450d3553eb..595dfb06ee5 100644
--- a/storage/innobase/dict/dict0dict.c
+++ b/storage/innobase/dict/dict0dict.c
@@ -30,6 +30,8 @@ Created 1/8/1996 Heikki Tuuri
# include "m_ctype.h" /* my_isspace() */
#endif /* !UNIV_HOTBACKUP */
+#include <ctype.h>
+
dict_sys_t* dict_sys = NULL; /* the dictionary system */
rw_lock_t dict_operation_lock; /* table create, drop, etc. reserve
@@ -408,14 +410,27 @@ dict_table_get_col_name(
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
s = table->col_names;
-
- for (i = 0; i < col_nr; i++) {
- s += strlen(s) + 1;
+ if (s) {
+ for (i = 0; i < col_nr; i++) {
+ s += strlen(s) + 1;
+ }
}
return(s);
}
+
+/************************************************************************
+Acquire the autoinc lock.*/
+
+void
+dict_table_autoinc_lock(
+/*====================*/
+ dict_table_t* table)
+{
+ mutex_enter(&table->autoinc_mutex);
+}
+
/************************************************************************
Initializes the autoinc counter. It is not an error to initialize an already
initialized counter. */
@@ -426,54 +441,8 @@ dict_table_autoinc_initialize(
dict_table_t* table, /* in: table */
ib_longlong value) /* in: next value to assign to a row */
{
- mutex_enter(&(table->autoinc_mutex));
-
table->autoinc_inited = TRUE;
table->autoinc = value;
-
- mutex_exit(&(table->autoinc_mutex));
-}
-
-/************************************************************************
-Gets the next autoinc value (== autoinc counter value), 0 if not yet
-initialized. If initialized, increments the counter by 1. */
-
-ib_longlong
-dict_table_autoinc_get(
-/*===================*/
- /* out: value for a new row, or 0 */
- dict_table_t* table) /* in: table */
-{
- ib_longlong value;
-
- mutex_enter(&(table->autoinc_mutex));
-
- if (!table->autoinc_inited) {
-
- value = 0;
- } else {
- value = table->autoinc;
- table->autoinc = table->autoinc + 1;
- }
-
- mutex_exit(&(table->autoinc_mutex));
-
- return(value);
-}
-
-/************************************************************************
-Decrements the autoinc counter value by 1. */
-
-void
-dict_table_autoinc_decrement(
-/*=========================*/
- dict_table_t* table) /* in: table */
-{
- mutex_enter(&(table->autoinc_mutex));
-
- table->autoinc = table->autoinc - 1;
-
- mutex_exit(&(table->autoinc_mutex));
}
/************************************************************************
@@ -488,8 +457,6 @@ dict_table_autoinc_read(
{
ib_longlong value;
- mutex_enter(&(table->autoinc_mutex));
-
if (!table->autoinc_inited) {
value = 0;
@@ -497,35 +464,11 @@ dict_table_autoinc_read(
value = table->autoinc;
}
- mutex_exit(&(table->autoinc_mutex));
-
return(value);
}
/************************************************************************
-Peeks the autoinc counter value, 0 if not yet initialized. Does not
-increment the counter. The read not protected by any mutex! */
-
-ib_longlong
-dict_table_autoinc_peek(
-/*====================*/
- /* out: value of the counter */
- dict_table_t* table) /* in: table */
-{
- ib_longlong value;
-
- if (!table->autoinc_inited) {
-
- value = 0;
- } else {
- value = table->autoinc;
- }
-
- return(value);
-}
-
-/************************************************************************
-Updates the autoinc counter if the value supplied is equal or bigger than the
+Updates the autoinc counter if the value supplied is greater than the
current value. If not inited, does nothing. */
void
@@ -535,15 +478,21 @@ dict_table_autoinc_update(
dict_table_t* table, /* in: table */
ib_longlong value) /* in: value which was assigned to a row */
{
- mutex_enter(&(table->autoinc_mutex));
+ if (table->autoinc_inited && value > table->autoinc) {
- if (table->autoinc_inited) {
- if (value >= table->autoinc) {
- table->autoinc = value + 1;
- }
+ table->autoinc = value;
}
+}
- mutex_exit(&(table->autoinc_mutex));
+/************************************************************************
+Release the autoinc lock.*/
+
+void
+dict_table_autoinc_unlock(
+/*======================*/
+ dict_table_t* table) /* in: release autoinc lock for this table */
+{
+ mutex_exit(&table->autoinc_mutex);
}
/************************************************************************
@@ -840,28 +789,18 @@ dict_table_get(
}
/**************************************************************************
-Adds a table object to the dictionary cache. */
+Adds system columns to a table object. */
void
-dict_table_add_to_cache(
-/*====================*/
- dict_table_t* table) /* in: table */
+dict_table_add_system_columns(
+/*==========================*/
+ dict_table_t* table, /* in/out: table */
+ mem_heap_t* heap) /* in: temporary heap */
{
- ulint fold;
- ulint id_fold;
- ulint i;
- ulint row_len;
-
ut_ad(table);
- ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(table->n_def == table->n_cols - DATA_N_SYS_COLS);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
- ut_ad(table->cached == FALSE);
-
- fold = ut_fold_string(table->name);
- id_fold = ut_fold_dulint(table->id);
-
- table->cached = TRUE;
+ ut_ad(!table->cached);
/* NOTE: the system columns MUST be added in the following order
(so that they can be indexed by the numerical value of DATA_ROW_ID,
@@ -869,19 +808,19 @@ dict_table_add_to_cache(
The clustered index will not always physically contain all
system columns. */
- dict_mem_table_add_col(table, "DB_ROW_ID", DATA_SYS,
+ dict_mem_table_add_col(table, heap, "DB_ROW_ID", DATA_SYS,
DATA_ROW_ID | DATA_NOT_NULL,
DATA_ROW_ID_LEN);
#if DATA_ROW_ID != 0
#error "DATA_ROW_ID != 0"
#endif
- dict_mem_table_add_col(table, "DB_TRX_ID", DATA_SYS,
+ dict_mem_table_add_col(table, heap, "DB_TRX_ID", DATA_SYS,
DATA_TRX_ID | DATA_NOT_NULL,
DATA_TRX_ID_LEN);
#if DATA_TRX_ID != 1
#error "DATA_TRX_ID != 1"
#endif
- dict_mem_table_add_col(table, "DB_ROLL_PTR", DATA_SYS,
+ dict_mem_table_add_col(table, heap, "DB_ROLL_PTR", DATA_SYS,
DATA_ROLL_PTR | DATA_NOT_NULL,
DATA_ROLL_PTR_LEN);
#if DATA_ROLL_PTR != 2
@@ -893,10 +832,34 @@ dict_table_add_to_cache(
#if DATA_N_SYS_COLS != 3
#error "DATA_N_SYS_COLS != 3"
#endif
+}
+
+/**************************************************************************
+Adds a table object to the dictionary cache. */
+
+void
+dict_table_add_to_cache(
+/*====================*/
+ dict_table_t* table, /* in: table */
+ mem_heap_t* heap) /* in: temporary heap */
+{
+ ulint fold;
+ ulint id_fold;
+ ulint i;
+ ulint row_len;
/* The lower limit for what we consider a "big" row */
#define BIG_ROW_SIZE 1024
+ ut_ad(mutex_own(&(dict_sys->mutex)));
+
+ dict_table_add_system_columns(table, heap);
+
+ table->cached = TRUE;
+
+ fold = ut_fold_string(table->name);
+ id_fold = ut_fold_dulint(table->id);
+
row_len = 0;
for (i = 0; i < table->n_def; i++) {
ulint col_len = dict_col_get_max_size(
@@ -1529,6 +1492,12 @@ dict_index_add_col(
if (field->fixed_len > DICT_MAX_INDEX_COL_LEN) {
field->fixed_len = 0;
}
+#if DICT_MAX_INDEX_COL_LEN != 768
+ /* The comparison limit above must be constant. If it were
+ changed, the disk format of some fixed-length columns would
+ change, which would be a disaster. */
+# error "DICT_MAX_INDEX_COL_LEN != 768"
+#endif
if (!(col->prtype & DATA_NOT_NULL)) {
index->n_nullable++;
@@ -1585,9 +1554,6 @@ dict_index_copy_types(
ifield = dict_index_get_nth_field(index, i);
dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i));
dict_col_copy_type(dict_field_get_col(ifield), dfield_type);
- if (UNIV_UNLIKELY(ifield->prefix_len)) {
- dfield_type->len = ifield->prefix_len;
- }
}
}
@@ -3361,7 +3327,8 @@ dict_create_foreign_constraints(
ulint err;
mem_heap_t* heap;
- ut_a(trx && trx->mysql_thd);
+ ut_a(trx);
+ ut_a(trx->mysql_thd);
str = dict_strip_comments(sql_string);
heap = mem_heap_create(10000);
@@ -3403,7 +3370,8 @@ dict_foreign_parse_drop_constraints(
FILE* ef = dict_foreign_err_file;
struct charset_info_st* cs;
- ut_a(trx && trx->mysql_thd);
+ ut_a(trx);
+ ut_a(trx->mysql_thd);
cs = innobase_get_charset(trx->mysql_thd);
@@ -3712,7 +3680,7 @@ dict_index_calc_min_rec_len(
}
/* round the NULL flags up to full bytes */
- sum += (nullable + 7) / 8;
+ sum += UT_BITS_IN_BYTES(nullable);
return(sum);
}
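The autoinc hunks above invert the locking discipline: dict_table_autoinc_initialize(), _read() and _update() no longer take table->autoinc_mutex themselves, and the redundant _get(), _peek() and _decrement() variants are gone; instead the caller brackets a whole read-modify-write sequence with the new dict_table_autoinc_lock()/_unlock() pair. A sketch of the resulting calling convention, assembled from the functions in this file (error handling omitted):

    static ib_longlong
    assign_next_autoinc(dict_table_t* table)
    {
        ib_longlong value;

        dict_table_autoinc_lock(table);         /* enter critical section */

        value = dict_table_autoinc_read(table); /* 0 if not yet inited */
        if (value == 0) {
                dict_table_autoinc_initialize(table, 1);
                value = 1;
        }
        /* only ever moves the counter forward, per the new _update() */
        dict_table_autoinc_update(table, value + 1);

        dict_table_autoinc_unlock(table);       /* leave critical section */

        return(value);
    }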
diff --git a/storage/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c
index ba2e25cf031..1ff1fd54cec 100644
--- a/storage/innobase/dict/dict0load.c
+++ b/storage/innobase/dict/dict0load.c
@@ -423,7 +423,8 @@ dict_load_columns(
ut_a(name_of_col_is(sys_columns, sys_index, 8, "PREC"));
- dict_mem_table_add_col(table, name, mtype, prtype, col_len);
+ dict_mem_table_add_col(table, heap, name,
+ mtype, prtype, col_len);
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@@ -746,7 +747,7 @@ dict_load_table(
ut_ad(mutex_own(&(dict_sys->mutex)));
- heap = mem_heap_create(1000);
+ heap = mem_heap_create(32000);
mtr_start(&mtr);
@@ -852,7 +853,9 @@ err_exit:
dict_load_columns(table, heap);
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
+
+ mem_heap_empty(heap);
dict_load_indexes(table, heap);
diff --git a/storage/innobase/dict/dict0mem.c b/storage/innobase/dict/dict0mem.c
index 9aa49dee745..47cf7a0bc9c 100644
--- a/storage/innobase/dict/dict0mem.c
+++ b/storage/innobase/dict/dict0mem.c
@@ -90,6 +90,15 @@ dict_mem_table_create(
mutex_create(&table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX);
table->autoinc_inited = FALSE;
+
+ /* The actual increment value will be set by MySQL; we simply
+ default to 1 here.*/
+ table->autoinc_increment = 1;
+
+ /* The number of transactions that are either waiting on the
+ AUTOINC lock or have been granted the lock. */
+ table->n_waiting_or_granted_auto_inc_locks = 0;
+
#ifdef UNIV_DEBUG
table->magic_n = DICT_TABLE_MAGIC_N;
#endif /* UNIV_DEBUG */
@@ -108,18 +117,11 @@ dict_mem_table_free(
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
mutex_free(&(table->autoinc_mutex));
-
- if (table->col_names && (table->n_def < table->n_cols)) {
- ut_free((void*)table->col_names);
- }
-
mem_heap_free(table->heap);
}
/********************************************************************
-Add 'name' to end of the col_names array (see dict_table_t::col_names). Call
-ut_free on col_names (if not NULL), allocate new array (if heap, from it,
-otherwise with ut_malloc), and copy col_names + name to it. */
+Append 'name' to 'col_names' (@see dict_table_t::col_names). */
static
const char*
dict_add_col_name(
@@ -129,21 +131,19 @@ dict_add_col_name(
NULL */
ulint cols, /* in: number of existing columns */
const char* name, /* in: new column name */
- mem_heap_t* heap) /* in: heap, or NULL */
+ mem_heap_t* heap) /* in: heap */
{
- ulint i;
- ulint old_len;
- ulint new_len;
- ulint total_len;
- const char* s;
- char* res;
+ ulint old_len;
+ ulint new_len;
+ ulint total_len;
+ char* res;
- ut_a(((cols == 0) && !col_names) || ((cols > 0) && col_names));
- ut_a(*name);
+ ut_ad(!cols == !col_names);
/* Find out length of existing array. */
if (col_names) {
- s = col_names;
+ const char* s = col_names;
+ ulint i;
for (i = 0; i < cols; i++) {
s += strlen(s) + 1;
@@ -157,11 +157,7 @@ dict_add_col_name(
new_len = strlen(name) + 1;
total_len = old_len + new_len;
- if (heap) {
- res = mem_heap_alloc(heap, total_len);
- } else {
- res = ut_malloc(total_len);
- }
+ res = mem_heap_alloc(heap, total_len);
if (old_len > 0) {
memcpy(res, col_names, old_len);
@@ -169,10 +165,6 @@ dict_add_col_name(
memcpy(res + old_len, name, new_len);
- if (col_names) {
- ut_free((char*)col_names);
- }
-
return(res);
}
@@ -183,7 +175,8 @@ void
dict_mem_table_add_col(
/*===================*/
dict_table_t* table, /* in: table */
- const char* name, /* in: column name */
+ mem_heap_t* heap, /* in: temporary memory heap, or NULL */
+ const char* name, /* in: column name, or NULL */
ulint mtype, /* in: main datatype */
ulint prtype, /* in: precise type */
ulint len) /* in: precision */
@@ -191,21 +184,32 @@ dict_mem_table_add_col(
dict_col_t* col;
ulint mbminlen;
ulint mbmaxlen;
- mem_heap_t* heap;
+ ulint i;
- ut_ad(table && name);
+ ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+ ut_ad(!heap == !name);
- table->n_def++;
+ i = table->n_def++;
- heap = table->n_def < table->n_cols ? NULL : table->heap;
- table->col_names = dict_add_col_name(table->col_names,
- table->n_def - 1,
- name, heap);
+ if (name) {
+ if (UNIV_UNLIKELY(table->n_def == table->n_cols)) {
+ heap = table->heap;
+ }
+ if (UNIV_LIKELY(i) && UNIV_UNLIKELY(!table->col_names)) {
+ /* All preceding column names are empty. */
+ char* s = mem_heap_alloc(heap, table->n_def);
+ memset(s, 0, table->n_def);
+ table->col_names = s;
+ }
- col = (dict_col_t*) dict_table_get_nth_col(table, table->n_def - 1);
+ table->col_names = dict_add_col_name(table->col_names,
+ i, name, heap);
+ }
- col->ind = table->n_def - 1;
+ col = (dict_col_t*) dict_table_get_nth_col(table, i);
+
+ col->ind = (unsigned int) i;
col->ord_part = 0;
col->mtype = (unsigned int) mtype;
@@ -318,7 +322,7 @@ dict_mem_index_add_field(
{
dict_field_t* field;
- ut_ad(index && name);
+ ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
index->n_def++;
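dict_add_col_name() stores all of a table's column names in one contiguous allocation, each name followed by its '\0' terminator, which is why dict_table_get_col_name() walks the run with s += strlen(s) + 1. A self-contained sketch of the append-and-walk format (plain malloc/free stand in for the mem heap; error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* append `name` to a run already holding `cols` NUL-separated names */
    static char *add_col_name(char *names, size_t cols, const char *name)
    {
        const char *s = names;
        size_t i, old_len, new_len;
        char *res;

        for (i = 0; i < cols; i++) {    /* skip past existing names */
            s += strlen(s) + 1;
        }
        old_len = cols ? (size_t) (s - names) : 0;
        new_len = strlen(name) + 1;     /* keep the terminating '\0' */

        res = malloc(old_len + new_len);
        if (old_len) {
            memcpy(res, names, old_len);
        }
        memcpy(res + old_len, name, new_len);
        free(names);   /* the patch avoids this by allocating from a heap */
        return res;
    }

    int main(void)
    {
        char *names = NULL;
        const char *p;

        names = add_col_name(names, 0, "TABLE_ID");
        names = add_col_name(names, 1, "POS");
        names = add_col_name(names, 2, "NAME");

        p = names;                 /* walk, as dict_table_get_col_name does */
        p += strlen(p) + 1;        /* skip col 0 */
        p += strlen(p) + 1;        /* skip col 1 */
        printf("col 2 = %s\n", p); /* prints NAME */

        free(names);
        return 0;
    }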
diff --git a/storage/innobase/dyn/Makefile.am b/storage/innobase/dyn/Makefile.am
deleted file mode 100644
index 57d9a25e481..00000000000
--- a/storage/innobase/dyn/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libdyn.a
-
-libdyn_a_SOURCES = dyn0dyn.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/eval/Makefile.am b/storage/innobase/eval/Makefile.am
deleted file mode 100644
index 6c2b05d8b7a..00000000000
--- a/storage/innobase/eval/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libeval.a
-
-libeval_a_SOURCES = eval0eval.c eval0proc.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/fil/Makefile.am b/storage/innobase/fil/Makefile.am
deleted file mode 100644
index 0a85ceb5b86..00000000000
--- a/storage/innobase/fil/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libfil.a
-
-libfil_a_SOURCES = fil0fil.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/fsp/Makefile.am b/storage/innobase/fsp/Makefile.am
deleted file mode 100644
index 7818cdafc1b..00000000000
--- a/storage/innobase/fsp/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libfsp.a
-
-libfsp_a_SOURCES = fsp0fsp.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/fsp/fsp0fsp.c b/storage/innobase/fsp/fsp0fsp.c
index b5662fd24a4..e1074933fe8 100644
--- a/storage/innobase/fsp/fsp0fsp.c
+++ b/storage/innobase/fsp/fsp0fsp.c
@@ -205,10 +205,9 @@ the extent are free and which contain old tuple version to clean. */
space */
#define XDES_FSEG 4 /* extent belongs to a segment */
-/* File extent data structure size in bytes. The "+ 7 ) / 8" part in the
-definition rounds the number of bytes upward. */
+/* File extent data structure size in bytes. */
#define XDES_SIZE \
- (XDES_BITMAP + (FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE + 7) / 8)
+ (XDES_BITMAP + UT_BITS_IN_BYTES(FSP_EXTENT_SIZE * XDES_BITS_PER_PAGE))
/* Offset of the descriptor array on a descriptor page */
#define XDES_ARR_OFFSET (FSP_HEADER_OFFSET + FSP_HEADER_SIZE)
@@ -2830,7 +2829,7 @@ will be able to insert new data to the database without running out the
tablespace. Only free extents are taken into account and we also subtract
the safety margin required by the above function fsp_reserve_free_extents. */
-ulint
+ullint
fsp_get_available_space_in_free_extents(
/*====================================*/
/* out: available space in kB */
@@ -2896,7 +2895,8 @@ fsp_get_available_space_in_free_extents(
return(0);
}
- return(((n_free - reserve) * FSP_EXTENT_SIZE)
+ return((ullint)(n_free - reserve)
+ * FSP_EXTENT_SIZE
* (UNIV_PAGE_SIZE / 1024));
}
@@ -3649,7 +3649,11 @@ fsp_validate(
n_full_frag_pages = FSP_EXTENT_SIZE
* flst_get_len(header + FSP_FULL_FRAG, &mtr);
- ut_a(free_limit <= size || (space != 0 && size < FSP_EXTENT_SIZE));
+ if (UNIV_UNLIKELY(free_limit > size)) {
+
+ ut_a(space != 0);
+ ut_a(size < FSP_EXTENT_SIZE);
+ }
flst_validate(header + FSP_FREE, &mtr);
flst_validate(header + FSP_FREE_FRAG, &mtr);
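Two independent cleanups meet in this file and in dict0dict.c above: UT_BITS_IN_BYTES(n) names the (n + 7) / 8 round-up that was open-coded in XDES_SIZE and dict_index_calc_min_rec_len(), and fsp_get_available_space_in_free_extents() now widens to the unsigned 64-bit ullint before multiplying, so a large tablespace cannot overflow a 32-bit ulint. A small demonstration of both (stdint types stand in for ulint/ullint):

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_IN_BYTES(n)  (((n) + 7) / 8)  /* round bits up to bytes */

    int main(void)
    {
        /* 9 nullable columns need 2 bytes of NULL flags, not 1 */
        printf("%d\n", BITS_IN_BYTES(9));                 /* prints 2 */

        /* widen BEFORE the multiplications, as the fsp patch does */
        uint32_t n_free = 5000000, reserve = 0;
        uint32_t extent_pages = 64, page_kb = 16;         /* 16 KiB pages */
        uint64_t kb = (uint64_t) (n_free - reserve)
                * extent_pages * page_kb;
        printf("%llu kB\n", (unsigned long long) kb);     /* > UINT32_MAX */
        return 0;
    }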
diff --git a/storage/innobase/fut/Makefile.am b/storage/innobase/fut/Makefile.am
deleted file mode 100644
index ffe9835a023..00000000000
--- a/storage/innobase/fut/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libfut.a
-
-libfut_a_SOURCES = fut0fut.c fut0lst.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/ha/Makefile.am b/storage/innobase/ha/Makefile.am
deleted file mode 100644
index 696cad0b203..00000000000
--- a/storage/innobase/ha/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libha.a
-
-libha_a_SOURCES = ha0ha.c hash0hash.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/handler/Makefile.am b/storage/innobase/handler/Makefile.am
deleted file mode 100644
index 0d34212bdd4..00000000000
--- a/storage/innobase/handler/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-# & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-DEFS = -DMYSQL_SERVER @DEFS@
-
-noinst_LIBRARIES = libhandler.a
-
-libhandler_a_SOURCES = ha_innodb.cc
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index de9cf06fe3a..783553f5d87 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -15,11 +15,10 @@
/* This file defines the InnoDB handler: the interface between MySQL and InnoDB
NOTE: You can only use noninlined InnoDB functions in this file, because we
-have disables the InnoDB inlining in this file. */
+have disabled the InnoDB inlining in this file. */
/* TODO list for the InnoDB handler in 5.0:
- - Remove the flag trx->active_trans and look at the InnoDB
- trx struct state field
+ - Remove the flag trx->active_trans and look at trx->conc_state
- fix savepoint functions to use savepoint storage area
- Find out what kind of problems the OS X case-insensitivity causes to
table and database names; should we 'normalize' the names like we do
@@ -31,8 +30,7 @@ have disables the InnoDB inlining in this file. */
#endif
#include <mysql_priv.h>
-
-#ifdef WITH_INNOBASE_STORAGE_ENGINE
+#include <mysqld_error.h>
#include <m_ctype.h>
#include <hash.h>
@@ -40,27 +38,29 @@ have disables the InnoDB inlining in this file. */
#include <mysys_err.h>
#include <my_sys.h>
#include "ha_innodb.h"
-
-pthread_mutex_t innobase_share_mutex, /* to protect innobase_open_files */
- prepare_commit_mutex; /* to force correct commit order in
- binlog */
-ulong commit_threads= 0;
-pthread_mutex_t commit_threads_m;
-pthread_cond_t commit_cond;
-pthread_mutex_t commit_cond_m;
-bool innodb_inited= 0;
+#include <mysql/plugin.h>
+
+#ifndef MYSQL_SERVER
+/* This is needed because of Bug #3596. Let us hope that pthread_mutex_t
+is defined the same in both builds: the MySQL server and the InnoDB plugin. */
+extern pthread_mutex_t LOCK_thread_count;
+#endif /* MYSQL_SERVER */
+
+/** to protect innobase_open_files */
+static pthread_mutex_t innobase_share_mutex;
+/** to force correct commit order in binlog */
+static pthread_mutex_t prepare_commit_mutex;
+static ulong commit_threads = 0;
+static pthread_mutex_t commit_threads_m;
+static pthread_cond_t commit_cond;
+static pthread_mutex_t commit_cond_m;
+static bool innodb_inited = 0;
/*
This needs to exist until the query cache callback is removed
or learns to pass hton.
*/
-static handlerton *legacy_innodb_hton;
-
-/* Store MySQL definition of 'byte': in Linux it is char while InnoDB
-uses unsigned char; the header univ.i which we include next defines
-'byte' as a macro which expands to 'unsigned char' */
-
-typedef byte mysql_byte;
+static handlerton *innodb_hton_ptr;
#define INSIDE_HA_INNOBASE_CC
@@ -92,46 +92,45 @@ extern "C" {
#include "../storage/innobase/include/ha_prototypes.h"
}
-ulong innobase_large_page_size = 0;
+static const long AUTOINC_OLD_STYLE_LOCKING = 0;
+static const long AUTOINC_NEW_STYLE_LOCKING = 1;
+static const long AUTOINC_NO_LOCKING = 2;
-/* The default values for the following, type long or longlong, start-up
-parameters are declared in mysqld.cc: */
-
-long innobase_mirrored_log_groups, innobase_log_files_in_group,
+static long innobase_mirrored_log_groups, innobase_log_files_in_group,
innobase_log_buffer_size, innobase_buffer_pool_awe_mem_mb,
innobase_additional_mem_pool_size, innobase_file_io_threads,
innobase_lock_wait_timeout, innobase_force_recovery,
- innobase_open_files;
+ innobase_open_files, innobase_autoinc_lock_mode;
-longlong innobase_buffer_pool_size, innobase_log_file_size;
+static long long innobase_buffer_pool_size, innobase_log_file_size;
/* The default values for the following char* start-up parameters
are determined in innobase_init below: */
-char* innobase_data_home_dir = NULL;
-char* innobase_data_file_path = NULL;
-char* innobase_log_group_home_dir = NULL;
-char* innobase_log_arch_dir = NULL;/* unused */
+static char* innobase_data_home_dir = NULL;
+static char* innobase_data_file_path = NULL;
+static char* innobase_log_group_home_dir = NULL;
/* The following has a misleading name: starting from 4.0.5, this also
affects Windows: */
-char* innobase_unix_file_flush_method = NULL;
+static char* innobase_unix_file_flush_method = NULL;
/* Below we have boolean-valued start-up parameters, and their default
values */
-ulong innobase_fast_shutdown = 1;
-my_bool innobase_log_archive = FALSE;/* unused */
-my_bool innobase_use_doublewrite = TRUE;
-my_bool innobase_use_checksums = TRUE;
-my_bool innobase_use_large_pages = FALSE;
-my_bool innobase_use_native_aio = FALSE;
-my_bool innobase_file_per_table = FALSE;
-my_bool innobase_locks_unsafe_for_binlog = FALSE;
-my_bool innobase_rollback_on_timeout = FALSE;
-my_bool innobase_create_status_file = FALSE;
-my_bool innobase_stats_on_metadata = TRUE;
+static ulong innobase_fast_shutdown = 1;
+#ifdef UNIV_LOG_ARCHIVE
+static my_bool innobase_log_archive = FALSE;
+static char* innobase_log_arch_dir = NULL;
+#endif /* UNIV_LOG_ARCHIVE */
+static my_bool innobase_use_doublewrite = TRUE;
+static my_bool innobase_use_checksums = TRUE;
+static my_bool innobase_file_per_table = FALSE;
+static my_bool innobase_locks_unsafe_for_binlog = FALSE;
+static my_bool innobase_rollback_on_timeout = FALSE;
+static my_bool innobase_create_status_file = FALSE;
+static my_bool innobase_stats_on_metadata = TRUE;
-static char *internal_innobase_data_file_path = NULL;
+static char* internal_innobase_data_file_path = NULL;
/* The following counter is used to convey information to InnoDB
about server activity: in selects it is not sensible to call
@@ -147,7 +146,7 @@ static HASH innobase_open_tables;
bool nw_panic = FALSE;
#endif
-static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length,
+static uchar* innobase_get_key(INNOBASE_SHARE *share, size_t *length,
my_bool not_used __attribute__((unused)));
static INNOBASE_SHARE *get_share(const char *table_name);
static void free_share(INNOBASE_SHARE *share);
@@ -165,6 +164,17 @@ static handler *innobase_create_handler(handlerton *hton,
static const char innobase_hton_name[]= "InnoDB";
+
+static MYSQL_THDVAR_BOOL(support_xa, PLUGIN_VAR_OPCMDARG,
+ "Enable InnoDB support for the XA two-phase commit",
+ /* check_func */ NULL, /* update_func */ NULL,
+ /* default */ TRUE);
+
+static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG,
+ "Enable InnoDB locking in LOCK TABLES",
+ /* check_func */ NULL, /* update_func */ NULL,
+ /* default */ TRUE);
+
static handler *innobase_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root)
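MYSQL_THDVAR_BOOL above declares a per-session system variable owned by the plugin: the server allocates the per-THD storage, and THDVAR(thd, name) reads the current session's value, which is how trx->support_xa is filled in later in this diff instead of reaching into thd->variables directly. The declare/read pairing, with an illustrative helper around the read (refresh_trx_flags is not in the patch):

    /* file-scope declaration, as added in this diff */
    static MYSQL_THDVAR_BOOL(support_xa, PLUGIN_VAR_OPCMDARG,
        "Enable InnoDB support for the XA two-phase commit",
        /* check_func */ NULL, /* update_func */ NULL,
        /* default */ TRUE);

    /* illustrative read site; the real one is check_trx_exists() */
    static void refresh_trx_flags(THD* thd, trx_t* trx)
    {
        trx->support_xa = THDVAR(thd, support_xa);  /* session value */
    }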
@@ -407,6 +417,22 @@ static SHOW_VAR innodb_status_variables[]= {
/* General functions */
/**********************************************************************
+Returns true if the thread is the replication thread on the slave
+server. Used in srv_conc_enter_innodb() to determine if the thread
+should be allowed to enter InnoDB - the replication thread is treated
+differently than other threads. Also used in
+srv_conc_force_exit_innodb(). */
+extern "C"
+ibool
+thd_is_replication_slave_thread(
+/*============================*/
+ /* out: true if thd is the replication thread */
+ void* thd) /* in: thread handle (THD*) */
+{
+ return((ibool) thd_slave_thread((THD*) thd));
+}
+
+/**********************************************************************
Save some CPU by testing the value of srv_thread_concurrency in inline
functions. */
inline
@@ -462,17 +488,32 @@ innobase_release_stat_resources(
}
}
+/**********************************************************************
+Returns true if the transaction this thread is processing has edited
+non-transactional tables. Used by the deadlock detector when deciding
+which transaction to rollback in case of a deadlock - we try to avoid
+rolling back transactions that have edited non-transactional tables. */
+extern "C"
+ibool
+thd_has_edited_nontrans_tables(
+/*===========================*/
+ /* out: true if non-transactional tables have
+ been edited */
+ void* thd) /* in: thread handle (THD*) */
+{
+ return((ibool) thd_non_transactional_update((THD*) thd));
+}
+
/************************************************************************
Obtain the InnoDB transaction of a MySQL thread. */
inline
trx_t*&
thd_to_trx(
/*=======*/
- /* out: reference to transaction pointer */
- THD* thd, /* in: MySQL thread */
- handlerton* hton) /* in: InnoDB handlerton */
+ /* out: reference to transaction pointer */
+ THD* thd) /* in: MySQL thread */
{
- return(*(trx_t**) thd_ha_data(thd, hton));
+ return(*(trx_t**) thd_ha_data(thd, innodb_hton_ptr));
}
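With the handlerton stored once in innodb_hton_ptr, thd_to_trx() drops its hton parameter: thd_ha_data() hands back the engine's per-connection void* slot, where InnoDB caches the trx. A C rendering of the slot pattern (the patch itself uses a C++ reference return; get_or_create_trx here paraphrases check_trx_exists()):

    static handlerton* innodb_hton_ptr;  /* set once in innobase_init() */

    /* each engine gets one void* slot per connection */
    static trx_t** thd_to_trx_slot(THD* thd)
    {
        return((trx_t**) thd_ha_data(thd, innodb_hton_ptr));
    }

    static trx_t* get_or_create_trx(THD* thd)
    {
        trx_t** slot = thd_to_trx_slot(thd);

        if (*slot == NULL) {
                *slot = trx_allocate_for_mysql();  /* cache for next call */
                (*slot)->mysql_thd = thd;
        }
        return(*slot);
    }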
/************************************************************************
@@ -483,17 +524,20 @@ static
int
innobase_release_temporary_latches(
/*===============================*/
- handlerton *hton,
- THD *thd)
+ /* out: 0 */
+ handlerton* hton, /* in: handlerton */
+ THD* thd) /* in: MySQL thread */
{
trx_t* trx;
+ DBUG_ASSERT(hton == innodb_hton_ptr);
+
if (!innodb_inited) {
return 0;
}
- trx = thd_to_trx(thd, hton);
+ trx = thd_to_trx(thd);
if (trx) {
innobase_release_stat_resources(trx);
@@ -555,21 +599,17 @@ convert_error_code_to_mysql(
tell it also to MySQL so that MySQL knows to empty the
cached binlog for this transaction */
- if (thd) {
- ha_rollback(thd);
- }
+ thd_mark_transaction_to_rollback(thd, TRUE);
return(HA_ERR_LOCK_DEADLOCK);
-
} else if (error == (int) DB_LOCK_WAIT_TIMEOUT) {
/* Starting from 5.0.13, we let MySQL just roll back the
latest SQL statement in a lock wait timeout. Previously, we
rolled back the whole transaction. */
- if (thd && row_rollback_on_timeout) {
- ha_rollback(thd);
- }
+ thd_mark_transaction_to_rollback(thd,
+ (bool)row_rollback_on_timeout);
return(HA_ERR_LOCK_WAIT_TIMEOUT);
@@ -604,7 +644,7 @@ convert_error_code_to_mysql(
} else if (error == (int) DB_TABLE_NOT_FOUND) {
- return(HA_ERR_KEY_NOT_FOUND);
+ return(HA_ERR_NO_SUCH_TABLE);
} else if (error == (int) DB_TOO_BIG_RECORD) {
@@ -621,11 +661,23 @@ convert_error_code_to_mysql(
tell it also to MySQL so that MySQL knows to empty the
cached binlog for this transaction */
- if (thd) {
- ha_rollback(thd);
- }
+ thd_mark_transaction_to_rollback(thd, TRUE);
return(HA_ERR_LOCK_TABLE_FULL);
+ } else if (error == DB_TOO_MANY_CONCURRENT_TRXS) {
+
+ /* Once MySQL adds the appropriate code to errmsg.txt, we
+ can get rid of this #ifdef. NOTE: The code checked by
+ the #ifdef is the suggested name for the error condition
+ and the actual error code name could very well be different.
+ This will require some monitoring, i.e. tracking the status
+ of this request on our part.*/
+#ifdef ER_TOO_MANY_CONCURRENT_TRXS
+ return(ER_TOO_MANY_CONCURRENT_TRXS);
+#else
+ return(HA_ERR_RECORD_FILE_FULL);
+#endif
+
} else {
return(-1); // Unknown error
}
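The conversion routine no longer rolls anything back itself: it only marks the session with thd_mark_transaction_to_rollback(), whose second argument says whether the whole transaction (TRUE) or just the statement must go, and the SQL layer performs the rollback. A condensed sketch of the mapping's shape (DB_DEADLOCK is an assumed constant name; the others appear in this hunk):

    static int convert_error(int error, THD* thd)
    {
        switch (error) {
        case DB_DEADLOCK:
                /* whole transaction must be rolled back */
                thd_mark_transaction_to_rollback(thd, TRUE);
                return(HA_ERR_LOCK_DEADLOCK);
        case DB_LOCK_WAIT_TIMEOUT:
                /* statement only, unless rollback-on-timeout is set */
                thd_mark_transaction_to_rollback(
                        thd, (bool) row_rollback_on_timeout);
                return(HA_ERR_LOCK_WAIT_TIMEOUT);
        case DB_TABLE_NOT_FOUND:
                return(HA_ERR_NO_SUCH_TABLE); /* was HA_ERR_KEY_NOT_FOUND */
        default:
                return(-1);                   /* unknown error */
        }
    }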
@@ -673,78 +725,12 @@ innobase_mysql_print_thd(
uint max_query_len) /* in: max query length to print, or 0 to
use the default max length */
{
- const THD* thd;
- const Security_context *sctx;
- const char* s;
-
- thd = (const THD*) input_thd;
- /* We probably want to have original user as part of debug output. */
- sctx = &thd->main_security_ctx;
-
-
- fprintf(f, "MySQL thread id %lu, query id %lu",
- thd->thread_id, (ulong) thd->query_id);
- if (sctx->host) {
- putc(' ', f);
- fputs(sctx->host, f);
- }
-
- if (sctx->ip) {
- putc(' ', f);
- fputs(sctx->ip, f);
- }
-
- if (sctx->user) {
- putc(' ', f);
- fputs(sctx->user, f);
- }
-
- if ((s = thd->proc_info)) {
- putc(' ', f);
- fputs(s, f);
- }
-
- if ((s = thd->query)) {
- /* 3100 is chosen because currently 3000 is the maximum
- max_query_len we ever give this. */
- char buf[3100];
- uint len;
-
- /* If buf is too small, we dynamically allocate storage
- in this. */
- char* dyn_str = NULL;
-
- /* Points to buf or dyn_str. */
- char* str = buf;
-
- if (max_query_len == 0) {
- /* ADDITIONAL SAFETY: the default is to print at
- most 300 chars to reduce the probability of a
- seg fault if there is a race in
- thd->query_length in MySQL; after May 14, 2004
- probably no race any more, but better be
- safe */
- max_query_len = 300;
- }
-
- len = min(thd->query_length, max_query_len);
-
- if (len > (sizeof(buf) - 1)) {
- dyn_str = my_malloc(len + 1, MYF(0));
- str = dyn_str;
- }
-
- /* Use strmake to reduce the timeframe for a race,
- compared to fwrite() */
- len = (uint) (strmake(str, s, len) - str);
- putc('\n', f);
- fwrite(str, 1, len, f);
-
- if (dyn_str) {
- my_free(dyn_str, MYF(0));
- }
- }
+ THD* thd;
+ char buffer[1024];
+ thd = (THD*) input_thd;
+ fputs(thd_security_context(thd, buffer, sizeof(buffer),
+ max_query_len), f);
putc('\n', f);
}
@@ -752,7 +738,7 @@ innobase_mysql_print_thd(
Get the variable length bounds of the given character set.
NOTE that the exact prototype of this function has to be in
-/innobase/data/data0type.ic! */
+/innobase/include/data0type.ic! */
extern "C"
void
innobase_get_cset_width(
@@ -791,7 +777,7 @@ innobase_convert_from_table_id(
{
uint errors;
- strconvert(current_thd->charset(), from,
+ strconvert(thd_charset(current_thd), from,
&my_charset_filename, to, (uint) len, &errors);
}
@@ -810,7 +796,7 @@ innobase_convert_from_id(
{
uint errors;
- strconvert(current_thd->charset(), from,
+ strconvert(thd_charset(current_thd), from,
system_charset_info, to, (uint) len, &errors);
}
@@ -873,7 +859,7 @@ innobase_get_charset(
/* out: connection character set */
void* mysql_thd) /* in: MySQL thread handle */
{
- return(((THD*) mysql_thd)->charset());
+ return(thd_charset((THD*) mysql_thd));
}
/*************************************************************************
@@ -884,22 +870,9 @@ innobase_mysql_tmpfile(void)
/*========================*/
/* out: temporary file descriptor, or < 0 on error */
{
- char filename[FN_REFLEN];
int fd2 = -1;
- File fd = create_temp_file(filename, mysql_tmpdir, "ib",
-#ifdef __WIN__
- O_BINARY | O_TRUNC | O_SEQUENTIAL |
- O_TEMPORARY | O_SHORT_LIVED |
-#endif /* __WIN__ */
- O_CREAT | O_EXCL | O_RDWR,
- MYF(MY_WME));
+ File fd = mysql_tmpfile("ib");
if (fd >= 0) {
-#ifndef __WIN__
- /* On Windows, open files cannot be removed, but files can be
- created with the O_TEMPORARY flag to the same effect
- ("delete on close"). */
- unlink(filename);
-#endif /* !__WIN__ */
/* Copy the file descriptor, so that the additional resources
allocated by create_temp_file() can be freed by invoking
my_close().
@@ -914,7 +887,7 @@ innobase_mysql_tmpfile(void)
my_errno=errno;
my_error(EE_OUT_OF_FILERESOURCES,
MYF(ME_BELL+ME_WAITTANG),
- filename, my_errno);
+ "ib*", my_errno);
}
my_close(fd, MYF(MY_WME));
}
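The rewritten innobase_mysql_tmpfile() delegates creation (and the Windows delete-on-close details) to mysql_tmpfile(), then duplicates the descriptor so the wrapper's resources can be released with my_close() while InnoDB keeps a plain fd. The same dup-then-close idiom in portable POSIX terms (tmpfile() stands in for mysql_tmpfile("ib"); error handling omitted):

    #include <stdio.h>
    #include <unistd.h>

    int demo_tmp_fd(void)
    {
        FILE* f = tmpfile();       /* unlinked temporary file */
        int fd2 = dup(fileno(f));  /* raw fd survives fclose() */

        fclose(f);                 /* release the stdio wrapper */
        return(fd2);               /* caller now owns fd2 */
    }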
@@ -950,10 +923,9 @@ trx_t*
check_trx_exists(
/*=============*/
/* out: InnoDB transaction handle */
- handlerton* hton, /* in: handlerton for innodb */
THD* thd) /* in: user thread handle */
{
- trx_t*& trx = thd_to_trx(thd, hton);
+ trx_t*& trx = thd_to_trx(thd);
ut_ad(thd == current_thd);
@@ -962,27 +934,26 @@ check_trx_exists(
trx = trx_allocate_for_mysql();
trx->mysql_thd = thd;
- trx->mysql_query_str = &(thd->query);
- trx->active_trans = 0;
+ trx->mysql_query_str = thd_query(thd);
/* Update the info whether we should skip XA steps that eat
CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
+ trx->support_xa = THDVAR(thd, support_xa);
} else {
if (trx->magic_n != TRX_MAGIC_N) {
mem_analyze_corruption(trx);
- ut_a(0);
+ ut_error;
}
}
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+ if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
trx->check_foreigns = FALSE;
} else {
trx->check_foreigns = TRUE;
}
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
+ if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
trx->check_unique_secondary = FALSE;
} else {
trx->check_unique_secondary = TRUE;
@@ -1003,6 +974,7 @@ ha_innobase::ha_innobase(handlerton *hton, TABLE_SHARE *table_arg)
HA_CAN_SQL_HANDLER |
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
HA_PRIMARY_KEY_IN_READ_INDEX |
+ HA_BINLOG_ROW_CAPABLE |
HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ |
HA_TABLE_SCAN_ON_INDEX),
start_of_scan(0),
@@ -1022,7 +994,7 @@ ha_innobase::update_thd(
{
trx_t* trx;
- trx = check_trx_exists(ht, thd);
+ trx = check_trx_exists(thd);
if (prebuilt->trx != trx) {
@@ -1069,7 +1041,7 @@ innobase_register_trx_and_stmt(
innobase_register_stmt(hton, thd);
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
+ if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
/* No autocommit mode, register for a transaction */
trans_register_ha(thd, TRUE, hton);
@@ -1161,14 +1133,15 @@ innobase_query_caching_of_table_permitted(
ut_a(full_name_len < 999);
- if (thd->variables.tx_isolation == ISO_SERIALIZABLE) {
+ trx = check_trx_exists(thd);
+
+ if (trx->isolation_level == TRX_ISO_SERIALIZABLE) {
/* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every
plain SELECT if AUTOCOMMIT is not on. */
return((my_bool)FALSE);
}
- trx = check_trx_exists(legacy_innodb_hton, thd);
if (trx->has_search_latch) {
ut_print_timestamp(stderr);
sql_print_error("The calling thread is holding the adaptive "
@@ -1182,7 +1155,7 @@ innobase_query_caching_of_table_permitted(
innobase_release_stat_resources(trx);
- if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
+ if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
is_autocommit = TRUE;
} else {
@@ -1227,7 +1200,7 @@ innobase_query_caching_of_table_permitted(
if (trx->active_trans == 0) {
- innobase_register_trx_and_stmt(legacy_innodb_hton, thd);
+ innobase_register_trx_and_stmt(innodb_hton_ptr, thd);
trx->active_trans = 1;
}
@@ -1265,10 +1238,10 @@ innobase_invalidate_query_cache(
/* Argument TRUE below means we are using transactions */
#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate((THD*)(trx->mysql_thd),
- (const char*)full_name,
- (uint32)full_name_len,
- TRUE);
+ mysql_query_cache_invalidate4((THD*) trx->mysql_thd,
+ (const char*) full_name,
+ (uint32) full_name_len,
+ TRUE);
#endif
}
@@ -1296,12 +1269,12 @@ innobase_print_identifier(
output strings buffers must not be shared. The function
only produces more output when the name contains other
characters than [0-9A-Z_a-z]. */
- char* temp_name = my_malloc((uint) namelen + 1, MYF(MY_WME));
+ char* temp_name = (char*) my_malloc((uint) namelen + 1, MYF(MY_WME));
uint qnamelen = (uint) (namelen
+ (1 + sizeof srv_mysql50_table_name_prefix));
if (temp_name) {
- qname = my_malloc(qnamelen, MYF(MY_WME));
+ qname = (char*) my_malloc(qnamelen, MYF(MY_WME));
if (qname) {
memcpy(temp_name, name, namelen);
temp_name[namelen] = 0;
@@ -1348,7 +1321,20 @@ trx_is_interrupted(
/* out: TRUE if interrupted */
trx_t* trx) /* in: transaction */
{
- return(trx && trx->mysql_thd && ((THD*) trx->mysql_thd)->killed);
+ return(trx && trx->mysql_thd && thd_killed((THD*) trx->mysql_thd));
+}
+
+/******************************************************************
+Resets some fields of a prebuilt struct. The template is used in fast
+retrieval of just those column values MySQL needs in its processing. */
+static
+void
+reset_template(
+/*===========*/
+ row_prebuilt_t* prebuilt) /* in/out: prebuilt struct */
+{
+ prebuilt->keep_other_fields_on_keyread = 0;
+ prebuilt->read_just_key = 0;
}
/*********************************************************************
@@ -1367,7 +1353,7 @@ ha_innobase::init_table_handle_for_HANDLER(void)
one. Update the trx pointers in the prebuilt struct. Normally
this operation is done in external_lock. */
- update_thd(current_thd);
+ update_thd(ha_thd());
/* Initialize the prebuilt struct much like it would be inited in
external_lock */
@@ -1386,7 +1372,7 @@ ha_innobase::init_table_handle_for_HANDLER(void)
if (prebuilt->trx->active_trans == 0) {
- innobase_register_trx_and_stmt(ht, current_thd);
+ innobase_register_trx_and_stmt(ht, user_thd);
prebuilt->trx->active_trans = 1;
}
@@ -1409,19 +1395,18 @@ ha_innobase::init_table_handle_for_HANDLER(void)
/* We want always to fetch all columns in the whole row? Or do
we???? */
- prebuilt->read_just_key = FALSE;
-
prebuilt->used_in_HANDLER = TRUE;
-
- prebuilt->keep_other_fields_on_keyread = FALSE;
+ reset_template(prebuilt);
}
/*************************************************************************
Opens an InnoDB database. */
static
int
-innobase_init(void *p)
-/*===============*/
+innobase_init(
+/*==========*/
+ /* out: 0 on success, error code on failure */
+ void *p) /* in: InnoDB handlerton */
{
static char current_dir[3]; /* Set if using current lib */
int err;
@@ -1430,9 +1415,9 @@ innobase_init(void *p)
DBUG_ENTER("innobase_init");
handlerton *innobase_hton= (handlerton *)p;
- legacy_innodb_hton= innobase_hton;
+ innodb_hton_ptr = innobase_hton;
- innobase_hton->state=have_innodb;
+ innobase_hton->state = SHOW_OPTION_YES;
innobase_hton->db_type= DB_TYPE_INNODB;
innobase_hton->savepoint_offset=sizeof(trx_named_savept_t);
innobase_hton->close_connection=innobase_close_connection;
@@ -1457,9 +1442,6 @@ innobase_init(void *p)
innobase_hton->flags=HTON_NO_FLAGS;
innobase_hton->release_temporary_latches=innobase_release_temporary_latches;
- if (have_innodb != SHOW_OPTION_YES)
- DBUG_RETURN(0); // nothing else to do
-
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifdef UNIV_DEBUG
@@ -1615,10 +1597,7 @@ innobase_init(void *p)
changes the value so that it becomes the number of database pages. */
if (innobase_buffer_pool_awe_mem_mb == 0) {
- /* Careful here: we first convert the signed long int to ulint
- and only after that divide */
-
- srv_pool_size = ((ulint) innobase_buffer_pool_size) / 1024;
+ srv_pool_size = (ulint)(innobase_buffer_pool_size / 1024);
} else {
srv_use_awe = TRUE;
srv_pool_size = (ulint)
@@ -1641,8 +1620,10 @@ innobase_init(void *p)
srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
srv_use_checksums = (ibool) innobase_use_checksums;
- os_use_large_pages = (ibool) innobase_use_large_pages;
- os_large_page_size = (ulint) innobase_large_page_size;
+#ifdef HAVE_LARGE_PAGES
+ if ((os_use_large_pages = (ibool) my_use_large_pages))
+ os_large_page_size = (ulint) opt_large_page_size;
+#endif
row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout;
@@ -1701,7 +1682,6 @@ innobase_init(void *p)
DBUG_RETURN(FALSE);
error:
- have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler
DBUG_RETURN(TRUE);
}
@@ -1796,7 +1776,7 @@ innobase_start_trx_and_assign_read_view(
/* Create a new trx struct for thd, if it does not yet have one */
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* This is just to play safe: release a possible FIFO ticket and
search latch. Since we will reserve the kernel mutex, we have to
@@ -1841,10 +1821,10 @@ innobase_commit(
DBUG_ENTER("innobase_commit");
DBUG_PRINT("trans", ("ending transaction"));
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* Update the info whether we should skip XA steps that eat CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
+ trx->support_xa = THDVAR(thd, support_xa);
/* Since we will reserve the kernel mutex, we have to release
the search system latch first to obey the latching order. */
@@ -1875,7 +1855,7 @@ innobase_commit(
" trx->conc_state != TRX_NOT_STARTED");
}
if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
+ || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
/* We were instructed to commit the whole transaction, or
this is an SQL statement end and autocommit is on */
@@ -1900,9 +1880,8 @@ retry:
}
}
- trx->mysql_log_file_name = mysql_bin_log.get_log_fname();
- trx->mysql_log_offset =
- (ib_longlong)mysql_bin_log.get_log_file()->pos_in_file;
+ trx->mysql_log_file_name = mysql_bin_log_file_name();
+ trx->mysql_log_offset = (ib_longlong) mysql_bin_log_file_pos();
innobase_commit_low(trx);
@@ -1924,12 +1903,11 @@ retry:
/* We just mark the SQL statement ended and do not do a
transaction commit */
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some
- table in this SQL statement we release it now */
+ /* If we had reserved the auto-inc lock for some
+ table in this SQL statement we release it now */
+
+ row_unlock_table_autoinc_for_mysql(trx);
- row_unlock_table_autoinc_for_mysql(trx);
- }
/* Store the current undo_no of the transaction so that we
know where to roll back if we have to roll back the next
SQL statement */
@@ -1937,6 +1915,8 @@ retry:
trx_mark_sql_stat_end(trx);
}
+ trx->n_autoinc_rows = 0; /* Reset the number of AUTO-INC rows required */
+
if (trx->declared_to_be_inside_innodb) {
/* Release our possible ticket in the FIFO */
@@ -1950,115 +1930,10 @@ retry:
DBUG_RETURN(0);
}
-#if 0
-/* TODO: put the
-MySQL-4.1 functionality back to 5.0. This is needed to get InnoDB Hot Backup
-to work. */
-
-/*********************************************************************
-This is called when MySQL writes the binlog entry for the current
-transaction. Writes to the InnoDB tablespace info which tells where the
-MySQL binlog entry for the current transaction ended. Also commits the
-transaction inside InnoDB but does NOT flush InnoDB log files to disk.
-To flush you have to call innobase_commit_complete(). We have separated
-flushing to eliminate the bottleneck of LOCK_log in log.cc which disabled
-InnoDB's group commit capability. */
-static
-int
-innobase_report_binlog_offset_and_commit(
-/*=====================================*/
- /* out: 0 */
- handlerton *hton, /* in: Innodb handlerton */
- THD* thd, /* in: user thread */
- void* trx_handle, /* in: InnoDB trx handle */
- char* log_file_name, /* in: latest binlog file name */
- my_off_t end_offset) /* in: the offset in the binlog file
- up to which we wrote */
-{
- trx_t* trx;
-
- trx = (trx_t*)trx_handle;
-
- ut_a(trx != NULL);
-
- trx->mysql_log_file_name = log_file_name;
- trx->mysql_log_offset = (ib_longlong)end_offset;
-
- trx->flush_log_later = TRUE;
-
- innobase_commit(hton, thd, TRUE);
-
- trx->flush_log_later = FALSE;
-
- return(0);
-}
-
-/***********************************************************************
-This function stores the binlog offset and flushes logs. */
-static
-void
-innobase_store_binlog_offset_and_flush_log(
-/*=======================================*/
- char* binlog_name, /* in: binlog name */
- longlong offset) /* in: binlog offset */
-{
- mtr_t mtr;
-
- assert(binlog_name != NULL);
-
- /* Start a mini-transaction */
- mtr_start_noninline(&mtr);
-
- /* Update the latest MySQL binlog name and offset info
- in trx sys header */
-
- trx_sys_update_mysql_binlog_offset(
- binlog_name,
- offset,
- TRX_SYS_MYSQL_LOG_INFO, &mtr);
-
- /* Commits the mini-transaction */
- mtr_commit(&mtr);
-
- /* Synchronous flush of the log buffer to disk */
- log_buffer_flush_to_disk();
-}
-
/*********************************************************************
-This is called after MySQL has written the binlog entry for the current
-transaction. Flushes the InnoDB log files to disk if required. */
+Rolls back a transaction or the latest SQL statement. */
static
int
-innobase_commit_complete(
-/*=====================*/
- /* out: 0 */
- handlerton *hton, /* in: Innodb handlerton */
- THD* thd) /* in: user thread */
-{
- trx_t* trx;
-
- trx = thd_to_trx(thd, hton);
-
- if (trx && trx->active_trans) {
-
- trx->active_trans = 0;
-
- if (UNIV_UNLIKELY(srv_flush_log_at_trx_commit == 0)) {
-
- return(0);
- }
-
- trx_commit_complete_for_mysql(trx);
- }
-
- return(0);
-}
-#endif
-
-/*********************************************************************
-Rolls back a transaction or the latest SQL statement. */
-
-static int
innobase_rollback(
/*==============*/
/* out: 0 or error number */
@@ -2074,10 +1949,10 @@ innobase_rollback(
DBUG_ENTER("innobase_rollback");
DBUG_PRINT("trans", ("aborting transaction"));
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* Update the info whether we should skip XA steps that eat CPU time */
- trx->support_xa = (ibool)(thd->variables.innodb_support_xa);
+ trx->support_xa = THDVAR(thd, support_xa);
/* Release a possible FIFO ticket and search latch. Since we will
reserve the kernel mutex, we have to release the search system latch
@@ -2085,16 +1960,14 @@ innobase_rollback(
innobase_release_stat_resources(trx);
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some table (if
- we come here to roll back the latest SQL statement) we
- release it now before a possibly lengthy rollback */
+ /* If we had reserved the auto-inc lock for some table (if
+ we come here to roll back the latest SQL statement) we
+ release it now before a possibly lengthy rollback */
- row_unlock_table_autoinc_for_mysql(trx);
- }
+ row_unlock_table_autoinc_for_mysql(trx);
if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
+ || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
error = trx_rollback_for_mysql(trx);
trx->active_trans = 0;
@@ -2125,13 +1998,11 @@ innobase_rollback_trx(
innobase_release_stat_resources(trx);
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some table (if
- we come here to roll back the latest SQL statement) we
- release it now before a possibly lengthy rollback */
+ /* If we had reserved the auto-inc lock for some table (if
+ we come here to roll back the latest SQL statement) we
+ release it now before a possibly lengthy rollback */
- row_unlock_table_autoinc_for_mysql(trx);
- }
+ row_unlock_table_autoinc_for_mysql(trx);
error = trx_rollback_for_mysql(trx);
@@ -2140,8 +2011,8 @@ innobase_rollback_trx(
/*********************************************************************
Rolls back a transaction to a savepoint. */
-
-static int
+static
+int
innobase_rollback_to_savepoint(
/*===========================*/
/* out: 0 if success, HA_ERR_NO_SAVEPOINT if
@@ -2158,7 +2029,7 @@ innobase_rollback_to_savepoint(
DBUG_ENTER("innobase_rollback_to_savepoint");
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* Release a possible FIFO ticket and search latch. Since we will
reserve the kernel mutex, we have to release the search system latch
@@ -2194,7 +2065,7 @@ innobase_release_savepoint(
DBUG_ENTER("innobase_release_savepoint");
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* TODO: use provided savepoint data area to store savepoint data */
@@ -2226,10 +2097,12 @@ innobase_savepoint(
(unless we are in sub-statement), so SQL layer ensures that
this method is never called in such situation.
*/
- DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ||
+#ifdef MYSQL_SERVER /* plugins cannot access thd->in_sub_stmt */
+ DBUG_ASSERT(thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ||
thd->in_sub_stmt);
+#endif /* MYSQL_SERVER */
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
/* Release a possible FIFO ticket and search latch. Since we will
reserve the kernel mutex, we have to release the search system latch
@@ -2262,7 +2135,9 @@ innobase_close_connection(
{
trx_t* trx;
- trx = thd_to_trx(thd, hton);
+ DBUG_ENTER("innobase_close_connection");
+ DBUG_ASSERT(hton == innodb_hton_ptr);
+ trx = thd_to_trx(thd);
ut_a(trx);
@@ -2288,7 +2163,7 @@ innobase_close_connection(
thr_local_free(trx->mysql_thread_id);
trx_free_for_mysql(trx);
- return(0);
+ DBUG_RETURN(0);
}
@@ -2314,6 +2189,21 @@ ha_innobase::get_row_type() const
return(ROW_TYPE_NOT_USED);
}
+
+
+/********************************************************************
+Get the table flags to use for the statement. */
+handler::Table_flags
+ha_innobase::table_flags() const
+{
+	/* We need to use tx_isolation here, since table_flags() is (also)
+	called before prebuilt is inited. */
+ ulong const tx_isolation = thd_tx_isolation(current_thd);
+ if (tx_isolation <= ISO_READ_COMMITTED)
+ return int_table_flags;
+ return int_table_flags | HA_BINLOG_STMT_CAPABLE;
+}
+
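/* Editor's illustration, not part of this patch: a minimal sketch of
   the capability gate above.  The constants are stand-ins for the
   server's ISO_* and HA_BINLOG_STMT_CAPABLE values, assumed here only
   so the snippet is self-contained. */
#include <stdio.h>

enum { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED,
       ISO_REPEATABLE_READ, ISO_SERIALIZABLE };

#define BASE_FLAGS		0x01UL
#define BINLOG_STMT_CAPABLE	0x02UL	/* stand-in */

static unsigned long
flags_for_isolation(int tx_isolation)
{
	/* At READ COMMITTED and below, the locks needed for a
	   deterministic statement-based binlog cannot be taken, so
	   the capability flag is withheld. */
	if (tx_isolation <= ISO_READ_COMMITTED) {
		return(BASE_FLAGS);
	}

	return(BASE_FLAGS | BINLOG_STMT_CAPABLE);
}

int main(void)
{
	printf("%lu %lu\n", flags_for_isolation(ISO_READ_COMMITTED),
	       flags_for_isolation(ISO_REPEATABLE_READ));	/* 1 3 */
	return(0);
}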
/********************************************************************
Gives the file extension of an InnoDB single-table tablespace. */
static const char* ha_innobase_exts[] = {
@@ -2397,7 +2287,7 @@ ha_innobase::open(
UT_NOT_USED(mode);
UT_NOT_USED(test_if_locked);
- thd = current_thd;
+ thd = ha_thd();
normalize_table_name(norm_name, name);
user_thd = NULL;
@@ -2415,7 +2305,7 @@ ha_innobase::open(
upd_and_key_val_buff_len =
table->s->reclength + table->s->max_key_length
+ MAX_REF_PARTS * 3;
- if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME),
+ if (!(uchar*) my_multi_malloc(MYF(MY_WME),
&upd_buff, upd_and_key_val_buff_len,
&key_val_buff, upd_and_key_val_buff_len,
NullS)) {
@@ -2441,7 +2331,7 @@ ha_innobase::open(
"how you can resolve the problem.\n",
norm_name);
free_share(share);
- my_free((gptr) upd_buff, MYF(0));
+ my_free(upd_buff, MYF(0));
my_errno = ENOENT;
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
@@ -2458,7 +2348,7 @@ ha_innobase::open(
"how you can resolve the problem.\n",
norm_name);
free_share(share);
- my_free((gptr) upd_buff, MYF(0));
+ my_free(upd_buff, MYF(0));
my_errno = ENOENT;
dict_table_decrement_handle_count(ib_table);
@@ -2553,11 +2443,18 @@ ha_innobase::close(void)
/*====================*/
/* out: 0 */
{
+ THD* thd;
+
DBUG_ENTER("ha_innobase::close");
+ thd = current_thd; // avoid calling current_thd twice, it may be slow
+ if (thd != NULL) {
+ innobase_release_temporary_latches(ht, thd);
+ }
+
row_prebuilt_free(prebuilt);
- my_free((gptr) upd_buff, MYF(0));
+ my_free(upd_buff, MYF(0));
free_share(share);
/* Tell InnoDB server that there might be work for
@@ -2580,7 +2477,7 @@ get_field_offset(
TABLE* table, /* in: MySQL table object */
Field* field) /* in: MySQL field object */
{
- return((uint) (field->ptr - (char*) table->record[0]));
+ return((uint) (field->ptr - table->record[0]));
}
/******************************************************************
@@ -2842,8 +2739,8 @@ inline
uint
innobase_read_from_2_little_endian(
/*===============================*/
- /* out: value */
- const mysql_byte* buf) /* in: from where to read */
+ /* out: value */
+ const uchar* buf) /* in: from where to read */
{
return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1])));
}
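/* Editor's illustration, not part of this patch: the same 2-byte
   little-endian read as innobase_read_from_2_little_endian above,
   repackaged as a self-contained program. */
#include <stdio.h>

static unsigned int
read_2_little_endian(const unsigned char* buf)
{
	return((unsigned int) buf[0] + 256U * (unsigned int) buf[1]);
}

int main(void)
{
	const unsigned char buf[2] = {0x34, 0x12};

	printf("%u\n", read_2_little_endian(buf));	/* 4660 == 0x1234 */
	return(0);
}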
@@ -2859,7 +2756,7 @@ ha_innobase::store_key_val_for_row(
char* buff, /* in/out: buffer for the key value (in MySQL
format) */
uint buff_len,/* in: buffer length */
- const mysql_byte* record)/* in: row in MySQL format */
+ const uchar* record)/* in: row in MySQL format */
{
KEY* key_info = table->key_info + keynr;
KEY_PART_INFO* key_part = key_info->key_part;
@@ -3056,7 +2953,7 @@ ha_innobase::store_key_val_for_row(
CHARSET_INFO* cs;
ulint true_len;
ulint key_len;
- const mysql_byte* src_start;
+ const uchar* src_start;
int error=0;
enum_field_types real_type;
@@ -3129,7 +3026,7 @@ static
void
build_template(
/*===========*/
- row_prebuilt_t* prebuilt, /* in: prebuilt struct */
+ row_prebuilt_t* prebuilt, /* in/out: prebuilt struct */
THD* thd, /* in: current user thread, used
only if templ_type is
ROW_MYSQL_REC_FIELDS */
@@ -3336,21 +3233,128 @@ skip_field:
}
/************************************************************************
+This special handling is needed to work around a limitation of MySQL's
+binlogging: we must eliminate the non-determinism that would otherwise
+arise in INSERT ... SELECT type statements, since the binlog stores only
+the minimum value of the autoinc interval. Once that is fixed we can get
+rid of the special lock handling.*/
+
+ulong
+ha_innobase::innobase_autoinc_lock(void)
+/*====================================*/
+ /* out: DB_SUCCESS if all OK else
+ error code */
+{
+ ulint error = DB_SUCCESS;
+
+ switch (innobase_autoinc_lock_mode) {
+ case AUTOINC_NO_LOCKING:
+ /* Acquire only the AUTOINC mutex. */
+ dict_table_autoinc_lock(prebuilt->table);
+ break;
+
+ case AUTOINC_NEW_STYLE_LOCKING:
+	/* For simple (single/multi) row INSERTs, we fall back to the
+ old style only if another transaction has already acquired
+ the AUTOINC lock on behalf of a LOAD FILE or INSERT ... SELECT
+ etc. type of statement. */
+ if (thd_sql_command(user_thd) == SQLCOM_INSERT) {
+ dict_table_t* table = prebuilt->table;
+
+ /* Acquire the AUTOINC mutex. */
+ dict_table_autoinc_lock(table);
+
+ /* We need to check that another transaction isn't
+ already holding the AUTOINC lock on the table. */
+ if (table->n_waiting_or_granted_auto_inc_locks) {
+ /* Release the mutex to avoid deadlocks. */
+ dict_table_autoinc_unlock(table);
+ } else {
+ break;
+ }
+ }
+ /* Fall through to old style locking. */
+
+ case AUTOINC_OLD_STYLE_LOCKING:
+ error = row_lock_table_autoinc_for_mysql(prebuilt);
+
+ if (error == DB_SUCCESS) {
+
+ /* Acquire the AUTOINC mutex. */
+ dict_table_autoinc_lock(prebuilt->table);
+ }
+ break;
+
+ default:
+ ut_error;
+ }
+
+ return(ulong(error));
+}
+
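/* Editor's sketch, not part of this patch: the decision implemented by
   innobase_autoinc_lock() above, reduced to a predicate.  The enum and
   both boolean parameters are illustrative assumptions. */
enum autoinc_mode {
	AUTOINC_OLD_STYLE,	/* stand-ins for the three lock modes */
	AUTOINC_NEW_STYLE,
	AUTOINC_NO_LOCK
};

static int
autoinc_needs_table_lock(enum autoinc_mode mode, int is_simple_insert,
			 int other_trx_holds_or_waits)
{
	switch (mode) {
	case AUTOINC_NO_LOCK:
		return(0);	/* AUTOINC mutex only */

	case AUTOINC_NEW_STYLE:
		/* A plain INSERT keeps just the mutex, unless another
		   transaction already holds or waits for the table
		   AUTOINC lock; then we must queue behind it. */
		if (is_simple_insert && !other_trx_holds_or_waits) {
			return(0);
		}
		/* fall through */

	case AUTOINC_OLD_STYLE:
	default:
		return(1);	/* table-level AUTOINC lock */
	}
}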
+/************************************************************************
+Reset the autoinc value in the table.*/
+
+ulong
+ha_innobase::innobase_reset_autoinc(
+/*================================*/
+ /* out: DB_SUCCESS if all went well
+ else error code */
+ ulonglong autoinc) /* in: value to store */
+{
+ ulint error;
+
+ error = innobase_autoinc_lock();
+
+ if (error == DB_SUCCESS) {
+
+ dict_table_autoinc_initialize(prebuilt->table, autoinc);
+
+ dict_table_autoinc_unlock(prebuilt->table);
+ }
+
+ return(ulong(error));
+}
+
+/************************************************************************
+Store the autoinc value in the table. The autoinc value is only set if
+it's greater than the existing autoinc value in the table.*/
+
+ulong
+ha_innobase::innobase_set_max_autoinc(
+/*==================================*/
+					/* out: DB_SUCCESS if all went well
+ else error code */
+ ulonglong auto_inc) /* in: value to store */
+{
+ ulint error;
+
+ error = innobase_autoinc_lock();
+
+ if (error == DB_SUCCESS) {
+
+ dict_table_autoinc_update(prebuilt->table, auto_inc);
+
+ dict_table_autoinc_unlock(prebuilt->table);
+ }
+
+ return(ulong(error));
+}
+
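/* Editor's sketch, not part of this patch: the contract of
   innobase_set_max_autoinc() above is that the counter only ever moves
   up; a stand-in for dict_table_autoinc_update() under that contract
   could be: */
static unsigned long long
autoinc_raise_to(unsigned long long current, unsigned long long candidate)
{
	/* Values at or below the current counter are ignored. */
	return(candidate > current ? candidate : current);
}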
+/************************************************************************
Stores a row in an InnoDB database, to the table specified in this
handle. */
int
ha_innobase::write_row(
/*===================*/
- /* out: error code */
- mysql_byte* record) /* in: a row in MySQL format */
+ /* out: error code */
+ uchar* record) /* in: a row in MySQL format */
{
- int error;
- longlong auto_inc;
- longlong dummy;
+ int error = 0;
ibool auto_inc_used= FALSE;
- THD* thd = current_thd;
- trx_t* trx = thd_to_trx(thd, ht);
+ ulint sql_command;
+ trx_t* trx = thd_to_trx(user_thd);
DBUG_ENTER("ha_innobase::write_row");
@@ -3374,11 +3378,13 @@ ha_innobase::write_row(
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
- if ((user_thd->lex->sql_command == SQLCOM_ALTER_TABLE
- || user_thd->lex->sql_command == SQLCOM_OPTIMIZE
- || user_thd->lex->sql_command == SQLCOM_CREATE_INDEX
- || user_thd->lex->sql_command == SQLCOM_DROP_INDEX)
- && num_write_row >= 10000) {
+ sql_command = thd_sql_command(user_thd);
+
+ if ((sql_command == SQLCOM_ALTER_TABLE
+ || sql_command == SQLCOM_OPTIMIZE
+ || sql_command == SQLCOM_CREATE_INDEX
+ || sql_command == SQLCOM_DROP_INDEX)
+ && num_write_row >= 10000) {
/* ALTER TABLE is COMMITted at every 10000 copied rows.
The IX table lock for the original table has to be re-issued.
As this method will be called on a temporary table where the
@@ -3444,62 +3450,20 @@ no_commit:
num_write_row++;
+ /* This is the case where the table has an auto-increment column */
if (table->next_number_field && record == table->record[0]) {
- /* This is the case where the table has an
- auto-increment column */
-
- /* Initialize the auto-inc counter if it has not been
- initialized yet */
-
- if (0 == dict_table_autoinc_peek(prebuilt->table)) {
-
- /* This call initializes the counter */
- error = innobase_read_and_init_auto_inc(&dummy);
-
- if (error) {
- /* Deadlock or lock wait timeout */
-
- goto func_exit;
- }
-
- /* We have to set sql_stat_start to TRUE because
- the above call probably has called a select, and
- has reset that flag; row_insert_for_mysql has to
- know to set the IX intention lock on the table,
- something it only does at the start of each
- statement */
- prebuilt->sql_stat_start = TRUE;
- }
-
- /* We have to use the transactional lock mechanism on the
- auto-inc counter of the table to ensure that replication and
- roll-forward of the binlog exactly imitates also the given
- auto-inc values. The lock is released at each SQL statement's
- end. This lock also prevents a race where two threads would
- call ::get_auto_increment() simultaneously. */
-
- error = row_lock_table_autoinc_for_mysql(prebuilt);
-
- if (error != DB_SUCCESS) {
- /* Deadlock or lock wait timeout */
-
- error = convert_error_code_to_mysql(error, user_thd);
+ if ((error = update_auto_increment())) {
goto func_exit;
}
- /* We must use the handler code to update the auto-increment
- value to be sure that we increment it correctly. */
-
- if ((error= update_auto_increment()))
- goto func_exit;
- auto_inc_used = 1;
-
+ auto_inc_used = TRUE;
}
if (prebuilt->mysql_template == NULL
- || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) {
+ || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) {
+
/* Build the template used in converting quickly between
the two database formats */
@@ -3510,45 +3474,71 @@ no_commit:
error = row_insert_for_mysql((byte*) record, prebuilt);
- if (error == DB_SUCCESS && auto_inc_used) {
+	/* Handle the AUTOINC bookkeeping, including duplicate key errors */
+ if (auto_inc_used) {
+ ulonglong auto_inc;
- /* Fetch the value that was set in the autoincrement field */
+ /* Note the number of rows processed for this statement, used
+ by get_auto_increment() to determine the number of AUTO-INC
+		values to reserve. This is only useful for a multi-value INSERT
+		and is a statement-level counter.*/
+ if (trx->n_autoinc_rows > 0) {
+ --trx->n_autoinc_rows;
+ }
+ /* Get the value that MySQL attempted to store in the table.*/
auto_inc = table->next_number_field->val_int();
- if (auto_inc != 0) {
- /* This call will update the counter according to the
- value that was inserted in the table */
+ switch (error) {
+ case DB_DUPLICATE_KEY:
+
+ /* A REPLACE command and LOAD DATA INFILE REPLACE
+ handle a duplicate key error themselves, but we
+ must update the autoinc counter if we are performing
+ those statements. */
+
+ switch (sql_command) {
+ case SQLCOM_LOAD:
+ if ((trx->duplicates
+ & (TRX_DUP_IGNORE | TRX_DUP_REPLACE))) {
+
+ goto set_max_autoinc;
+ }
+ break;
+
+ case SQLCOM_REPLACE:
+ case SQLCOM_INSERT_SELECT:
+ case SQLCOM_REPLACE_SELECT:
+ goto set_max_autoinc;
+ break;
- dict_table_autoinc_update(prebuilt->table, auto_inc);
- }
- }
+ default:
+ break;
+ }
- /* A REPLACE command and LOAD DATA INFILE REPLACE handle a duplicate
- key error themselves, and we must update the autoinc counter if we are
- performing those statements. */
+ break;
- if (error == DB_DUPLICATE_KEY && auto_inc_used
- && (user_thd->lex->sql_command == SQLCOM_REPLACE
- || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT
- || (user_thd->lex->sql_command == SQLCOM_INSERT
- && user_thd->lex->duplicates == DUP_UPDATE)
- || (user_thd->lex->sql_command == SQLCOM_LOAD
- && user_thd->lex->duplicates == DUP_REPLACE))) {
+ case DB_SUCCESS:
+ /* If the actual value inserted is greater than
+ the upper limit of the interval, then we try and
+ update the table upper limit. Note: last_value
+ will be 0 if get_auto_increment() was not called.*/
- auto_inc = table->next_number_field->val_int();
+ if (auto_inc > prebuilt->last_value) {
+set_max_autoinc:
+ ut_a(prebuilt->table->autoinc_increment > 0);
+ auto_inc += prebuilt->table->autoinc_increment;
- if (auto_inc != 0) {
- dict_table_autoinc_update(prebuilt->table, auto_inc);
- }
- }
+ innobase_set_max_autoinc(auto_inc);
+ }
+ break;
+ }
+ }
innodb_srv_conc_exit_innodb(prebuilt->trx);
error = convert_error_code_to_mysql(error, user_thd);
- /* Tell InnoDB server that there might be work for
- utility threads: */
func_exit:
innobase_active_small();
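/* Editor's sketch, not part of this patch: the DB_DUPLICATE_KEY switch
   above, reduced to a predicate answering "must the AUTOINC counter
   still advance?".  The SQLCOM_* and DUP_* constants are stand-ins. */
enum { SQLCOM_INSERT_X, SQLCOM_LOAD_X, SQLCOM_REPLACE_X,
       SQLCOM_INSERT_SELECT_X, SQLCOM_REPLACE_SELECT_X };

#define DUP_IGNORE	0x1U	/* stand-in for TRX_DUP_IGNORE */
#define DUP_REPLACE	0x2U	/* stand-in for TRX_DUP_REPLACE */

static int
dup_key_advances_autoinc(int sql_command, unsigned dup_flags)
{
	switch (sql_command) {
	case SQLCOM_LOAD_X:
		/* Only LOAD DATA INFILE ... REPLACE/IGNORE handles the
		   duplicate itself, so only then must the counter
		   follow the attempted value. */
		return((dup_flags & (DUP_IGNORE | DUP_REPLACE)) != 0);

	case SQLCOM_REPLACE_X:
	case SQLCOM_INSERT_SELECT_X:
	case SQLCOM_REPLACE_SELECT_X:
		return(1);

	default:
		return(0);
	}
}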
@@ -3564,16 +3554,16 @@ calc_row_difference(
/*================*/
/* out: error number or 0 */
upd_t* uvect, /* in/out: update vector */
- mysql_byte* old_row, /* in: old row in MySQL format */
- mysql_byte* new_row, /* in: new row in MySQL format */
+ uchar* old_row, /* in: old row in MySQL format */
+ uchar* new_row, /* in: new row in MySQL format */
struct st_table* table, /* in: table in MySQL data
dictionary */
- mysql_byte* upd_buff, /* in: buffer to use */
+ uchar* upd_buff, /* in: buffer to use */
ulint buff_len, /* in: buffer length */
row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */
THD* thd) /* in: user thread */
{
- mysql_byte* original_upd_buff = upd_buff;
+ uchar* original_upd_buff = upd_buff;
Field* field;
enum_field_types field_mysql_type;
uint n_fields;
@@ -3717,12 +3707,12 @@ int
ha_innobase::update_row(
/*====================*/
/* out: error number or 0 */
- const mysql_byte* old_row,/* in: old row in MySQL format */
- mysql_byte* new_row)/* in: new row in MySQL format */
+ const uchar* old_row, /* in: old row in MySQL format */
+ uchar* new_row) /* in: new row in MySQL format */
{
upd_t* uvect;
int error = 0;
- trx_t* trx = thd_to_trx(current_thd, ht);
+ trx_t* trx = thd_to_trx(user_thd);
DBUG_ENTER("ha_innobase::update_row");
@@ -3740,7 +3730,7 @@ ha_innobase::update_row(
/* Build an update vector from the modified fields in the rows
(uses upd_buff of the handle) */
- calc_row_difference(uvect, (mysql_byte*) old_row, new_row, table,
+ calc_row_difference(uvect, (uchar*) old_row, new_row, table,
upd_buff, (ulint)upd_and_key_val_buff_len,
prebuilt, user_thd);
@@ -3753,6 +3743,32 @@ ha_innobase::update_row(
error = row_update_for_mysql((byte*) old_row, prebuilt);
+ /* We need to do some special AUTOINC handling for the following case:
+
+ INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ...
+
+ We need to use the AUTOINC counter that was actually used by
+ MySQL in the UPDATE statement, which can be different from the
+ value used in the INSERT statement.*/
+
+ if (error == DB_SUCCESS
+ && table->next_number_field
+ && new_row == table->record[0]
+ && thd_sql_command(user_thd) == SQLCOM_INSERT
+ && (trx->duplicates & (TRX_DUP_IGNORE | TRX_DUP_REPLACE))
+ == TRX_DUP_IGNORE) {
+
+ longlong auto_inc;
+
+ auto_inc = table->next_number_field->val_int();
+
+ if (auto_inc != 0) {
+ auto_inc += prebuilt->table->autoinc_increment;
+
+ innobase_set_max_autoinc(auto_inc);
+ }
+ }
+
innodb_srv_conc_exit_innodb(trx);
error = convert_error_code_to_mysql(error, user_thd);
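/* Editor's sketch, not part of this patch: the fixup above as a
   predicate.  It fires only for INSERT ... ON DUPLICATE KEY UPDATE,
   i.e. when the duplicate flags carry IGNORE but not REPLACE; the
   parameters and DUP_* values are illustrative stand-ins. */
#define DUP_IGNORE	0x1U	/* stand-in for TRX_DUP_IGNORE */
#define DUP_REPLACE	0x2U	/* stand-in for TRX_DUP_REPLACE */

static int
needs_on_dup_autoinc_fixup(int update_succeeded, int row_has_autoinc_col,
			   int is_insert_stmt, unsigned dup_flags)
{
	return(update_succeeded && row_has_autoinc_col && is_insert_stmt
	       && (dup_flags & (DUP_IGNORE | DUP_REPLACE)) == DUP_IGNORE);
}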
@@ -3771,16 +3787,40 @@ Deletes a row given as the parameter. */
int
ha_innobase::delete_row(
/*====================*/
- /* out: error number or 0 */
- const mysql_byte* record) /* in: a row in MySQL format */
+ /* out: error number or 0 */
+ const uchar* record) /* in: a row in MySQL format */
{
int error = 0;
- trx_t* trx = thd_to_trx(current_thd, ht);
+ trx_t* trx = thd_to_trx(user_thd);
DBUG_ENTER("ha_innobase::delete_row");
ut_a(prebuilt->trx == trx);
+ /* Only if the table has an AUTOINC column */
+ if (table->found_next_number_field && record == table->record[0]) {
+ ulonglong dummy = 0;
+
+ /* First check whether the AUTOINC sub-system has been
+ initialized using the AUTOINC mutex. If not then we
+ do it the "proper" way, by acquiring the heavier locks. */
+ dict_table_autoinc_lock(prebuilt->table);
+
+ if (!prebuilt->table->autoinc_inited) {
+ dict_table_autoinc_unlock(prebuilt->table);
+
+ error = innobase_get_auto_increment(&dummy);
+
+ if (error == DB_SUCCESS) {
+ dict_table_autoinc_unlock(prebuilt->table);
+ } else {
+ goto error_exit;
+ }
+ } else {
+ dict_table_autoinc_unlock(prebuilt->table);
+ }
+ }
+
if (!prebuilt->upd_node) {
row_get_prebuilt_update_vector(prebuilt);
}
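/* Editor's sketch, not part of this patch: the check-then-initialize
   pattern above, using pthreads in place of the dict_table_autoinc_*
   mutex calls; heavy_init stands in for innobase_get_auto_increment. */
#include <pthread.h>

struct autoinc_state {
	pthread_mutex_t	mutex;
	int		inited;
};

static int
ensure_autoinc_inited(struct autoinc_state* s, int (*heavy_init)(void))
{
	int	err = 0;

	pthread_mutex_lock(&s->mutex);

	if (!s->inited) {
		/* Drop the mutex first: heavy_init() acquires heavier
		   locks, may block, and must not deadlock on it. */
		pthread_mutex_unlock(&s->mutex);
		err = heavy_init();
	} else {
		pthread_mutex_unlock(&s->mutex);
	}

	return(err);
}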
@@ -3795,6 +3835,7 @@ ha_innobase::delete_row(
innodb_srv_conc_exit_innodb(trx);
+error_exit:
error = convert_error_code_to_mysql(error, user_thd);
/* Tell the InnoDB server that there might be work for
@@ -3854,14 +3895,15 @@ void
ha_innobase::try_semi_consistent_read(bool yes)
/*===========================================*/
{
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
/* Row read type is set to semi consistent read if this was
requested by the MySQL and either innodb_locks_unsafe_for_binlog
option is used or this session is using READ COMMITTED isolation
level. */
- if (yes && (srv_locks_unsafe_for_binlog
+ if (yes
+ && (srv_locks_unsafe_for_binlog
|| prebuilt->trx->isolation_level == TRX_ISO_READ_COMMITTED)) {
prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT;
} else {
@@ -3997,9 +4039,9 @@ ha_innobase::index_read(
/*====================*/
/* out: 0, HA_ERR_KEY_NOT_FOUND,
or error number */
- mysql_byte* buf, /* in/out: buffer for the returned
+ uchar* buf, /* in/out: buffer for the returned
row */
- const mysql_byte* key_ptr,/* in: key value; if this is NULL
+ const uchar* key_ptr, /* in: key value; if this is NULL
we position the cursor at the
start or end of index; this can
also contain an InnoDB row id, in
@@ -4019,7 +4061,7 @@ ha_innobase::index_read(
DBUG_ENTER("index_read");
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(user_thd));
ha_statistic_increment(&SSV::ha_read_key_count);
@@ -4096,54 +4138,78 @@ row with the current key value or prefix. */
int
ha_innobase::index_read_last(
/*=========================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND, or an
- error code */
- mysql_byte* buf, /* out: fetched row */
- const mysql_byte* key_ptr, /* in: key value, or a prefix of a full
- key value */
- uint key_len) /* in: length of the key val or prefix
- in bytes */
+ /* out: 0, HA_ERR_KEY_NOT_FOUND, or an
+ error code */
+ uchar* buf, /* out: fetched row */
+ const uchar* key_ptr,/* in: key value, or a prefix of a full
+ key value */
+ uint key_len)/* in: length of the key val or prefix
+ in bytes */
{
return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST));
}
/************************************************************************
-Changes the active index of a handle. */
+Get the index for a handle. Does not change active index.*/
-int
-ha_innobase::change_active_index(
-/*=============================*/
- /* out: 0 or error code */
- uint keynr) /* in: use this index; MAX_KEY means always clustered
- index, even if it was internally generated by
- InnoDB */
+dict_index_t*
+ha_innobase::innobase_get_index(
+/*============================*/
+ /* out: NULL or index instance. */
+ uint keynr) /* in: use this index; MAX_KEY means always
+ clustered index, even if it was internally
+ generated by InnoDB */
{
- KEY* key=0;
+ KEY* key = 0;
+ dict_index_t* index = 0;
+
+ DBUG_ENTER("innobase_get_index");
ha_statistic_increment(&SSV::ha_read_key_count);
- DBUG_ENTER("change_active_index");
- ut_ad(user_thd == current_thd);
- ut_a(prebuilt->trx == thd_to_trx(user_thd, ht));
-
- active_index = keynr;
+ ut_ad(user_thd == ha_thd());
+ ut_a(prebuilt->trx == thd_to_trx(user_thd));
if (keynr != MAX_KEY && table->s->keys > 0) {
- key = table->key_info + active_index;
+ key = table->key_info + keynr;
- prebuilt->index = dict_table_get_index_noninline(
+ index = dict_table_get_index_noninline(
prebuilt->table, key->name);
} else {
- prebuilt->index = dict_table_get_first_index_noninline(
- prebuilt->table);
+ index = dict_table_get_first_index_noninline(prebuilt->table);
}
- if (!prebuilt->index) {
+ if (!index) {
sql_print_error(
"Innodb could not find key n:o %u with name %s "
"from dict cache for table %s",
keynr, key ? key->name : "NULL",
prebuilt->table->name);
+ }
+
+ DBUG_RETURN(index);
+}
+
+/************************************************************************
+Changes the active index of a handle. */
+
+int
+ha_innobase::change_active_index(
+/*=============================*/
+ /* out: 0 or error code */
+ uint keynr) /* in: use this index; MAX_KEY means always clustered
+ index, even if it was internally generated by
+ InnoDB */
+{
+ DBUG_ENTER("change_active_index");
+
+ ut_ad(user_thd == ha_thd());
+ ut_a(prebuilt->trx == thd_to_trx(user_thd));
+
+ active_index = keynr;
+
+ prebuilt->index = innobase_get_index(keynr);
+ if (!prebuilt->index) {
DBUG_RETURN(1);
}
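/* Editor's sketch, not part of this patch: keynr resolution in
   innobase_get_index() above.  MAX_KEY_X is a stand-in for MAX_KEY;
   a keyless table likewise falls back to the first (clustered) index,
   which InnoDB may have generated internally. */
#include <stddef.h>

#define MAX_KEY_X 64U	/* stand-in */

static const char*
resolve_index_name(unsigned keynr, unsigned n_keys, const char** key_names)
{
	if (keynr != MAX_KEY_X && n_keys > 0) {
		return(key_names[keynr]);	/* look up by key name */
	}

	return(NULL);	/* caller takes the first (clustered) index */
}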
@@ -4174,10 +4240,10 @@ int
ha_innobase::index_read_idx(
/*========================*/
/* out: error number or 0 */
- mysql_byte* buf, /* in/out: buffer for the returned
+ uchar* buf, /* in/out: buffer for the returned
row */
uint keynr, /* in: use this index */
- const mysql_byte* key, /* in: key value; if this is NULL
+ const uchar* key, /* in: key value; if this is NULL
we position the cursor at the
start or end of index */
uint key_len, /* in: key value length */
@@ -4200,7 +4266,7 @@ ha_innobase::general_fetch(
/*=======================*/
/* out: 0, HA_ERR_END_OF_FILE, or error
number */
- mysql_byte* buf, /* in/out: buffer for next row in MySQL
+ uchar* buf, /* in/out: buffer for next row in MySQL
format */
uint direction, /* in: ROW_SEL_NEXT or ROW_SEL_PREV */
uint match_mode) /* in: 0, ROW_SEL_EXACT, or
@@ -4211,7 +4277,7 @@ ha_innobase::general_fetch(
DBUG_ENTER("general_fetch");
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(user_thd));
innodb_srv_conc_enter_innodb(prebuilt->trx);
@@ -4247,7 +4313,7 @@ ha_innobase::index_next(
/*====================*/
/* out: 0, HA_ERR_END_OF_FILE, or error
number */
- mysql_byte* buf) /* in/out: buffer for next row in MySQL
+ uchar* buf) /* in/out: buffer for next row in MySQL
format */
{
ha_statistic_increment(&SSV::ha_read_next_count);
@@ -4263,8 +4329,8 @@ ha_innobase::index_next_same(
/*=========================*/
/* out: 0, HA_ERR_END_OF_FILE, or error
number */
- mysql_byte* buf, /* in/out: buffer for the row */
- const mysql_byte* key, /* in: key value */
+ uchar* buf, /* in/out: buffer for the row */
+ const uchar* key, /* in: key value */
uint keylen) /* in: key value length */
{
ha_statistic_increment(&SSV::ha_read_next_count);
@@ -4279,10 +4345,8 @@ positioned using index_read. */
int
ha_innobase::index_prev(
/*====================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error
- number */
- mysql_byte* buf) /* in/out: buffer for previous row in MySQL
- format */
+ /* out: 0, HA_ERR_END_OF_FILE, or error number */
+ uchar* buf) /* in/out: buffer for previous row in MySQL format */
{
ha_statistic_increment(&SSV::ha_read_prev_count);
@@ -4296,9 +4360,8 @@ corresponding row to buf. */
int
ha_innobase::index_first(
/*=====================*/
- /* out: 0, HA_ERR_END_OF_FILE,
- or error code */
- mysql_byte* buf) /* in/out: buffer for the row */
+ /* out: 0, HA_ERR_END_OF_FILE, or error code */
+ uchar* buf) /* in/out: buffer for the row */
{
int error;
@@ -4323,8 +4386,8 @@ corresponding row to buf. */
int
ha_innobase::index_last(
/*====================*/
- /* out: 0, HA_ERR_END_OF_FILE, or error code */
- mysql_byte* buf) /* in/out: buffer for the row */
+ /* out: 0, HA_ERR_END_OF_FILE, or error code */
+ uchar* buf) /* in/out: buffer for the row */
{
int error;
@@ -4393,7 +4456,7 @@ int
ha_innobase::rnd_next(
/*==================*/
/* out: 0, HA_ERR_END_OF_FILE, or error number */
- mysql_byte* buf)/* in/out: returns the row in this buffer,
+ uchar* buf) /* in/out: returns the row in this buffer,
in MySQL format */
{
int error;
@@ -4420,23 +4483,21 @@ Fetches a row from the table based on a row reference. */
int
ha_innobase::rnd_pos(
/*=================*/
- /* out: 0, HA_ERR_KEY_NOT_FOUND,
- or error code */
- mysql_byte* buf, /* in/out: buffer for the row */
- mysql_byte* pos) /* in: primary key value of the row in the
- MySQL format, or the row id if the clustered
- index was internally generated by InnoDB;
- the length of data in pos has to be
- ref_length */
+ /* out: 0, HA_ERR_KEY_NOT_FOUND, or error code */
+ uchar* buf, /* in/out: buffer for the row */
+ uchar* pos) /* in: primary key value of the row in the
+ MySQL format, or the row id if the clustered
+ index was internally generated by InnoDB; the
+ length of data in pos has to be ref_length */
{
int error;
uint keynr = active_index;
DBUG_ENTER("rnd_pos");
- DBUG_DUMP("key", (char*) pos, ref_length);
+ DBUG_DUMP("key", pos, ref_length);
ha_statistic_increment(&SSV::ha_read_rnd_count);
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
if (prebuilt->clust_index_was_generated) {
/* No primary key was defined for the table and we
@@ -4480,11 +4541,11 @@ was positioned the last time. */
void
ha_innobase::position(
/*==================*/
- const mysql_byte* record) /* in: row in MySQL format */
+ const uchar* record) /* in: row in MySQL format */
{
uint len;
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
if (prebuilt->clust_index_was_generated) {
/* No primary key was defined for the table and we
@@ -4510,6 +4571,24 @@ ha_innobase::position(
}
/*********************************************************************
+If it's a DB_TOO_BIG_RECORD error then set a suitable message to
+return to the client.*/
+inline
+void
+innodb_check_for_record_too_big_error(
+/*==================================*/
+ ulint comp, /* in: ROW_FORMAT: nonzero=COMPACT, 0=REDUNDANT */
+ int error) /* in: error code to check */
+{
+ if (error == (int)DB_TOO_BIG_RECORD) {
+ ulint max_row_size
+ = page_get_free_space_of_empty_noninline(comp) / 2;
+
+ my_error(ER_TOO_BIG_ROWSIZE, MYF(0), max_row_size);
+ }
+}
+
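/* Editor's sketch, not part of this patch: the message above reports
   half of an empty page's free space as the maximum row size.  The
   free-space figure below is an assumption for illustration, not the
   value page_get_free_space_of_empty_noninline() returns. */
#define ASSUMED_EMPTY_PAGE_FREE_SPACE	16252UL	/* varies by row format */

static unsigned long
reported_max_row_size(void)
{
	return(ASSUMED_EMPTY_PAGE_FREE_SPACE / 2);
}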
+/*********************************************************************
Creates a table definition to an InnoDB database. */
static
int
@@ -4604,7 +4683,7 @@ create_table_def(
}
}
- dict_mem_table_add_col(table,
+ dict_mem_table_add_col(table, table->heap,
(char*) field->field_name,
col_type,
dtype_form_prtype(
@@ -4617,6 +4696,8 @@ create_table_def(
error = row_create_table_for_mysql(table, trx);
+ innodb_check_for_record_too_big_error(flags & DICT_TF_COMPACT, error);
+
error = convert_error_code_to_mysql(error, NULL);
DBUG_RETURN(error);
@@ -4739,9 +4820,12 @@ create_index(
sure we don't create too long indexes. */
error = row_create_index_for_mysql(index, trx, field_lengths);
+ innodb_check_for_record_too_big_error(form->s->row_type
+ != ROW_TYPE_REDUNDANT, error);
+
error = convert_error_code_to_mysql(error, NULL);
- my_free((gptr) field_lengths, MYF(0));
+ my_free(field_lengths, MYF(0));
DBUG_RETURN(error);
}
@@ -4754,6 +4838,8 @@ int
create_clustered_index_when_no_primary(
/*===================================*/
trx_t* trx, /* in: InnoDB transaction handle */
+ ulint comp, /* in: ROW_FORMAT:
+ nonzero=COMPACT, 0=REDUNDANT */
const char* table_name) /* in: table name */
{
dict_index_t* index;
@@ -4762,10 +4848,12 @@ create_clustered_index_when_no_primary(
/* We pass 0 as the space id, and determine at a lower level the space
id where to store the table */
- index = dict_mem_index_create((char*) table_name,
- (char*) "GEN_CLUST_INDEX", 0, DICT_CLUSTERED, 0);
+ index = dict_mem_index_create(table_name, "GEN_CLUST_INDEX",
+ 0, DICT_CLUSTERED, 0);
error = row_create_index_for_mysql(index, trx, NULL);
+ innodb_check_for_record_too_big_error(comp, error);
+
error = convert_error_code_to_mysql(error, NULL);
return(error);
@@ -4807,7 +4895,7 @@ ha_innobase::create(
uint i;
char name2[FN_REFLEN];
char norm_name[FN_REFLEN];
- THD *thd= current_thd;
+ THD* thd = ha_thd();
ib_longlong auto_inc_value;
ulint flags;
@@ -4825,7 +4913,7 @@ ha_innobase::create(
/* Get the transaction associated with the current thd, or create one
if not yet created */
- parent_trx = check_trx_exists(ht, thd);
+ parent_trx = check_trx_exists(thd);
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -4835,13 +4923,13 @@ ha_innobase::create(
trx = trx_allocate_for_mysql();
trx->mysql_thd = thd;
- trx->mysql_query_str = &((*thd).query);
+ trx->mysql_query_str = thd_query(thd);
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+ if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
trx->check_foreigns = FALSE;
}
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
+ if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
trx->check_unique_secondary = FALSE;
}
@@ -4895,8 +4983,9 @@ ha_innobase::create(
order the rows by their row id which is internally generated
by InnoDB */
- error = create_clustered_index_when_no_primary(trx,
- norm_name);
+ error = create_clustered_index_when_no_primary(
+ trx, form->s->row_type != ROW_TYPE_REDUNDANT,
+ norm_name);
if (error) {
goto cleanup;
}
@@ -4921,9 +5010,9 @@ ha_innobase::create(
}
}
- if (thd->query != NULL) {
+ if (*trx->mysql_query_str) {
error = row_table_add_foreign_constraints(trx,
- thd->query, norm_name,
+ *trx->mysql_query_str, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE);
error = convert_error_code_to_mysql(error, NULL);
@@ -4958,7 +5047,10 @@ ha_innobase::create(
maximum value in the column. */
auto_inc_value = create_info->auto_increment_value;
+
+ dict_table_autoinc_lock(innobase_table);
dict_table_autoinc_initialize(innobase_table, auto_inc_value);
+ dict_table_autoinc_unlock(innobase_table);
}
/* Tell the InnoDB server that there might be work for
@@ -4997,7 +5089,7 @@ ha_innobase::discard_or_import_tablespace(
ut_a(prebuilt->trx);
ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
dict_table = prebuilt->table;
trx = prebuilt->trx;
@@ -5022,22 +5114,21 @@ ha_innobase::delete_all_rows(void)
/* out: error number */
{
int error;
- THD* thd = current_thd;
DBUG_ENTER("ha_innobase::delete_all_rows");
- if (thd->lex->sql_command != SQLCOM_TRUNCATE) {
+ /* Get the transaction associated with the current thd, or create one
+ if not yet created, and update prebuilt->trx */
+
+ update_thd(ha_thd());
+
+ if (thd_sql_command(user_thd) != SQLCOM_TRUNCATE) {
fallback:
/* We only handle TRUNCATE TABLE t as a special case.
DELETE FROM t will have to use ha_innobase::delete_row(). */
DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
}
- /* Get the transaction associated with the current thd, or create one
- if not yet created, and update prebuilt->trx */
-
- update_thd(thd);
-
/* Truncate the table in InnoDB */
error = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx);
@@ -5068,7 +5159,7 @@ ha_innobase::delete_table(
int error;
trx_t* parent_trx;
trx_t* trx;
- THD *thd= current_thd;
+ THD *thd = ha_thd();
char norm_name[1000];
DBUG_ENTER("ha_innobase::delete_table");
@@ -5076,7 +5167,7 @@ ha_innobase::delete_table(
/* Get the transaction associated with the current thd, or create one
if not yet created */
- parent_trx = check_trx_exists(ht, thd);
+ parent_trx = check_trx_exists(thd);
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -5091,14 +5182,14 @@ ha_innobase::delete_table(
trx = trx_allocate_for_mysql();
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
+ trx->mysql_thd = thd;
+ trx->mysql_query_str = thd_query(thd);
- if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+ if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
trx->check_foreigns = FALSE;
}
- if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) {
+ if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
trx->check_unique_secondary = FALSE;
}
@@ -5114,7 +5205,8 @@ ha_innobase::delete_table(
/* Drop the table in InnoDB */
error = row_drop_table_for_mysql(norm_name, trx,
- thd->lex->sql_command == SQLCOM_DROP_DB);
+ thd_sql_command(thd)
+ == SQLCOM_DROP_DB);
/* Flush the log to reduce probability that the .frm files and
the InnoDB data dictionary get out-of-sync if the user runs
@@ -5155,11 +5247,12 @@ innobase_drop_database(
char* ptr;
int error;
char* namebuf;
+ THD* thd = current_thd;
/* Get the transaction associated with the current thd, or create one
if not yet created */
- parent_trx = check_trx_exists(hton, current_thd);
+ parent_trx = check_trx_exists(thd);
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -5174,7 +5267,7 @@ innobase_drop_database(
}
ptr++;
- namebuf = my_malloc((uint) len + 2, MYF(0));
+ namebuf = (char*) my_malloc((uint) len + 2, MYF(0));
memcpy(namebuf, ptr, len);
namebuf[len] = '/';
@@ -5183,10 +5276,10 @@ innobase_drop_database(
innobase_casedn_str(namebuf);
#endif
trx = trx_allocate_for_mysql();
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
+ trx->mysql_thd = thd;
+ trx->mysql_query_str = thd_query(thd);
- if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+ if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
trx->check_foreigns = FALSE;
}
@@ -5232,13 +5325,14 @@ ha_innobase::rename_table(
trx_t* trx;
char norm_from[1000];
char norm_to[1000];
+ THD* thd = ha_thd();
DBUG_ENTER("ha_innobase::rename_table");
/* Get the transaction associated with the current thd, or create one
if not yet created */
- parent_trx = check_trx_exists(ht, current_thd);
+ parent_trx = check_trx_exists(thd);
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -5252,10 +5346,10 @@ ha_innobase::rename_table(
}
trx = trx_allocate_for_mysql();
- trx->mysql_thd = current_thd;
- trx->mysql_query_str = &((*current_thd).query);
+ trx->mysql_thd = thd;
+ trx->mysql_query_str = thd_query(thd);
- if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+ if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
trx->check_foreigns = FALSE;
}
@@ -5307,7 +5401,7 @@ ha_innobase::records_in_range(
{
KEY* key;
dict_index_t* index;
- mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc(
+ uchar* key_val_buff2 = (uchar*) my_malloc(
table->s->reclength
+ table->s->max_key_length + 100,
MYF(MY_FAE));
@@ -5323,7 +5417,7 @@ ha_innobase::records_in_range(
DBUG_ENTER("records_in_range");
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
prebuilt->trx->op_info = (char*)"estimating records in index range";
@@ -5349,7 +5443,7 @@ ha_innobase::records_in_range(
(ulint)upd_and_key_val_buff_len,
index,
(byte*) (min_key ? min_key->key :
- (const mysql_byte*) 0),
+ (const uchar*) 0),
(ulint) (min_key ? min_key->length : 0),
prebuilt->trx);
@@ -5357,7 +5451,7 @@ ha_innobase::records_in_range(
range_end, (byte*) key_val_buff2,
buff2_len, index,
(byte*) (max_key ? max_key->key :
- (const mysql_byte*) 0),
+ (const uchar*) 0),
(ulint) (max_key ? max_key->length : 0),
prebuilt->trx);
@@ -5371,7 +5465,7 @@ ha_innobase::records_in_range(
dtuple_free_for_mysql(heap1);
dtuple_free_for_mysql(heap2);
- my_free((gptr) key_val_buff2, MYF(0));
+ my_free(key_val_buff2, MYF(0));
prebuilt->trx->op_info = (char*)"";
@@ -5407,7 +5501,7 @@ ha_innobase::estimate_rows_upper_bound(void)
external_lock(). To be safe, update the thd of the current table
handle. */
- update_thd(current_thd);
+ update_thd(ha_thd());
prebuilt->trx->op_info = (char*)
"calculating upper bound for table rows";
@@ -5517,14 +5611,19 @@ ha_innobase::info(
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
- DBUG_RETURN(HA_ERR_CRASHED);
+ /* We return success (0) instead of HA_ERR_CRASHED,
+ because we want MySQL to process this query and not
+ stop, like it would do if it received the error code
+ HA_ERR_CRASHED. */
+
+ DBUG_RETURN(0);
}
/* We do not know if MySQL can call this function before calling
external_lock(). To be safe, update the thd of the current table
handle. */
- update_thd(current_thd);
+ update_thd(ha_thd());
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -5537,15 +5636,14 @@ ha_innobase::info(
if (flag & HA_STATUS_TIME) {
if (srv_stats_on_metadata) {
- /* In sql_show we call with this flag: update then statistics
- so that they are up-to-date */
+			/* In sql_show we call with this flag: update
+			the statistics so that they are up-to-date */
- prebuilt->trx->op_info = (char*)"updating table statistics";
+ prebuilt->trx->op_info = "updating table statistics";
dict_update_statistics(ib_table);
- prebuilt->trx->op_info = (char*)
- "returning various info to MySQL";
+ prebuilt->trx->op_info = "returning various info to MySQL";
}
my_snprintf(path, sizeof(path), "%s/%s%s",
@@ -5662,7 +5760,7 @@ ha_innobase::info(
table->key_info[i].rec_per_key[j]=
rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 :
- rec_per_key;
+ (ulong) rec_per_key;
}
index = dict_table_get_next_index_noninline(index);
@@ -5670,7 +5768,8 @@ ha_innobase::info(
}
if (flag & HA_STATUS_ERRKEY) {
- ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N);
+ ut_a(prebuilt->trx);
+ ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
errkey = (unsigned int) row_get_mysql_key_number_for_index(
(dict_index_t*) trx_get_error_info(prebuilt->trx));
@@ -5754,8 +5853,10 @@ ha_innobase::check(
{
ulint ret;
- ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N);
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ DBUG_ASSERT(thd == ha_thd());
+ ut_a(prebuilt->trx);
+ ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
+ ut_a(prebuilt->trx == thd_to_trx(thd));
if (prebuilt->mysql_template == NULL) {
/* Build the template; we will use a dummy template
@@ -5797,7 +5898,7 @@ ha_innobase::update_table_comment(
return((char*)comment); /* string too long */
}
- update_thd(current_thd);
+ update_thd(ha_thd());
prebuilt->trx->op_info = (char*)"returning table comment";
@@ -5812,9 +5913,9 @@ ha_innobase::update_table_comment(
mutex_enter_noninline(&srv_dict_tmpfile_mutex);
rewind(srv_dict_tmpfile);
- fprintf(srv_dict_tmpfile, "InnoDB free: %lu kB",
- (ulong) fsp_get_available_space_in_free_extents(
- prebuilt->table->space));
+ fprintf(srv_dict_tmpfile, "InnoDB free: %llu kB",
+ fsp_get_available_space_in_free_extents(
+ prebuilt->table->space));
dict_print_info_on_foreign_keys(FALSE, srv_dict_tmpfile,
prebuilt->trx, prebuilt->table);
@@ -5828,7 +5929,7 @@ ha_innobase::update_table_comment(
/* allocate buffer for the full string, and
read the contents of the temporary file */
- str = my_malloc(length + flen + 3, MYF(0));
+ str = (char*) my_malloc(length + flen + 3, MYF(0));
if (str) {
char* pos = str + length;
@@ -5868,7 +5969,7 @@ ha_innobase::get_foreign_key_create_info(void)
external_lock(). To be safe, update the thd of the current table
handle. */
- update_thd(current_thd);
+ update_thd(ha_thd());
prebuilt->trx->op_info = (char*)"getting info on foreign keys";
@@ -5896,7 +5997,7 @@ ha_innobase::get_foreign_key_create_info(void)
/* allocate buffer for the string, and
read the contents of the temporary file */
- str = my_malloc(flen + 1, MYF(0));
+ str = (char*) my_malloc(flen + 1, MYF(0));
if (str) {
rewind(srv_dict_tmpfile);
@@ -5917,7 +6018,7 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
DBUG_ENTER("get_foreign_key_list");
ut_a(prebuilt != NULL);
- update_thd(current_thd);
+ update_thd(ha_thd());
prebuilt->trx->op_info = (char*)"getting list of foreign keys";
trx_search_latch_release_if_reserved(prebuilt->trx);
mutex_enter_noninline(&(dict_sys->mutex));
@@ -5937,8 +6038,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
while (tmp_buff[i] != '/')
i++;
tmp_buff+= i + 1;
- f_key_info.forein_id= make_lex_string(thd, 0, tmp_buff,
- (uint) strlen(tmp_buff), 1);
+ f_key_info.forein_id = thd_make_lex_string(thd, 0,
+ tmp_buff, (uint) strlen(tmp_buff), 1);
tmp_buff= foreign->referenced_table_name;
/* Database name */
@@ -5950,22 +6051,23 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
}
db_name[i]= 0;
ulen= filename_to_tablename(db_name, uname, sizeof(uname));
- f_key_info.referenced_db= make_lex_string(thd, 0, uname, ulen, 1);
+ f_key_info.referenced_db = thd_make_lex_string(thd, 0,
+ uname, ulen, 1);
/* Table name */
tmp_buff+= i + 1;
ulen= filename_to_tablename(tmp_buff, uname, sizeof(uname));
- f_key_info.referenced_table= make_lex_string(thd, 0, uname,
- ulen, 1);
+ f_key_info.referenced_table = thd_make_lex_string(thd, 0,
+ uname, ulen, 1);
for (i= 0;;) {
tmp_buff= foreign->foreign_col_names[i];
- name= make_lex_string(thd, name, tmp_buff,
- (uint) strlen(tmp_buff), 1);
+ name = thd_make_lex_string(thd, name,
+ tmp_buff, (uint) strlen(tmp_buff), 1);
f_key_info.foreign_fields.push_back(name);
tmp_buff= foreign->referenced_col_names[i];
- name= make_lex_string(thd, name, tmp_buff,
- (uint) strlen(tmp_buff), 1);
+ name = thd_make_lex_string(thd, name,
+ tmp_buff, (uint) strlen(tmp_buff), 1);
f_key_info.referenced_fields.push_back(name);
if (++i >= foreign->n_fields)
break;
@@ -5992,8 +6094,8 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
length=8;
tmp_buff= "RESTRICT";
}
- f_key_info.delete_method= make_lex_string(thd, f_key_info.delete_method,
- tmp_buff, length, 1);
+ f_key_info.delete_method = thd_make_lex_string(
+ thd, f_key_info.delete_method, tmp_buff, length, 1);
if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)
@@ -6016,20 +6118,19 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
length=8;
tmp_buff= "RESTRICT";
}
- f_key_info.update_method= make_lex_string(thd, f_key_info.update_method,
- tmp_buff, length, 1);
+ f_key_info.update_method = thd_make_lex_string(
+ thd, f_key_info.update_method, tmp_buff, length, 1);
if (foreign->referenced_index &&
foreign->referenced_index->name)
{
- f_key_info.referenced_key_name=
- make_lex_string(thd, f_key_info.referenced_key_name,
- foreign->referenced_index->name,
- strlen(foreign->referenced_index->name), 1);
+ f_key_info.referenced_key_name = thd_make_lex_string(
+ thd, f_key_info.referenced_key_name,
+ foreign->referenced_index->name,
+ strlen(foreign->referenced_index->name), 1);
}
- FOREIGN_KEY_INFO *pf_key_info= ((FOREIGN_KEY_INFO *)
- thd->memdup((gptr) &f_key_info,
- sizeof(FOREIGN_KEY_INFO)));
+ FOREIGN_KEY_INFO *pf_key_info = (FOREIGN_KEY_INFO *)
+ thd_memdup(thd, &f_key_info, sizeof(FOREIGN_KEY_INFO));
f_key_list->push_back(pf_key_info);
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
@@ -6052,7 +6153,7 @@ ha_innobase::can_switch_engines(void)
DBUG_ENTER("ha_innobase::can_switch_engines");
- ut_a(prebuilt->trx == thd_to_trx(current_thd, ht));
+ ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
prebuilt->trx->op_info =
"determining if there are foreign key constraints";
@@ -6121,8 +6222,7 @@ ha_innobase::extra(
}
break;
case HA_EXTRA_RESET_STATE:
- prebuilt->keep_other_fields_on_keyread = 0;
- prebuilt->read_just_key = 0;
+ reset_template(prebuilt);
break;
case HA_EXTRA_NO_KEYREAD:
prebuilt->read_just_key = 0;
@@ -6133,6 +6233,26 @@ ha_innobase::extra(
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
prebuilt->keep_other_fields_on_keyread = 1;
break;
+
+ /* IMPORTANT: prebuilt->trx can be obsolete in
+ this method, because it is not sure that MySQL
+ calls external_lock before this method with the
+ parameters below. We must not invoke update_thd()
+ either, because the calling threads may change.
+ CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! */
+ case HA_EXTRA_IGNORE_DUP_KEY:
+ thd_to_trx(ha_thd())->duplicates |= TRX_DUP_IGNORE;
+ break;
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ thd_to_trx(ha_thd())->duplicates |= TRX_DUP_REPLACE;
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_REPLACE;
+ break;
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ thd_to_trx(ha_thd())->duplicates &=
+ ~(TRX_DUP_IGNORE | TRX_DUP_REPLACE);
+ break;
default:/* Do nothing */
;
}
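/* Editor's illustration, not part of this patch: the four HA_EXTRA_*
   cases above only set and clear two bits in trx->duplicates.  A
   self-contained model with stand-in bit values: */
#include <stdio.h>

#define DUP_IGNORE	0x1U	/* stand-in for TRX_DUP_IGNORE */
#define DUP_REPLACE	0x2U	/* stand-in for TRX_DUP_REPLACE */

int main(void)
{
	unsigned duplicates = 0;

	duplicates |= DUP_IGNORE;	/* HA_EXTRA_IGNORE_DUP_KEY */
	duplicates |= DUP_REPLACE;	/* HA_EXTRA_WRITE_CAN_REPLACE */
	duplicates &= ~DUP_REPLACE;	/* HA_EXTRA_WRITE_CANNOT_REPLACE */
	duplicates &= ~(DUP_IGNORE | DUP_REPLACE);
					/* HA_EXTRA_NO_IGNORE_DUP_KEY */
	printf("%u\n", duplicates);	/* 0 */
	return(0);
}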
@@ -6145,8 +6265,7 @@ int ha_innobase::reset()
if (prebuilt->blob_heap) {
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
- prebuilt->keep_other_fields_on_keyread = 0;
- prebuilt->read_just_key = 0;
+ reset_template(prebuilt);
return 0;
}
@@ -6187,8 +6306,7 @@ ha_innobase::start_stmt(
prebuilt->sql_stat_start = TRUE;
prebuilt->hint_need_to_fetch_extra_cols = 0;
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
+ reset_template(prebuilt);
if (!prebuilt->mysql_has_locked) {
/* This handle is for a temporary table created inside
@@ -6199,7 +6317,7 @@ ha_innobase::start_stmt(
prebuilt->select_lock_type = LOCK_X;
} else {
if (trx->isolation_level != TRX_ISO_SERIALIZABLE
- && thd->lex->sql_command == SQLCOM_SELECT
+ && thd_sql_command(thd) == SQLCOM_SELECT
&& lock_type == TL_READ) {
/* For other than temporary tables, we obtain
@@ -6275,13 +6393,35 @@ ha_innobase::external_lock(
update_thd(thd);
+ /* Statement based binlogging does not work in isolation level
+ READ UNCOMMITTED and READ COMMITTED since the necessary
+ locks cannot be taken. In this case, we print an
+ informative error message and return with an error. */
+ if (lock_type == F_WRLCK)
+ {
+ ulong const binlog_format= thd_binlog_format(thd);
+    ulong const tx_isolation = thd_tx_isolation(thd);
+ if (tx_isolation <= ISO_READ_COMMITTED &&
+ binlog_format == BINLOG_FORMAT_STMT)
+ {
+ char buf[256];
+ my_snprintf(buf, sizeof(buf),
+ "Transaction level '%s' in"
+ " InnoDB is not safe for binlog mode '%s'",
+ tx_isolation_names[tx_isolation],
+ binlog_format_names[binlog_format]);
+ my_error(ER_BINLOG_LOGGING_IMPOSSIBLE, MYF(0), buf);
+ DBUG_RETURN(HA_ERR_LOGGING_IMPOSSIBLE);
+ }
+ }
+
+
trx = prebuilt->trx;
prebuilt->sql_stat_start = TRUE;
prebuilt->hint_need_to_fetch_extra_cols = 0;
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
+ reset_template(prebuilt);
if (lock_type == F_WRLCK) {
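/* Editor's sketch, not part of this patch: the refusal above as a
   predicate.  The enum values are stand-ins ordered like the server's
   isolation and binlog-format enums. */
enum { ISO_READ_UNCOMMITTED_X, ISO_READ_COMMITTED_X,
       ISO_REPEATABLE_READ_X, ISO_SERIALIZABLE_X };
enum { BINLOG_FORMAT_STMT_X, BINLOG_FORMAT_ROW_X, BINLOG_FORMAT_MIXED_X };

static int
write_lock_is_binlog_safe(int tx_isolation, int binlog_format)
{
	/* Statement-based logging needs locks that are not taken at
	   READ COMMITTED and below, so such writes are refused. */
	return(!(tx_isolation <= ISO_READ_COMMITTED_X
		 && binlog_format == BINLOG_FORMAT_STMT_X));
}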
@@ -6311,8 +6451,8 @@ ha_innobase::external_lock(
if (trx->isolation_level == TRX_ISO_SERIALIZABLE
&& prebuilt->select_lock_type == LOCK_NONE
- && (thd->options
- & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
+ && thd_test_options(thd,
+ OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
/* To get serializable execution, we let InnoDB
conceptually add 'LOCK IN SHARE MODE' to all SELECTs
@@ -6339,9 +6479,9 @@ ha_innobase::external_lock(
if (prebuilt->select_lock_type != LOCK_NONE) {
- if (thd->lex->sql_command == SQLCOM_LOCK_TABLES
- && thd->variables.innodb_table_locks
- && (thd->options & OPTION_NOT_AUTOCOMMIT)
+ if (thd_sql_command(thd) == SQLCOM_LOCK_TABLES
+ && THDVAR(thd, table_locks)
+ && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT)
&& thd_in_lock_tables(thd)) {
ulint error = row_lock_table_for_mysql(
@@ -6349,7 +6489,7 @@ ha_innobase::external_lock(
if (error != DB_SUCCESS) {
error = convert_error_code_to_mysql(
- (int) error, user_thd);
+ (int) error, thd);
DBUG_RETURN((int) error);
}
}
@@ -6365,6 +6505,12 @@ ha_innobase::external_lock(
trx->n_mysql_tables_in_use--;
prebuilt->mysql_has_locked = FALSE;
+ /* Release a possible FIFO ticket and search latch. Since we
+ may reserve the kernel mutex, we have to release the search
+ system latch first to obey the latching order. */
+
+ innobase_release_stat_resources(trx);
+
/* If the MySQL lock count drops to zero we know that the current SQL
statement has ended */
@@ -6373,13 +6519,7 @@ ha_innobase::external_lock(
trx->mysql_n_tables_locked = 0;
prebuilt->used_in_HANDLER = FALSE;
- /* Release a possible FIFO ticket and search latch. Since we
- may reserve the kernel mutex, we have to release the search
- system latch first to obey the latching order. */
-
- innobase_release_stat_resources(trx);
-
- if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
+ if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
if (trx->active_trans != 0) {
innobase_commit(ht, thd, TRUE);
}
@@ -6420,8 +6560,7 @@ ha_innobase::transactional_table_lock(
update_thd(thd);
- if (prebuilt->table->ibd_file_missing
- && !thd_tablespace_op(current_thd)) {
+ if (prebuilt->table->ibd_file_missing && !thd_tablespace_op(thd)) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB error:\n"
"MySQL is trying to use a table handle but the .ibd file for\n"
@@ -6439,8 +6578,7 @@ ha_innobase::transactional_table_lock(
prebuilt->sql_stat_start = TRUE;
prebuilt->hint_need_to_fetch_extra_cols = 0;
- prebuilt->read_just_key = 0;
- prebuilt->keep_other_fields_on_keyread = FALSE;
+ reset_template(prebuilt);
if (lock_type == F_WRLCK) {
prebuilt->select_lock_type = LOCK_X;
@@ -6466,17 +6604,17 @@ ha_innobase::transactional_table_lock(
trx->active_trans = 1;
}
- if (thd->variables.innodb_table_locks && thd_in_lock_tables(thd)) {
+ if (THDVAR(thd, table_locks) && thd_in_lock_tables(thd)) {
ulint error = DB_SUCCESS;
error = row_lock_table_for_mysql(prebuilt, NULL, 0);
if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql((int) error, user_thd);
+ error = convert_error_code_to_mysql((int) error, thd);
DBUG_RETURN((int) error);
}
- if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
+ if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
/* Store the current undo_no of the transaction
so that we know where to roll back if we have
@@ -6522,11 +6660,7 @@ innodb_show_status(
DBUG_ENTER("innodb_show_status");
- if (have_innodb != SHOW_OPTION_YES) {
- DBUG_RETURN(FALSE);
- }
-
- trx = check_trx_exists(hton, thd);
+ trx = check_trx_exists(thd);
innobase_release_stat_resources(trx);
@@ -6556,7 +6690,7 @@ innodb_show_status(
/* allocate buffer for the string, and
read the contents of the temporary file */
- if (!(str = my_malloc(usable_len + 1, MYF(0)))) {
+ if (!(str = (char*) my_malloc(usable_len + 1, MYF(0)))) {
mutex_exit_noninline(&srv_monitor_file_mutex);
DBUG_RETURN(TRUE);
}
@@ -6717,12 +6851,12 @@ bool innobase_show_status(handlerton *hton, THD* thd,
locking.
****************************************************************************/
-static mysql_byte* innobase_get_key(INNOBASE_SHARE* share, uint* length,
+static uchar* innobase_get_key(INNOBASE_SHARE* share, size_t *length,
my_bool not_used __attribute__((unused)))
{
*length=share->table_name_length;
- return (mysql_byte*) share->table_name;
+ return (uchar*) share->table_name;
}
static INNOBASE_SHARE* get_share(const char* table_name)
@@ -6732,7 +6866,7 @@ static INNOBASE_SHARE* get_share(const char* table_name)
uint length=(uint) strlen(table_name);
if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables,
- (mysql_byte*) table_name,
+ (uchar*) table_name,
length))) {
share = (INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1,
@@ -6743,9 +6877,9 @@ static INNOBASE_SHARE* get_share(const char* table_name)
strmov(share->table_name,table_name);
if (my_hash_insert(&innobase_open_tables,
- (mysql_byte*) share)) {
+ (uchar*) share)) {
pthread_mutex_unlock(&innobase_share_mutex);
- my_free((gptr) share,0);
+ my_free(share,0);
return 0;
}
@@ -6765,10 +6899,10 @@ static void free_share(INNOBASE_SHARE* share)
pthread_mutex_lock(&innobase_share_mutex);
if (!--share->use_count) {
- hash_delete(&innobase_open_tables, (mysql_byte*) share);
+ hash_delete(&innobase_open_tables, (uchar*) share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free(share, MYF(0));
}
pthread_mutex_unlock(&innobase_share_mutex);
@@ -6804,7 +6938,7 @@ ha_innobase::store_lock(
because we call update_thd() later, in ::external_lock()! Failure to
understand this caused a serious memory corruption bug in 5.1.11. */
- trx = check_trx_exists(ht, thd);
+ trx = check_trx_exists(thd);
/* NOTE: MySQL can call this function with lock 'type' TL_IGNORE!
Be careful to ignore TL_IGNORE if we are going to do something with
@@ -6814,10 +6948,9 @@ ha_innobase::store_lock(
of the transaction. */
if (lock_type != TL_IGNORE
- && trx->n_mysql_tables_in_use == 0) {
+ && trx->n_mysql_tables_in_use == 0) {
trx->isolation_level = innobase_map_isolation_level(
- (enum_tx_isolation)
- thd->variables.tx_isolation);
+ (enum_tx_isolation) thd_tx_isolation(thd));
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
&& trx->global_read_view) {
@@ -6827,23 +6960,24 @@ ha_innobase::store_lock(
read_view_close_for_mysql(trx);
}
-
}
+ DBUG_ASSERT(thd == current_thd);
const bool in_lock_tables = thd_in_lock_tables(thd);
+ const uint sql_command = thd_sql_command(thd);
- if (thd->lex->sql_command == SQLCOM_DROP_TABLE) {
+ if (sql_command == SQLCOM_DROP_TABLE) {
/* MySQL calls this function in DROP TABLE though this table
handle may belong to another thd that is running a query. Let
us in that case skip any changes to the prebuilt struct. */
- } else if ((lock_type == TL_READ && in_lock_tables) ||
- (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) ||
- lock_type == TL_READ_WITH_SHARED_LOCKS ||
- lock_type == TL_READ_NO_INSERT ||
- (thd->lex->sql_command != SQLCOM_SELECT
- && lock_type != TL_IGNORE)) {
+ } else if ((lock_type == TL_READ && in_lock_tables)
+ || (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables)
+ || lock_type == TL_READ_WITH_SHARED_LOCKS
+ || lock_type == TL_READ_NO_INSERT
+ || (lock_type != TL_IGNORE
+ && sql_command != SQLCOM_SELECT)) {
/* The OR cases above are in this order:
1) MySQL is doing LOCK TABLES ... READ LOCAL, or we
@@ -6868,12 +7002,12 @@ ha_innobase::store_lock(
isolation_level = trx->isolation_level;
if ((srv_locks_unsafe_for_binlog
- || isolation_level == TRX_ISO_READ_COMMITTED)
- && isolation_level != TRX_ISO_SERIALIZABLE
- && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
- && (thd->lex->sql_command == SQLCOM_INSERT_SELECT
- || thd->lex->sql_command == SQLCOM_UPDATE
- || thd->lex->sql_command == SQLCOM_CREATE_TABLE)) {
+ || isolation_level == TRX_ISO_READ_COMMITTED)
+ && isolation_level != TRX_ISO_SERIALIZABLE
+ && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
+ && (sql_command == SQLCOM_INSERT_SELECT
+ || sql_command == SQLCOM_UPDATE
+ || sql_command == SQLCOM_CREATE_TABLE)) {
/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
@@ -6886,7 +7020,7 @@ ha_innobase::store_lock(
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
- } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) {
+ } else if (sql_command == SQLCOM_CHECKSUM) {
/* Use consistent read for checksum table */
prebuilt->select_lock_type = LOCK_NONE;
@@ -6916,7 +7050,7 @@ ha_innobase::store_lock(
(if it does not use a consistent read). */
if (lock_type == TL_READ
- && thd->lex->sql_command == SQLCOM_LOCK_TABLES) {
+ && sql_command == SQLCOM_LOCK_TABLES) {
/* We come here if MySQL is processing LOCK TABLES
... READ LOCAL. MyISAM under that table lock type
reads the table as it was at the time the lock was
@@ -6942,24 +7076,13 @@ ha_innobase::store_lock(
TRUE there). */
if ((lock_type >= TL_WRITE_CONCURRENT_INSERT
- && lock_type <= TL_WRITE)
- && !(in_lock_tables
- && thd->lex->sql_command == SQLCOM_LOCK_TABLES)
- && !thd_tablespace_op(thd)
- && thd->lex->sql_command != SQLCOM_TRUNCATE
- && thd->lex->sql_command != SQLCOM_OPTIMIZE
-
-#ifdef __WIN__
- /* For alter table on win32 for succesful operation
- completion it is used TL_WRITE(=10) lock instead of
- TL_WRITE_ALLOW_READ(=6), however here in innodb handler
- TL_WRITE is lifted to TL_WRITE_ALLOW_WRITE, which causes
- race condition when several clients do alter table
- simultaneously (bug #17264). This fix avoids the problem. */
- && thd->lex->sql_command != SQLCOM_ALTER_TABLE
-#endif
-
- && thd->lex->sql_command != SQLCOM_CREATE_TABLE) {
+ && lock_type <= TL_WRITE)
+ && !(in_lock_tables
+ && sql_command == SQLCOM_LOCK_TABLES)
+ && !thd_tablespace_op(thd)
+ && sql_command != SQLCOM_TRUNCATE
+ && sql_command != SQLCOM_OPTIMIZE
+ && sql_command != SQLCOM_CREATE_TABLE) {
lock_type = TL_WRITE_ALLOW_WRITE;
}
@@ -6972,10 +7095,10 @@ ha_innobase::store_lock(
We especially allow concurrent inserts if MySQL is at the
start of a stored procedure call (SQLCOM_CALL)
- (MySQL does have in_lock_tables TRUE there). */
+ (MySQL does have thd_in_lock_tables() TRUE there). */
if (lock_type == TL_READ_NO_INSERT
- && thd->lex->sql_command != SQLCOM_LOCK_TABLES) {
+ && sql_command != SQLCOM_LOCK_TABLES) {
lock_type = TL_READ;
}
@@ -6997,20 +7120,25 @@ the value of the auto-inc counter. */
int
ha_innobase::innobase_read_and_init_auto_inc(
/*=========================================*/
- /* out: 0 or error code: deadlock or lock wait
- timeout */
- longlong* ret) /* out: auto-inc value */
+ /* out: 0 or error code:
+ deadlock or lock wait timeout */
+ longlong* value) /* out: the autoinc value */
{
longlong auto_inc;
- ulint old_select_lock_type;
+ ibool stmt_start;
+ int mysql_error = 0;
+ dict_table_t* innodb_table = prebuilt->table;
ibool trx_was_not_started = FALSE;
- int error;
ut_a(prebuilt);
ut_a(prebuilt->table);
+	/* Remember whether we are at the beginning of an SQL statement.
+	This function must not change that flag. */
+ stmt_start = prebuilt->sql_stat_start;
+
/* Prepare prebuilt->trx in the table handle */
- update_thd(current_thd);
+ update_thd(ha_thd());
if (prebuilt->trx->conc_state == TRX_NOT_STARTED) {
trx_was_not_started = TRUE;
@@ -7021,114 +7149,115 @@ ha_innobase::innobase_read_and_init_auto_inc(
trx_search_latch_release_if_reserved(prebuilt->trx);
- auto_inc = dict_table_autoinc_read(prebuilt->table);
-
- if (auto_inc != 0) {
- /* Already initialized */
- *ret = auto_inc;
+ dict_table_autoinc_lock(prebuilt->table);
- error = 0;
+ auto_inc = dict_table_autoinc_read(prebuilt->table);
- goto func_exit_early;
+	/* Was the AUTOINC counter reset during normal processing? If
+	so, we simply start the count from 1. No need to go to the index.*/
+ if (auto_inc == 0 && innodb_table->autoinc_inited) {
+ ++auto_inc;
+ dict_table_autoinc_initialize(innodb_table, auto_inc);
}
- error = row_lock_table_autoinc_for_mysql(prebuilt);
+ if (auto_inc == 0) {
+ dict_index_t* index;
+ ulint error;
+ const char* autoinc_col_name;
- if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(error, user_thd);
+ ut_a(!innodb_table->autoinc_inited);
- goto func_exit_early;
- }
+ index = innobase_get_index(table->s->next_number_index);
- /* Check again if someone has initialized the counter meanwhile */
- auto_inc = dict_table_autoinc_read(prebuilt->table);
+ autoinc_col_name = table->found_next_number_field->field_name;
- if (auto_inc != 0) {
- *ret = auto_inc;
+ error = row_search_max_autoinc(
+ index, autoinc_col_name, &auto_inc);
- error = 0;
+ if (error == DB_SUCCESS) {
+ ++auto_inc;
+ dict_table_autoinc_initialize(innodb_table, auto_inc);
+ } else {
+ fprintf(stderr, " InnoDB error: Couldn't read the "
+ "max AUTOINC value from index (%s).\n",
+ index->name);
- goto func_exit_early;
+ mysql_error = 1;
+ }
}
- (void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index, 1);
+ *value = auto_inc;
- /* Starting from 5.0.9, we use a consistent read to read the auto-inc
- column maximum value. This eliminates the spurious deadlocks caused
- by the row X-lock that we previously used. Note the following flaw
- in our algorithm: if some other user meanwhile UPDATEs the auto-inc
- column, our consistent read will not return the largest value. We
- accept this flaw, since the deadlocks were a bigger trouble. */
+ dict_table_autoinc_unlock(prebuilt->table);
- /* Fetch all the columns in the key */
+ /* Since MySQL does not seem to call autocommit after SHOW TABLE
+ STATUS (even if we would register the trx here), we commit our
+ transaction here if it was started here. This is to eliminate a
+ dangling transaction. If the user had AUTOCOMMIT=0, then SHOW
+ TABLE STATUS does leave a dangling transaction if the user does not
+ himself call COMMIT. */
- prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS;
+ if (trx_was_not_started) {
- old_select_lock_type = prebuilt->select_lock_type;
- prebuilt->select_lock_type = LOCK_NONE;
+ innobase_commit_low(prebuilt->trx);
+ }
- /* Eliminate an InnoDB error print that happens when we try to SELECT
- from a table when no table has been locked in ::external_lock(). */
- prebuilt->trx->n_mysql_tables_in_use++;
+ prebuilt->sql_stat_start = stmt_start;
- error = index_last(table->record[1]);
+ return(mysql_error);
+}
- prebuilt->trx->n_mysql_tables_in_use--;
- prebuilt->select_lock_type = old_select_lock_type;
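
The rewritten innobase_read_and_init_auto_inc() above reduces to a lock/read/seed/unlock sequence around the in-memory counter. A condensed sketch of that pattern, using only functions this patch introduces (error handling and the dangling-transaction commit elided; not a drop-in replacement):

	dict_table_autoinc_lock(table);		/* serialize access to the counter */
	auto_inc = dict_table_autoinc_read(table);

	if (auto_inc == 0) {
		/* not yet initialized: seed from MAX(autoinc column) + 1 */
		error = row_search_max_autoinc(index, col_name, &auto_inc);
		dict_table_autoinc_initialize(table, ++auto_inc);
	}

	dict_table_autoinc_unlock(table);
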
+/*******************************************************************************
+Read the next autoinc value and initialize the table if it is not yet
+initialized. On return, if there is no error, the table's AUTOINC lock
+is held.*/
- if (error) {
- if (error == HA_ERR_END_OF_FILE) {
- /* The table was empty, initialize to 1 */
- auto_inc = 1;
+ulong
+ha_innobase::innobase_get_auto_increment(
+ ulonglong* value) /* out: autoinc value */
+{
+ ulong error;
- error = 0;
- } else {
- /* This should not happen in a consistent read */
- sql_print_error("Consistent read of auto-inc column "
- "returned %lu", (ulong) error);
- auto_inc = -1;
+ do {
+ error = innobase_autoinc_lock();
- goto func_exit;
- }
- } else {
- /* Initialize to max(col) + 1; we use
- 'found_next_number_field' below because MySQL in SHOW TABLE
- STATUS does not seem to set 'next_number_field'. The comment
- in table.h says that 'next_number_field' is set when it is
- 'active'.
- Since 5.1 MySQL enforces that we announce fields which we will
- read; as we only do a val_*() call, dbug_tmp_use_all_columns()
- with read_set is sufficient. */
+ if (error == DB_SUCCESS) {
+ ib_longlong autoinc;
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
- auto_inc = (longlong) table->found_next_number_field->
- val_int_offset(table->s->rec_buff_length) + 1;
- dbug_tmp_restore_column_map(table->read_set, old_map);
- }
+ /* Determine the first value of the interval */
+ autoinc = dict_table_autoinc_read(prebuilt->table);
- dict_table_autoinc_initialize(prebuilt->table, auto_inc);
+ /* We need to initialize the AUTO-INC value, for
+ that we release all locks.*/
+ if (autoinc <= 0) {
+ trx_t* trx;
-func_exit:
- (void) extra(HA_EXTRA_NO_KEYREAD);
+ trx = prebuilt->trx;
+ dict_table_autoinc_unlock(prebuilt->table);
- index_end();
+ /* If we had reserved the AUTO-INC
+ lock in this SQL statement we release
+ it before retrying.*/
+ row_unlock_table_autoinc_for_mysql(trx);
- *ret = auto_inc;
+ /* Just to make sure */
+ ut_a(!trx->auto_inc_lock);
-func_exit_early:
- /* Since MySQL does not seem to call autocommit after SHOW TABLE
- STATUS (even if we would register the trx here), we commit our
- transaction here if it was started here. This is to eliminate a
- dangling transaction. If the user had AUTOCOMMIT=0, then SHOW
- TABLE STATUS does leave a dangling transaction if the user does not
- himself call COMMIT. */
+ int mysql_error;
- if (trx_was_not_started) {
+ mysql_error = innobase_read_and_init_auto_inc(
+ &autoinc);
- innobase_commit_low(prebuilt->trx);
- }
+ if (!mysql_error) {
+ /* Should have read the proper value */
+ ut_a(autoinc > 0);
+ } else {
+ error = DB_ERROR;
+ }
+ } else {
+ *value = (ulonglong) autoinc;
+ }
+ }
+ } while (*value == 0 && error == DB_SUCCESS);
return(error);
}
@@ -7141,37 +7270,91 @@ auto-inc counter in *first_value, and ULONGLONG_MAX in *nb_reserved_values (as
we have a table-level lock). offset, increment, nb_desired_values are ignored.
*first_value is set to -1 if error (deadlock or lock wait timeout) */
-void ha_innobase::get_auto_increment(
+void
+ha_innobase::get_auto_increment(
/*=================================*/
- ulonglong offset, /* in */
- ulonglong increment, /* in */
- ulonglong nb_desired_values, /* in */
- ulonglong *first_value, /* out */
- ulonglong *nb_reserved_values) /* out */
+	ulonglong	offset,			/* in: table autoinc offset */
+	ulonglong	increment,		/* in: table autoinc increment */
+	ulonglong	nb_desired_values,	/* in: number of values required */
+ ulonglong *first_value, /* out: the autoinc value */
+ ulonglong *nb_reserved_values) /* out: count of reserved values */
{
- longlong nr;
- int error;
+ trx_t* trx;
+ ulint error;
+ ulonglong autoinc = 0;
/* Prepare prebuilt->trx in the table handle */
- update_thd(current_thd);
+ update_thd(ha_thd());
- error = innobase_read_and_init_auto_inc(&nr);
+ error = innobase_get_auto_increment(&autoinc);
- if (error) {
- /* This should never happen in the current (5.0.6) code, since
- we call this function only after the counter has been
- initialized. */
+ if (error != DB_SUCCESS) {
+		/* This should never happen in code newer than version 5.0.6,
+ since we call this function only after the counter
+ has been initialized. */
ut_print_timestamp(stderr);
- sql_print_error("Error %lu in ::get_auto_increment()",
- (ulong) error);
- *first_value= (~(ulonglong) 0);
+ sql_print_error("Error %lu in ::get_auto_increment()", error);
+
+ *first_value = (~(ulonglong) 0);
return;
}
- *first_value= (ulonglong) nr;
- /* table-level autoinc lock reserves up to +inf */
- *nb_reserved_values= ULONGLONG_MAX;
+	/* This is a hack, since nb_desired_values seems to be accurate only
+	for the first call to get_auto_increment() for multi-row INSERT and
+	meaningless for other statements, e.g. LOAD DATA. Subsequent calls to
+	this method for the same statement result in different values which
+	don't make sense. Therefore we store the value the first time we are
+	called and count down from that as rows are written (see write_row()).
+	*/
+
+ trx = prebuilt->trx;
+
+ /* Called for the first time ? */
+ if (trx->n_autoinc_rows == 0) {
+
+ trx->n_autoinc_rows = (ulint) nb_desired_values;
+
+ /* It's possible for nb_desired_values to be 0:
+ e.g., INSERT INTO T1(C) SELECT C FROM T2; */
+ if (nb_desired_values == 0) {
+
+ trx->n_autoinc_rows = 1;
+ }
+
+ set_if_bigger(*first_value, autoinc);
+	/* Not in the middle of a multi-row INSERT. */
+ } else if (prebuilt->last_value == 0) {
+ set_if_bigger(*first_value, autoinc);
+ }
+
+ *nb_reserved_values = trx->n_autoinc_rows;
+
+ /* With old style AUTOINC locking we only update the table's
+ AUTOINC counter after attempting to insert the row. */
+ if (innobase_autoinc_lock_mode != AUTOINC_OLD_STYLE_LOCKING) {
+
+ /* Compute the last value in the interval */
+ prebuilt->last_value = *first_value +
+ (*nb_reserved_values * increment);
+
+ ut_a(prebuilt->last_value >= *first_value);
+
+ /* Update the table autoinc variable */
+ dict_table_autoinc_update(
+ prebuilt->table, prebuilt->last_value);
+ } else {
+ /* This will force write_row() into attempting an update
+ of the table's AUTOINC counter. */
+ prebuilt->last_value = 0;
+ }
+
+	/* The increment to be used to increase the AUTOINC value; we use
+	this in write_row() and update_row() to increase the autoinc counter
+	for columns that are filled in by the user.*/
+ prebuilt->table->autoinc_increment = increment;
+
+ dict_table_autoinc_unlock(prebuilt->table);
}
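
With new-style locking, the interval arithmetic above is straightforward. A worked example with hypothetical numbers, assuming a 3-row INSERT, an auto_increment_increment of 2, and a counter currently at 11:

	ulonglong	first_value = 11;	/* counter value handed out */
	ulonglong	nb_reserved = 3;	/* trx->n_autoinc_rows */
	ulonglong	increment = 2;

	/* rows receive 11, 13 and 15; the table counter advances to
	11 + 3 * 2 = 17, the value stored in prebuilt->last_value */
	ulonglong	last_value = first_value + nb_reserved * increment;
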
/* See comment in handler.h */
@@ -7182,7 +7365,7 @@ ha_innobase::reset_auto_increment(ulonglong value)
int error;
- update_thd(current_thd);
+ update_thd(ha_thd());
error = row_lock_table_autoinc_for_mysql(prebuilt);
@@ -7192,7 +7375,7 @@ ha_innobase::reset_auto_increment(ulonglong value)
DBUG_RETURN(error);
}
- dict_table_autoinc_initialize(prebuilt->table, value);
+ innobase_reset_autoinc(value);
DBUG_RETURN(0);
}
@@ -7201,7 +7384,7 @@ ha_innobase::reset_auto_increment(ulonglong value)
bool
ha_innobase::get_error_message(int error, String *buf)
{
- trx_t* trx = check_trx_exists(ht, current_thd);
+ trx_t* trx = check_trx_exists(ha_thd());
buf->copy(trx->detailed_error, strlen(trx->detailed_error),
system_charset_info);
@@ -7219,9 +7402,9 @@ ha_innobase::cmp_ref(
/*=================*/
/* out: < 0 if ref1 < ref2, 0 if equal, else
> 0 */
- const mysql_byte* ref1, /* in: an (internal) primary key value in the
+ const uchar* ref1, /* in: an (internal) primary key value in the
MySQL key value format */
- const mysql_byte* ref2) /* in: an (internal) primary key value in the
+ const uchar* ref2) /* in: an (internal) primary key value in the
MySQL key value format */
{
enum_field_types mysql_type;
@@ -7263,9 +7446,8 @@ ha_innobase::cmp_ref(
ref1 += 2;
ref2 += 2;
- result = ((Field_blob*)field)->cmp(
- (const char*)ref1, len1,
- (const char*)ref2, len2);
+ result = ((Field_blob*)field)->cmp( ref1, len1,
+ ref2, len2);
} else {
result = field->key_cmp(ref1, ref2);
}
@@ -7324,7 +7506,6 @@ ha_innobase::get_mysql_bin_log_pos()
return(trx_sys_mysql_bin_log_pos);
}
-extern "C" {
/**********************************************************************
This function is used to find the storage length in bytes of the first n
characters for prefix indexes using a multibyte character set. The function
@@ -7333,7 +7514,7 @@ index field in bytes.
NOTE: the prototype of this function is copied to data0type.c! If you change
this function, you MUST change also data0type.c! */
-
+extern "C"
ulint
innobase_get_at_most_n_mbchars(
/*===========================*/
@@ -7398,46 +7579,6 @@ innobase_get_at_most_n_mbchars(
return(char_length);
}
-}
-
-/**********************************************************************
-This function returns true if
-
-1) SQL-query in the current thread
-is either REPLACE or LOAD DATA INFILE REPLACE.
-
-2) SQL-query in the current thread
-is INSERT ON DUPLICATE KEY UPDATE.
-
-NOTE that storage/innobase/row/row0ins.c must contain the
-prototype for this function ! */
-extern "C"
-ibool
-innobase_query_is_update(void)
-/*==========================*/
-{
- THD* thd = current_thd;
-
- if (!thd) {
- /* InnoDB's internal threads may run InnoDB stored procedures
- that call this function. Then current_thd is not defined
- (it is probably NULL). */
-
- return(FALSE);
- }
-
- switch (thd->lex->sql_command) {
- case SQLCOM_REPLACE:
- case SQLCOM_REPLACE_SELECT:
- return(TRUE);
- case SQLCOM_LOAD:
- return(thd->lex->duplicates == DUP_REPLACE);
- case SQLCOM_INSERT:
- return(thd->lex->duplicates == DUP_UPDATE);
- default:
- return(FALSE);
- }
-}
/***********************************************************************
This function is used to prepare X/Open XA distributed transaction */
@@ -7453,10 +7594,10 @@ innobase_xa_prepare(
FALSE - the current SQL statement ended */
{
int error = 0;
- trx_t* trx = check_trx_exists(hton, thd);
+ trx_t* trx = check_trx_exists(thd);
- if (thd->lex->sql_command != SQLCOM_XA_PREPARE &&
- (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))))
+ if (thd_sql_command(thd) != SQLCOM_XA_PREPARE &&
+ (all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
{
/* For ibbackup to work the order of transactions in binlog
@@ -7482,12 +7623,12 @@ innobase_xa_prepare(
trx->active_trans = 2;
}
- if (!thd->variables.innodb_support_xa) {
+ if (!THDVAR(thd, support_xa)) {
return(0);
}
- trx->xid=thd->transaction.xid_state.xid;
+ thd_get_xid(thd, (MYSQL_XID*) &trx->xid);
/* Release a possible FIFO ticket and search latch. Since we will
reserve the kernel mutex, we have to release the search system latch
@@ -7502,7 +7643,7 @@ innobase_xa_prepare(
}
if (all
- || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
+ || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) {
/* We were instructed to prepare the whole transaction, or
this is an SQL statement end and autocommit is on */
@@ -7514,12 +7655,11 @@ innobase_xa_prepare(
/* We just mark the SQL statement ended and do not do a
transaction prepare */
- if (trx->auto_inc_lock) {
- /* If we had reserved the auto-inc lock for some
- table in this SQL statement we release it now */
+ /* If we had reserved the auto-inc lock for some
+ table in this SQL statement we release it now */
+
+ row_unlock_table_autoinc_for_mysql(trx);
- row_unlock_table_autoinc_for_mysql(trx);
- }
/* Store the current undo_no of the transaction so that we
know where to roll back if we have to roll back the next
SQL statement */
@@ -7614,8 +7754,7 @@ innobase_create_cursor_view(
handlerton *hton, /* in: innobase hton */
THD* thd) /* in: user thread handle */
{
- return(read_cursor_view_create_for_mysql(
- check_trx_exists(hton, thd)));
+ return(read_cursor_view_create_for_mysql(check_trx_exists(thd)));
}
/***********************************************************************
@@ -7630,8 +7769,8 @@ innobase_close_cursor_view(
THD* thd, /* in: user thread handle */
void* curview)/* in: Consistent read view to be closed */
{
- read_cursor_view_close_for_mysql(check_trx_exists(hton, current_thd),
- (cursor_view_t*) curview);
+ read_cursor_view_close_for_mysql(check_trx_exists(thd),
+ (cursor_view_t*) curview);
}
/***********************************************************************
@@ -7647,8 +7786,8 @@ innobase_set_cursor_view(
THD* thd, /* in: user thread handle */
void* curview)/* in: Consistent cursor view to be set */
{
- read_cursor_set_for_mysql(check_trx_exists(hton, current_thd),
- (cursor_view_t*) curview);
+ read_cursor_set_for_mysql(check_trx_exists(thd),
+ (cursor_view_t*) curview);
}
@@ -7694,6 +7833,234 @@ static SHOW_VAR innodb_status_variables_export[]= {
static struct st_mysql_storage_engine innobase_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+/* plugin options */
+static MYSQL_SYSVAR_BOOL(checksums, innobase_use_checksums,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Enable InnoDB checksums validation (enabled by default). "
+ "Disable with --skip-innodb-checksums.",
+ NULL, NULL, TRUE);
+
+static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir,
+ PLUGIN_VAR_READONLY,
+ "The common part for InnoDB table spaces.",
+ NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Enable InnoDB doublewrite buffer (enabled by default). "
+ "Disable with --skip-innodb-doublewrite.",
+ NULL, NULL, TRUE);
+
+static MYSQL_SYSVAR_ULONG(fast_shutdown, innobase_fast_shutdown,
+ PLUGIN_VAR_OPCMDARG,
+ "Speeds up the shutdown process of the InnoDB storage engine. Possible "
+ "values are 0, 1 (faster)"
+ /*
+ NetWare can't close unclosed files, can't automatically kill remaining
+ threads, etc, so on this OS we disable the crash-like InnoDB shutdown.
+ */
+ IF_NETWARE("", " or 2 (fastest - crash-like)")
+ ".",
+ NULL, NULL, 1, 0, IF_NETWARE(1,2), 0);
+
+static MYSQL_SYSVAR_BOOL(file_per_table, innobase_file_per_table,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Stores each InnoDB table to an .ibd file in the database dir.",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit,
+ PLUGIN_VAR_OPCMDARG,
+ "Set to 0 (write and flush once per second),"
+ " 1 (write and flush at each commit)"
+ " or 2 (write at commit, flush once per second).",
+ NULL, NULL, 1, 0, 2, 0);
+
+static MYSQL_SYSVAR_STR(flush_method, innobase_unix_file_flush_method,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "With which method to flush data.", NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_BOOL(locks_unsafe_for_binlog, innobase_locks_unsafe_for_binlog,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Force InnoDB to not use next-key locking, to use only row-level locking.",
+ NULL, NULL, FALSE);
+
+#ifdef UNIV_LOG_ARCHIVE
+static MYSQL_SYSVAR_STR(log_arch_dir, innobase_log_arch_dir,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Where full logs should be archived.", NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_BOOL(log_archive, innobase_log_archive,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
+ "Set to 1 if you want to have logs archived.", NULL, NULL, FALSE);
+#endif /* UNIV_LOG_ARCHIVE */
+
+static MYSQL_SYSVAR_STR(log_group_home_dir, innobase_log_group_home_dir,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Path to InnoDB log files.", NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_ULONG(max_dirty_pages_pct, srv_max_buf_pool_modified_pct,
+ PLUGIN_VAR_RQCMDARG,
+ "Percentage of dirty pages allowed in bufferpool.",
+ NULL, NULL, 90, 0, 100, 0);
+
+static MYSQL_SYSVAR_ULONG(max_purge_lag, srv_max_purge_lag,
+ PLUGIN_VAR_RQCMDARG,
+ "Desired maximum length of the purge queue (0 = no limit)",
+ NULL, NULL, 0, 0, ~0L, 0);
+
+static MYSQL_SYSVAR_BOOL(rollback_on_timeout, innobase_rollback_on_timeout,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
+ "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(status_file, innobase_create_status_file,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR,
+ "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR,
+ "Enable statistics gathering for metadata commands such as SHOW TABLE STATUS (on by default)",
+ NULL, NULL, TRUE);
+
+static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
+ NULL, NULL, 1*1024*1024L, 512*1024L, ~0L, 1024);
+
+static MYSQL_SYSVAR_ULONG(autoextend_increment, srv_auto_extend_increment,
+ PLUGIN_VAR_RQCMDARG,
+ "Data file autoextend increment in megabytes",
+ NULL, NULL, 8L, 1L, 1000L, 0);
+
+static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
+ NULL, NULL, 8*1024*1024L, 1024*1024L, LONGLONG_MAX, 1024*1024L);
+
+static MYSQL_SYSVAR_ULONG(commit_concurrency, srv_commit_concurrency,
+ PLUGIN_VAR_RQCMDARG,
+ "Helps in performance tuning in heavily concurrent environments.",
+ NULL, NULL, 0, 0, 1000, 0);
+
+static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter,
+ PLUGIN_VAR_RQCMDARG,
+ "Number of times a thread is allowed to enter InnoDB within the same SQL query after it has once got the ticket",
+ NULL, NULL, 500L, 1L, ~0L, 0);
+
+static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Number of file I/O threads in InnoDB.",
+ NULL, NULL, 4, 4, 64, 0);
+
+static MYSQL_SYSVAR_LONG(force_recovery, innobase_force_recovery,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Helps to save your data in case the disk image of the database becomes corrupt.",
+ NULL, NULL, 0, 0, 6, 0);
+
+static MYSQL_SYSVAR_LONG(lock_wait_timeout, innobase_lock_wait_timeout,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
+ NULL, NULL, 50, 1, 1024 * 1024 * 1024, 0);
+
+static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "The size of the buffer which InnoDB uses to write log to the log files on disk.",
+ NULL, NULL, 1024*1024L, 256*1024L, ~0L, 1024);
+
+static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Size of each log file in a log group.",
+ NULL, NULL, 5*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L);
+
+static MYSQL_SYSVAR_LONG(log_files_in_group, innobase_log_files_in_group,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
+ NULL, NULL, 2, 2, 100, 0);
+
+static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.",
+ NULL, NULL, 1, 1, 10, 0);
+
+static MYSQL_SYSVAR_LONG(open_files, innobase_open_files,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "How many files at the maximum InnoDB keeps open at the same time.",
+ NULL, NULL, 300L, 10L, ~0L, 0);
+
+static MYSQL_SYSVAR_ULONG(sync_spin_loops, srv_n_spin_wait_rounds,
+ PLUGIN_VAR_RQCMDARG,
+ "Count of spin-loop rounds in InnoDB mutexes",
+ NULL, NULL, 20L, 0L, ~0L, 0);
+
+static MYSQL_SYSVAR_ULONG(thread_concurrency, srv_thread_concurrency,
+ PLUGIN_VAR_RQCMDARG,
+ "Helps in performance tuning in heavily concurrent environments. Sets the maximum number of threads allowed inside InnoDB. Value 0 will disable the thread throttling.",
+ NULL, NULL, 8, 0, 1000, 0);
+
+static MYSQL_SYSVAR_ULONG(thread_sleep_delay, srv_thread_sleep_delay,
+ PLUGIN_VAR_RQCMDARG,
+  "Time of innodb thread sleeping before joining InnoDB queue (usec). Value 0 disables the sleep",
+ NULL, NULL, 10000L, 0L, ~0L, 0);
+
+static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Path to individual files and their sizes.",
+ NULL, NULL, NULL);
+
+static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "The AUTOINC lock modes supported by InnoDB:\n"
+ " 0 => Old style AUTOINC locking (for backward compatibility)\n"
+ " 1 => New style AUTOINC locking\n"
+ " 2 => No AUTOINC locking (unsafe for SBR)",
+ NULL, NULL,
+ AUTOINC_NEW_STYLE_LOCKING, /* Default setting */
+ AUTOINC_OLD_STYLE_LOCKING, /* Minimum value */
+ AUTOINC_NO_LOCKING, 0); /* Maximum value */
+
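
Each MYSQL_SYSVAR above surfaces as a server variable under the plugin's name prefix, so the lock mode just declared would be configured as innodb_autoinc_lock_mode. An illustrative my.cnf fragment (not part of this patch):

	[mysqld]
	# 0 = old style, 1 = new style (default), 2 = no AUTOINC locking
	innodb_autoinc_lock_mode = 1
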
+static struct st_mysql_sys_var* innobase_system_variables[]= {
+ MYSQL_SYSVAR(additional_mem_pool_size),
+ MYSQL_SYSVAR(autoextend_increment),
+ MYSQL_SYSVAR(buffer_pool_size),
+ MYSQL_SYSVAR(checksums),
+ MYSQL_SYSVAR(commit_concurrency),
+ MYSQL_SYSVAR(concurrency_tickets),
+ MYSQL_SYSVAR(data_file_path),
+ MYSQL_SYSVAR(data_home_dir),
+ MYSQL_SYSVAR(doublewrite),
+ MYSQL_SYSVAR(fast_shutdown),
+ MYSQL_SYSVAR(file_io_threads),
+ MYSQL_SYSVAR(file_per_table),
+ MYSQL_SYSVAR(flush_log_at_trx_commit),
+ MYSQL_SYSVAR(flush_method),
+ MYSQL_SYSVAR(force_recovery),
+ MYSQL_SYSVAR(locks_unsafe_for_binlog),
+ MYSQL_SYSVAR(lock_wait_timeout),
+#ifdef UNIV_LOG_ARCHIVE
+ MYSQL_SYSVAR(log_arch_dir),
+ MYSQL_SYSVAR(log_archive),
+#endif /* UNIV_LOG_ARCHIVE */
+ MYSQL_SYSVAR(log_buffer_size),
+ MYSQL_SYSVAR(log_file_size),
+ MYSQL_SYSVAR(log_files_in_group),
+ MYSQL_SYSVAR(log_group_home_dir),
+ MYSQL_SYSVAR(max_dirty_pages_pct),
+ MYSQL_SYSVAR(max_purge_lag),
+ MYSQL_SYSVAR(mirrored_log_groups),
+ MYSQL_SYSVAR(open_files),
+ MYSQL_SYSVAR(rollback_on_timeout),
+ MYSQL_SYSVAR(stats_on_metadata),
+ MYSQL_SYSVAR(status_file),
+ MYSQL_SYSVAR(support_xa),
+ MYSQL_SYSVAR(sync_spin_loops),
+ MYSQL_SYSVAR(table_locks),
+ MYSQL_SYSVAR(thread_concurrency),
+ MYSQL_SYSVAR(thread_sleep_delay),
+ MYSQL_SYSVAR(autoinc_lock_mode),
+ NULL
+};
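
The array above replaces the long list of extern globals the old handler exposed (see the ha_innodb.h hunk below). Variables declared this way are read through the THDVAR macro rather than thd->variables, as this patch already does for XA support; a minimal sketch mirroring that call site:

	/* sketch: reading a plugin session variable, as in
	innobase_xa_prepare() above */
	if (!THDVAR(thd, support_xa)) {

		return(0);	/* XA prepare skipped for this session */
	}
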
+
mysql_declare_plugin(innobase)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
@@ -7706,9 +8073,7 @@ mysql_declare_plugin(innobase)
NULL, /* Plugin Deinit */
0x0100 /* 1.0 */,
innodb_status_variables_export,/* status variables */
- NULL, /* system variables */
- NULL /* config options */
+ innobase_system_variables, /* system variables */
+ NULL /* reserved */
}
mysql_declare_plugin_end;
-
-#endif
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 8b6c4d5a3d8..fe5ebd57990 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -32,7 +32,10 @@ typedef struct st_innobase_share {
} INNOBASE_SHARE;
+struct dict_index_struct;
struct row_prebuilt_struct;
+
+typedef struct dict_index_struct dict_index_t;
typedef struct row_prebuilt_struct row_prebuilt_t;
/* The class defining a handle to an Innodb table */
@@ -47,14 +50,14 @@ class ha_innobase: public handler
THR_LOCK_DATA lock;
INNOBASE_SHARE *share;
- byte* upd_buff; /* buffer used in updates */
- byte* key_val_buff; /* buffer used in converting
+ uchar* upd_buff; /* buffer used in updates */
+ uchar* key_val_buff; /* buffer used in converting
search key values from MySQL format
to Innodb format */
ulong upd_and_key_val_buff_len;
/* the length of each of the previous
two buffers */
- ulong int_table_flags;
+ Table_flags int_table_flags;
uint primary_key;
ulong start_of_scan; /* this is set to 1 when we are
starting a table scan but have not
@@ -65,11 +68,16 @@ class ha_innobase: public handler
uint num_write_row; /* number of write_row() calls */
uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
- const byte* record);
+ const uchar* record);
int update_thd(THD* thd);
int change_active_index(uint keynr);
- int general_fetch(byte* buf, uint direction, uint match_mode);
+ int general_fetch(uchar* buf, uint direction, uint match_mode);
int innobase_read_and_init_auto_inc(longlong* ret);
+ ulong innobase_autoinc_lock();
+ ulong innobase_set_max_autoinc(ulonglong auto_inc);
+ ulong innobase_reset_autoinc(ulonglong auto_inc);
+ ulong innobase_get_auto_increment(ulonglong* value);
+ dict_index_t* innobase_get_index(uint keynr);
/* Init values for the class: */
public:
@@ -84,7 +92,7 @@ class ha_innobase: public handler
const char* table_type() const { return("InnoDB");}
const char *index_type(uint key_number) { return "BTREE"; }
const char** bas_ext() const;
- ulonglong table_flags() const { return int_table_flags; }
+ Table_flags table_flags() const;
ulong index_flags(uint idx, uint part, bool all_parts) const
{
return (HA_READ_NEXT |
@@ -110,32 +118,32 @@ class ha_innobase: public handler
double scan_time();
double read_time(uint index, uint ranges, ha_rows rows);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
+ int write_row(uchar * buf);
+ int update_row(const uchar * old_data, uchar * new_data);
+ int delete_row(const uchar * buf);
bool was_semi_consistent_read();
void try_semi_consistent_read(bool yes);
void unlock_row();
int index_init(uint index, bool sorted);
int index_end();
- int index_read(byte * buf, const byte * key,
+ int index_read(uchar * buf, const uchar * key,
uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint index, const byte * key,
+ int index_read_idx(uchar * buf, uint index, const uchar * key,
uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_next_same(byte * buf, const byte *key, uint keylen);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
+ int index_read_last(uchar * buf, const uchar * key, uint key_len);
+ int index_next(uchar * buf);
+ int index_next_same(uchar * buf, const uchar *key, uint keylen);
+ int index_prev(uchar * buf);
+ int index_first(uchar * buf);
+ int index_last(uchar * buf);
int rnd_init(bool scan);
int rnd_end();
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
- void position(const byte *record);
+ void position(const uchar *record);
int info(uint);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
@@ -145,7 +153,7 @@ class ha_innobase: public handler
int external_lock(THD *thd, int lock_type);
int transactional_table_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
- void position(byte *record);
+ void position(uchar *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range
*max_key);
ha_rows estimate_rows_upper_bound();
@@ -185,44 +193,62 @@ class ha_innobase: public handler
static char *get_mysql_bin_log_name();
static ulonglong get_mysql_bin_log_pos();
bool primary_key_is_clustered() { return true; }
- int cmp_ref(const byte *ref1, const byte *ref2);
+ int cmp_ref(const uchar *ref1, const uchar *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
};
-extern ulong innobase_fast_shutdown;
-extern ulong innobase_large_page_size;
-extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
-extern longlong innobase_buffer_pool_size, innobase_log_file_size;
-extern long innobase_log_buffer_size;
-extern long innobase_additional_mem_pool_size;
-extern long innobase_buffer_pool_awe_mem_mb;
-extern long innobase_file_io_threads, innobase_lock_wait_timeout;
-extern long innobase_force_recovery;
-extern long innobase_open_files;
-extern char *innobase_data_home_dir, *innobase_data_file_path;
-extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
-extern char *innobase_unix_file_flush_method;
-/* The following variables have to be my_bool for SHOW VARIABLES to work */
-extern my_bool innobase_log_archive,
- innobase_use_doublewrite,
- innobase_use_checksums,
- innobase_use_large_pages,
- innobase_use_native_aio,
- innobase_file_per_table, innobase_locks_unsafe_for_binlog,
- innobase_rollback_on_timeout,
- innobase_create_status_file,
- innobase_stats_on_metadata;
+/* Some accessor functions which the InnoDB plugin needs, but which
+can not be added to mysql/plugin.h as part of the public interface;
+the definitions are bracketed with #ifdef INNODB_COMPATIBILITY_HOOKS */
+
+#ifndef INNODB_COMPATIBILITY_HOOKS
+#error InnoDB needs MySQL to be built with #define INNODB_COMPATIBILITY_HOOKS
+#endif
+
extern "C" {
-extern ulong srv_max_buf_pool_modified_pct;
-extern ulong srv_max_purge_lag;
-extern ulong srv_auto_extend_increment;
-extern ulong srv_n_spin_wait_rounds;
-extern ulong srv_n_free_tickets_to_enter;
-extern ulong srv_thread_sleep_delay;
-extern ulong srv_thread_concurrency;
-extern ulong srv_commit_concurrency;
-extern ulong srv_flush_log_at_trx_commit;
+struct charset_info_st *thd_charset(MYSQL_THD thd);
+char **thd_query(MYSQL_THD thd);
+
+/** Get the file name of the MySQL binlog.
+ * @return the name of the binlog file
+ */
+const char* mysql_bin_log_file_name(void);
+
+/** Get the current position of the MySQL binlog.
+ * @return byte offset from the beginning of the binlog
+ */
+ulonglong mysql_bin_log_file_pos(void);
+
+/**
+ Check if a user thread is a replication slave thread
+ @param thd user thread
+ @retval 0 the user thread is not a replication slave thread
+ @retval 1 the user thread is a replication slave thread
+*/
+int thd_slave_thread(const MYSQL_THD thd);
+
+/**
+ Check if a user thread is running a non-transactional update
+ @param thd user thread
+ @retval 0 the user thread is not running a non-transactional update
+ @retval 1 the user thread is running a non-transactional update
+*/
+int thd_non_transactional_update(const MYSQL_THD thd);
+
+/**
+ Get the user thread's binary logging format
+ @param thd user thread
+ @return Value to be used as index into the binlog_format_names array
+*/
+int thd_binlog_format(const MYSQL_THD thd);
+
+/**
+ Mark transaction to rollback and mark error as fatal to a sub-statement.
+ @param thd Thread handle
+ @param all TRUE <=> rollback main transaction.
+*/
+void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all);
}
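
These extern "C" hooks let InnoDB's C code interrogate the THD without pulling in sql_class.h. A hypothetical call site (the predicate combination is illustrative, not from this patch) in the deadlock-resolution spirit described above:

	/* prefer a rollback victim that is neither a slave thread
	nor has modified non-transactional tables */
	if (!thd_slave_thread(thd) && !thd_non_transactional_update(thd)) {

		thd_mark_transaction_to_rollback(thd, TRUE);
	}
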
/*
diff --git a/storage/innobase/ibuf/Makefile.am b/storage/innobase/ibuf/Makefile.am
deleted file mode 100644
index 42adda9a4ef..00000000000
--- a/storage/innobase/ibuf/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libibuf.a
-
-libibuf_a_SOURCES = ibuf0ibuf.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/ibuf/ibuf0ibuf.c b/storage/innobase/ibuf/ibuf0ibuf.c
index 1cbb6003cfc..44972356304 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.c
+++ b/storage/innobase/ibuf/ibuf0ibuf.c
@@ -150,9 +150,30 @@ ulint ibuf_flush_count = 0;
#define IBUF_COUNT_N_PAGES 2000
/* Buffered entry counts for file pages, used in debugging */
-static ulint* ibuf_counts[IBUF_COUNT_N_SPACES];
+static ulint ibuf_counts[IBUF_COUNT_N_SPACES][IBUF_COUNT_N_PAGES];
-static ibool ibuf_counts_inited = FALSE;
+/**********************************************************************
+Checks that the indexes to ibuf_counts[][] are within limits. */
+UNIV_INLINE
+void
+ibuf_count_check(
+/*=============*/
+ ulint space_id, /* in: space identifier */
+ ulint page_no) /* in: page number */
+{
+ if (space_id < IBUF_COUNT_N_SPACES && page_no < IBUF_COUNT_N_PAGES) {
+ return;
+ }
+
+ fprintf(stderr,
+ "InnoDB: UNIV_IBUF_DEBUG limits space_id and page_no\n"
+ "InnoDB: and breaks crash recovery.\n"
+ "InnoDB: space_id=%lu, should be 0<=space_id<%lu\n"
+ "InnoDB: page_no=%lu, should be 0<=page_no<%lu\n",
+ (ulint) space_id, (ulint) IBUF_COUNT_N_SPACES,
+ (ulint) page_no, (ulint) IBUF_COUNT_N_PAGES);
+ ut_error;
+}
#endif
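
Both accessors below now route through ibuf_count_check(), so a typical debug-build update of a page's buffered-entry count stays bounds-checked. A minimal sketch under UNIV_IBUF_DEBUG:

	/* bump the buffered-entry count for (space, page_no);
	ibuf_count_check() calls ut_error on out-of-range indexes */
	ibuf_count_set(space, page_no, ibuf_count_get(space, page_no) + 1);
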
/* The start address for an insert buffer bitmap page bitmap */
@@ -328,15 +349,9 @@ ibuf_count_get(
ulint space, /* in: space id */
ulint page_no)/* in: page number */
{
- ut_ad(space < IBUF_COUNT_N_SPACES);
- ut_ad(page_no < IBUF_COUNT_N_PAGES);
-
- if (!ibuf_counts_inited) {
-
- return(0);
- }
+ ibuf_count_check(space, page_no);
- return(*(ibuf_counts[space] + page_no));
+ return(ibuf_counts[space][page_no]);
}
/**********************************************************************
@@ -349,11 +364,10 @@ ibuf_count_set(
ulint page_no,/* in: page number */
ulint val) /* in: value to set */
{
- ut_a(space < IBUF_COUNT_N_SPACES);
- ut_a(page_no < IBUF_COUNT_N_PAGES);
+ ibuf_count_check(space, page_no);
ut_a(val < UNIV_PAGE_SIZE);
- *(ibuf_counts[space] + page_no) = val;
+ ibuf_counts[space][page_no] = val;
}
#endif
@@ -378,22 +392,6 @@ ibuf_init_at_db_start(void)
ibuf->size = 0;
-#ifdef UNIV_IBUF_DEBUG
- {
- ulint i, j;
-
- for (i = 0; i < IBUF_COUNT_N_SPACES; i++) {
-
- ibuf_counts[i] = mem_alloc(sizeof(ulint)
- * IBUF_COUNT_N_PAGES);
- for (j = 0; j < IBUF_COUNT_N_PAGES; j++) {
- ibuf_count_set(i, j, 0);
- }
- }
-
- ibuf_counts_inited = TRUE;
- }
-#endif
mutex_create(&ibuf_pessimistic_insert_mutex,
SYNC_IBUF_PESS_INSERT_MUTEX);
@@ -464,7 +462,8 @@ ibuf_data_init_for_space(
page_t* root;
page_t* header_page;
mtr_t mtr;
- char buf[50];
+ char* buf;
+ mem_heap_t* heap;
dict_table_t* table;
dict_index_t* index;
ulint n_used;
@@ -518,16 +517,20 @@ ibuf_data_init_for_space(
ibuf_exit();
+ heap = mem_heap_create(450);
+ buf = mem_heap_alloc(heap, 50);
+
sprintf(buf, "SYS_IBUF_TABLE_%lu", (ulong) space);
/* use old-style record format for the insert buffer */
table = dict_mem_table_create(buf, space, 2, 0);
- dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0);
- dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "PAGE_NO", DATA_BINARY, 0, 0);
+ dict_mem_table_add_col(table, heap, "TYPES", DATA_BINARY, 0, 0);
table->id = ut_dulint_add(DICT_IBUF_ID_MIN, space);
- dict_table_add_to_cache(table);
+ dict_table_add_to_cache(table, heap);
+ mem_heap_free(heap);
index = dict_mem_index_create(
buf, "CLUST_IND", space,
@@ -567,7 +570,8 @@ ibuf_bitmap_page_init(
bit_offset = XDES_DESCRIBED_PER_PAGE * IBUF_BITS_PER_PAGE;
- byte_offset = bit_offset / 8 + 1; /* better: (bit_offset + 7) / 8 */
+ byte_offset = bit_offset / 8 + 1;
+ /* better: byte_offset = UT_BITS_IN_BYTES(bit_offset); */
fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP);
@@ -1140,7 +1144,7 @@ ibuf_dummy_index_add_col(
ulint len) /* in: length of the column */
{
ulint i = index->table->n_def;
- dict_mem_table_add_col(index->table, "DUMMY",
+ dict_mem_table_add_col(index->table, NULL, NULL,
dtype_get_mtype(type),
dtype_get_prtype(type),
dtype_get_len(type));
@@ -1162,11 +1166,6 @@ ibuf_dummy_index_free(
dict_mem_table_free(table);
}
-void
-dict_index_print_low(
-/*=================*/
- dict_index_t* index); /* in: index */
-
/*************************************************************************
Builds the entry to insert into a non-clustered index when we have the
corresponding record in an ibuf index. */
@@ -1441,6 +1440,9 @@ ibuf_entry_build(
*buf2++ = 0; /* write the compact format indicator */
}
for (i = 0; i < n_fields; i++) {
+ ulint fixed_len;
+ const dict_field_t* ifield;
+
/* We add 4 below because we have the 4 extra fields at the
start of an ibuf record */
@@ -1448,10 +1450,30 @@ ibuf_entry_build(
entry_field = dtuple_get_nth_field(entry, i);
dfield_copy(field, entry_field);
+ ifield = dict_index_get_nth_field(index, i);
+ /* Prefix index columns of fixed-length columns are of
+ fixed length. However, in the function call below,
+ dfield_get_type(entry_field) contains the fixed length
+ of the column in the clustered index. Replace it with
+ the fixed length of the secondary index column. */
+ fixed_len = ifield->fixed_len;
+
+#ifdef UNIV_DEBUG
+ if (fixed_len) {
+ /* dict_index_add_col() should guarantee these */
+ ut_ad(fixed_len <= (ulint) entry_field->type.len);
+ if (ifield->prefix_len) {
+ ut_ad(ifield->prefix_len == fixed_len);
+ } else {
+ ut_ad(fixed_len
+ == (ulint) entry_field->type.len);
+ }
+ }
+#endif /* UNIV_DEBUG */
+
dtype_new_store_for_order_and_null_size(
buf2 + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE,
- dfield_get_type(entry_field),
- dict_index_get_nth_field(index, i)->prefix_len);
+ dfield_get_type(entry_field), fixed_len);
}
/* Store the type info in buf2 to field 3 of tuple */
diff --git a/storage/innobase/include/Makefile.i b/storage/innobase/include/Makefile.i
deleted file mode 100644
index db436c702ff..00000000000
--- a/storage/innobase/include/Makefile.i
+++ /dev/null
@@ -1,10 +0,0 @@
-# Makefile included in Makefile.am in every subdirectory
-
-INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \
- -I$(top_srcdir)/regex \
- -I$(top_srcdir)/storage/innobase/include \
- -I$(top_srcdir)/sql \
- -I$(srcdir)
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 031bf6c51b4..b077ff0c181 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -28,7 +28,7 @@ buf_block_peek_if_too_old(
buf_block_t* block) /* in: block to make younger */
{
return(buf_pool->freed_page_clock >= block->freed_page_clock
- + 1 + (buf_pool->curr_size / 1024));
+ + 1 + (buf_pool->curr_size / 4));
}
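
The divisor change raises the aging threshold considerably. Illustrative numbers, assuming an 8192-page buffer pool:

	ulint	curr_size = 8192;

	/* a block is now "too old" only after the pool's
	freed_page_clock has advanced past the block's by more than
	1 + 8192 / 4 = 2049 evictions, versus 1 + 8 = 9 previously */
	ulint	threshold = 1 + (curr_size / 4);
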
/*************************************************************************
diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h
index 843c70af577..0aa1b87e470 100644
--- a/storage/innobase/include/db0err.h
+++ b/storage/innobase/include/db0err.h
@@ -62,6 +62,11 @@ Created 5/24/1996 Heikki Tuuri
lead to a duplicate key in some
table */
+#define DB_TOO_MANY_CONCURRENT_TRXS 47 /* when InnoDB runs out of the
+ preconfigured undo slots, this can
+ only happen when there are too many
+ concurrent transactions */
+
/* The following are partial failure codes */
#define DB_FAIL 1000
#define DB_OVERFLOW 1001
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 836a6290498..2f038b21e8e 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -92,6 +92,17 @@ dict_col_copy_type_noninline(
/*=========================*/
const dict_col_t* col, /* in: column */
dtype_t* type); /* out: data type */
+#ifdef UNIV_DEBUG
+/*************************************************************************
+Assert that a column and a data type match. */
+UNIV_INLINE
+ibool
+dict_col_type_assert_equal(
+/*=======================*/
+ /* out: TRUE */
+ const dict_col_t* col, /* in: column */
+ const dtype_t* type); /* in: data type */
+#endif /* UNIV_DEBUG */
/***************************************************************************
Returns the minimum size of the column. */
UNIV_INLINE
@@ -160,6 +171,13 @@ dict_col_name_is_reserved(
/* out: TRUE if name is reserved */
const char* name); /* in: column name */
/************************************************************************
+Acquire the autoinc lock.*/
+
+void
+dict_table_autoinc_lock(
+/*====================*/
+ dict_table_t* table); /* in: table */
+/************************************************************************
Initializes the autoinc counter. It is not an error to initialize an already
initialized counter. */
@@ -169,22 +187,6 @@ dict_table_autoinc_initialize(
dict_table_t* table, /* in: table */
ib_longlong value); /* in: next value to assign to a row */
/************************************************************************
-Gets the next autoinc value (== autoinc counter value), 0 if not yet
-initialized. If initialized, increments the counter by 1. */
-
-ib_longlong
-dict_table_autoinc_get(
-/*===================*/
- /* out: value for a new row, or 0 */
- dict_table_t* table); /* in: table */
-/************************************************************************
-Decrements the autoinc counter value by 1. */
-
-void
-dict_table_autoinc_decrement(
-/*=========================*/
- dict_table_t* table); /* in: table */
-/************************************************************************
Reads the next autoinc value (== autoinc counter value), 0 if not yet
initialized. */
@@ -194,15 +196,6 @@ dict_table_autoinc_read(
/* out: value for a new row, or 0 */
dict_table_t* table); /* in: table */
/************************************************************************
-Peeks the autoinc counter value, 0 if not yet initialized. Does not
-increment the counter. The read not protected by any mutex! */
-
-ib_longlong
-dict_table_autoinc_peek(
-/*====================*/
- /* out: value of the counter */
- dict_table_t* table); /* in: table */
-/************************************************************************
Updates the autoinc counter if the value supplied is equal or bigger than the
current value. If not inited, does nothing. */
@@ -212,13 +205,29 @@ dict_table_autoinc_update(
dict_table_t* table, /* in: table */
ib_longlong value); /* in: value which was assigned to a row */
+/************************************************************************
+Release the autoinc lock.*/
+
+void
+dict_table_autoinc_unlock(
+/*======================*/
+ dict_table_t* table); /* in: table */
+/**************************************************************************
+Adds system columns to a table object. */
+
+void
+dict_table_add_system_columns(
+/*==========================*/
+ dict_table_t* table, /* in/out: table */
+ mem_heap_t* heap); /* in: temporary heap */
/**************************************************************************
Adds a table object to the dictionary cache. */
void
dict_table_add_to_cache(
/*====================*/
- dict_table_t* table); /* in: table */
+ dict_table_t* table, /* in: table */
+ mem_heap_t* heap); /* in: temporary heap */
/**************************************************************************
Removes a table object from the dictionary cache. */
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 4a9afd2f3f5..7d38cbcd1fa 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -30,6 +30,30 @@ dict_col_copy_type(
type->mbmaxlen = col->mbmaxlen;
}
+#ifdef UNIV_DEBUG
+/*************************************************************************
+Assert that a column and a data type match. */
+UNIV_INLINE
+ibool
+dict_col_type_assert_equal(
+/*=======================*/
+ /* out: TRUE */
+ const dict_col_t* col, /* in: column */
+ const dtype_t* type) /* in: data type */
+{
+ ut_ad(col);
+ ut_ad(type);
+
+ ut_ad(col->mtype == type->mtype);
+ ut_ad(col->prtype == type->prtype);
+ ut_ad(col->len == type->len);
+ ut_ad(col->mbminlen == type->mbminlen);
+ ut_ad(col->mbmaxlen == type->mbmaxlen);
+
+ return(TRUE);
+}
+#endif /* UNIV_DEBUG */
+
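
Because the helper returns TRUE, it can sit inside a debug assertion and vanish from release builds. A plausible call site (hypothetical variable names):

	dtype_t	type;

	dict_col_copy_type(col, &type);
	ut_ad(dict_col_type_assert_equal(col, &type));
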
/***************************************************************************
Returns the minimum size of the column. */
UNIV_INLINE
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index a23f89954a4..a05bc513efd 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -72,7 +72,8 @@ void
dict_mem_table_add_col(
/*===================*/
dict_table_t* table, /* in: table */
- const char* name, /* in: column name */
+ mem_heap_t* heap, /* in: temporary memory heap, or NULL */
+ const char* name, /* in: column name, or NULL */
ulint mtype, /* in: main datatype */
ulint prtype, /* in: precise type */
ulint len); /* in: precision */
@@ -158,10 +159,13 @@ struct dict_col_struct{
of an index */
};
-/* DICT_MAX_INDEX_COL_LEN is measured in bytes and is the max index column
-length + 1. Starting from 4.1.6, we set it to < 3 * 256, so that one can
-create a column prefix index on 255 characters of a TEXT field also in the
-UTF-8 charset. In that charset, a character may take at most 3 bytes. */
+/* DICT_MAX_INDEX_COL_LEN is measured in bytes and is the maximum
+indexed column length (or indexed prefix length). It is set to 3*256,
+so that one can create a column prefix index on 256 characters of a
+TEXT or VARCHAR column also in the UTF-8 charset. In that charset,
+a character may take at most 3 bytes.
+This constant MUST NOT BE CHANGED, or the compatibility of InnoDB data
+files would be at risk! */
#define DICT_MAX_INDEX_COL_LEN 768
@@ -311,11 +315,11 @@ struct dict_table_struct{
unsigned n_cols:10;/* number of columns */
dict_col_t* cols; /* array of column descriptions */
const char* col_names;
- /* n_def column names packed in an
- "name1\0name2\0...nameN\0" array. until
- n_def reaches n_cols, this is allocated with
- ut_malloc, and the final size array is
- allocated through the table's heap. */
+ /* Column names packed in a character string
+ "name1\0name2\0...nameN\0". Until
+					the string contains n_cols names, it will be
+ allocated from a temporary heap. The final
+ string will be allocated from table->heap. */
hash_node_t name_hash; /* hash chain node */
hash_node_t id_hash; /* hash chain node */
UT_LIST_BASE_NODE_T(dict_index_t)
@@ -407,6 +411,21 @@ struct dict_table_struct{
SELECT MAX(auto inc column) */
ib_longlong autoinc;/* autoinc counter value to give to the
next inserted row */
+
+ ib_longlong autoinc_increment;
+ /* The increment step of the auto increment
+ column. Value must be greater than or equal
+ to 1 */
+ ulong n_waiting_or_granted_auto_inc_locks;
+ /* This counter is used to track the number
+ of granted and pending autoinc locks on this
+ table. This value is set after acquiring the
+ kernel mutex but we peek the contents to
+ determine whether other transactions have
+ acquired the AUTOINC lock or not. Of course
+ only one transaction can be granted the
+ lock but there can be multiple waiters. */
+
#ifdef UNIV_DEBUG
ulint magic_n;/* magic number */
# define DICT_TABLE_MAGIC_N 76333786
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index d04269fc157..82e95a2e920 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
will be able to insert new data into the database without running out of the
tablespace. Only free extents are taken into account and we also subtract
the safety margin required by the above function fsp_reserve_free_extents. */
-ulint
+ullint
fsp_get_available_space_in_free_extents(
/*====================================*/
/* out: available space in kB */
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 2d27034fdfe..7fb50988941 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -1,6 +1,9 @@
#ifndef HA_INNODB_PROTOTYPES_H
#define HA_INNODB_PROTOTYPES_H
+#include "univ.i" /* ulint, uint */
+#include "m_ctype.h" /* CHARSET_INFO */
+
/* Prototypes for global functions in ha_innodb.cc that are called by
InnoDB's C-code. */
@@ -19,4 +22,30 @@ innobase_convert_string(
CHARSET_INFO* from_cs,
uint* errors);
+/**********************************************************************
+Returns true if the thread is the replication thread on the slave
+server. Used in srv_conc_enter_innodb() to determine if the thread
+should be allowed to enter InnoDB - the replication thread is treated
+differently than other threads. Also used in
+srv_conc_force_exit_innodb(). */
+
+ibool
+thd_is_replication_slave_thread(
+/*============================*/
+ /* out: true if thd is the replication thread */
+ void* thd); /* in: thread handle (THD*) */
+
+/**********************************************************************
+Returns true if the transaction this thread is processing has edited
+non-transactional tables. Used by the deadlock detector when deciding
+which transaction to roll back in case of a deadlock - we try to avoid
+rolling back transactions that have edited non-transactional tables. */
+
+ibool
+thd_has_edited_nontrans_tables(
+/*===========================*/
+ /* out: true if non-transactional tables have
+ been edited */
+ void* thd); /* in: thread handle (THD*) */
+
#endif
diff --git a/storage/innobase/include/lock0iter.h b/storage/innobase/include/lock0iter.h
new file mode 100644
index 00000000000..d063a360c1f
--- /dev/null
+++ b/storage/innobase/include/lock0iter.h
@@ -0,0 +1,52 @@
+/******************************************************
+Lock queue iterator type and function prototypes.
+
+(c) 2007 Innobase Oy
+
+Created July 16, 2007 Vasil Dimov
+*******************************************************/
+
+#ifndef lock0iter_h
+#define lock0iter_h
+
+#include "univ.i"
+#include "lock0types.h"
+
+typedef struct lock_queue_iterator_struct {
+ lock_t* current_lock;
+ /* In case this is a record lock queue (not table lock queue)
+ then bit_no is the record number within the heap in which the
+ record is stored. */
+ ulint bit_no;
+} lock_queue_iterator_t;
+
+/***********************************************************************
+Initialize lock queue iterator so that it starts to iterate from
+"lock". bit_no specifies the record number within the heap where the
+record is stored. It can be undefined (ULINT_UNDEFINED) in two cases:
+1. If the lock is a table lock, thus we have a table lock queue;
+2. If the lock is a record lock and it is a wait lock. In this case
+ bit_no is calculated in this function by using
+ lock_rec_find_set_bit(). There is exactly one bit set in the bitmap
+ of a wait lock. */
+
+void
+lock_queue_iterator_reset(
+/*======================*/
+ lock_queue_iterator_t* iter, /* out: iterator */
+ lock_t* lock, /* in: lock to start from */
+ ulint bit_no);/* in: record number in the
+ heap */
+
+/***********************************************************************
+Gets the previous lock in the lock queue, returns NULL if there are no
+more locks (i.e. the current lock is the first one). The iterator is
+receded (if not-NULL is returned). */
+
+lock_t*
+lock_queue_iterator_get_prev(
+/*=========================*/
+ /* out: previous lock or NULL */
+ lock_queue_iterator_t* iter); /* in/out: iterator */
+
+#endif /* lock0iter_h */
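
A usage sketch for the iterator (wait_lock is a hypothetical record lock taken from a wait queue): walk backwards over every lock queued before it and test for conflicts with lock_has_to_wait() from lock0lock.h:

	lock_queue_iterator_t	iter;
	lock_t*			prev;

	lock_queue_iterator_reset(&iter, wait_lock, ULINT_UNDEFINED);

	while ((prev = lock_queue_iterator_get_prev(&iter)) != NULL) {
		if (lock_has_to_wait(wait_lock, prev)) {
			/* wait_lock conflicts with an earlier lock */
		}
	}
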
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index 6b863e32183..8b08b6284f6 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -519,6 +519,18 @@ lock_is_table_exclusive(
dict_table_t* table, /* in: table */
trx_t* trx); /* in: transaction */
/*************************************************************************
+Checks if a lock request lock1 has to wait for request lock2. */
+
+ibool
+lock_has_to_wait(
+/*=============*/
+ /* out: TRUE if lock1 has to wait for lock2 to be
+ removed */
+ lock_t* lock1, /* in: waiting lock */
+ lock_t* lock2); /* in: another lock; NOTE that it is assumed that this
+ has a lock bit set on the same record as in lock1 if
+ the locks are record locks */
+/*************************************************************************
Checks that a transaction id is sensible, i.e., not in the future. */
ibool
@@ -597,7 +609,7 @@ lock_validate(void);
/* out: TRUE if ok */
/*************************************************************************
Return approximate number of record locks (bits set in the bitmap) for
-this transaction. Since delete-marked records ma ybe removed, the
+this transaction. Since delete-marked records may be removed, the
record count will not be precise. */
ulint
diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h
new file mode 100644
index 00000000000..7703a2b7def
--- /dev/null
+++ b/storage/innobase/include/lock0priv.h
@@ -0,0 +1,101 @@
+/******************************************************
+Lock module internal structures and methods.
+
+(c) 2007 Innobase Oy
+
+Created July 12, 2007 Vasil Dimov
+*******************************************************/
+
+#ifndef lock0priv_h
+#define lock0priv_h
+
+#ifndef LOCK_MODULE_IMPLEMENTATION
+/* If you need to access members of the structures defined in this
+file, please write appropriate functions that retrieve them and put
+those functions in lock/ */
+#error Do not include lock0priv.h outside of the lock/ module
+#endif
+
+#include "univ.i"
+#include "dict0types.h"
+#include "hash0hash.h"
+#include "trx0types.h"
+#include "ut0lst.h"
+
+/* A table lock */
+typedef struct lock_table_struct lock_table_t;
+struct lock_table_struct {
+ dict_table_t* table; /* database table in dictionary
+ cache */
+ UT_LIST_NODE_T(lock_t)
+ locks; /* list of locks on the same
+ table */
+};
+
+/* Record lock for a page */
+typedef struct lock_rec_struct lock_rec_t;
+struct lock_rec_struct {
+ ulint space; /* space id */
+ ulint page_no; /* page number */
+ ulint n_bits; /* number of bits in the lock
+ bitmap; NOTE: the lock bitmap is
+ placed immediately after the
+ lock struct */
+};
+
+/* Lock struct */
+struct lock_struct {
+ trx_t* trx; /* transaction owning the
+ lock */
+ UT_LIST_NODE_T(lock_t)
+ trx_locks; /* list of the locks of the
+ transaction */
+ ulint type_mode; /* lock type, mode, LOCK_GAP or
+ LOCK_REC_NOT_GAP,
+ LOCK_INSERT_INTENTION,
+ wait flag, ORed */
+ hash_node_t hash; /* hash chain node for a record
+ lock */
+ dict_index_t* index; /* index for a record lock */
+ union {
+ lock_table_t tab_lock;/* table lock */
+ lock_rec_t rec_lock;/* record lock */
+ } un_member;
+};
+
+/*************************************************************************
+Gets the type of a lock. */
+UNIV_INLINE
+ulint
+lock_get_type(
+/*==========*/
+ /* out: LOCK_TABLE or LOCK_REC */
+ const lock_t* lock); /* in: lock */
+
+/**************************************************************************
+Looks for a set bit in a record lock bitmap. Returns ULINT_UNDEFINED,
+if none found. */
+
+ulint
+lock_rec_find_set_bit(
+/*==================*/
+ /* out: bit index == heap number of the record, or
+ ULINT_UNDEFINED if none found */
+ lock_t* lock); /* in: record lock with at least one bit set */
+
+/*************************************************************************
+Gets the previous record lock set on a record. */
+
+lock_t*
+lock_rec_get_prev(
+/*==============*/
+ /* out: previous lock on the same record, NULL if
+ none exists */
+ lock_t* in_lock,/* in: record lock */
+ ulint heap_no);/* in: heap number of the record */
+
+#ifndef UNIV_NONINL
+#include "lock0priv.ic"
+#endif
+
+#endif /* lock0priv_h */
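/* Editor's note: a sketch, not part of the original patch, of the
bitmap layout documented in lock_rec_struct above. The bitmap has no
named member: it occupies the bytes immediately following the lock_t in
the same allocation. LOCK_REC comes from lock0lock.h; the helper name is
hypothetical. */

static ibool
lock_rec_nth_bit_example(
/*=====================*/
				/* out: TRUE if bit heap_no is set */
	const lock_t*	lock,	/* in: record lock */
	ulint		heap_no)/* in: heap number of the record */
{
	const byte*	bitmap = (const byte*) lock + sizeof(lock_t);

	ut_ad(lock_get_type(lock) == LOCK_REC);
	ut_ad(heap_no < lock->un_member.rec_lock.n_bits);

	/* bit i lives in byte i / 8, at bit position i % 8 */
	return(1 & (bitmap[heap_no / 8] >> (heap_no % 8)));
}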
diff --git a/storage/innobase/include/lock0priv.ic b/storage/innobase/include/lock0priv.ic
new file mode 100644
index 00000000000..4bc8397509d
--- /dev/null
+++ b/storage/innobase/include/lock0priv.ic
@@ -0,0 +1,32 @@
+/******************************************************
+Lock module internal inline methods.
+
+(c) 2007 Innobase Oy
+
+Created July 16, 2007 Vasil Dimov
+*******************************************************/
+
+/* This file contains only methods which are used in
+lock/lock0* files, other than lock/lock0lock.c.
+I.e. lock/lock0lock.c contains more internal inline
+methods but they are used only in that file. */
+
+#ifndef LOCK_MODULE_IMPLEMENTATION
+#error Do not include lock0priv.ic outside of the lock/ module
+#endif
+
+/*************************************************************************
+Gets the type of a lock. */
+UNIV_INLINE
+ulint
+lock_get_type(
+/*==========*/
+ /* out: LOCK_TABLE or LOCK_REC */
+ const lock_t* lock) /* in: lock */
+{
+ ut_ad(lock);
+
+ return(lock->type_mode & LOCK_TYPE_MASK);
+}
+
+/* vim: set filetype=c: */
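/* Editor's note: a companion sketch to lock_get_type() above, not part
of the original patch. It illustrates that type_mode also packs the lock
mode and the wait flag; LOCK_WAIT is assumed to be the wait-flag mask
from lock0lock.h. */
UNIV_INLINE
ibool
lock_get_wait_example(
/*==================*/
				/* out: TRUE if the lock is waiting */
	const lock_t*	lock)	/* in: lock */
{
	ut_ad(lock);

	return((lock->type_mode & LOCK_WAIT) != 0);
}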
diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic
index cb8fbe92cf0..adae9ad8a33 100644
--- a/storage/innobase/include/mem0mem.ic
+++ b/storage/innobase/include/mem0mem.ic
@@ -167,6 +167,8 @@ mem_heap_alloc(
mem_block_set_free(block, free + MEM_SPACE_NEEDED(n));
#ifdef UNIV_MEM_DEBUG
+ UNIV_MEM_ALLOC(buf,
+ n + MEM_FIELD_HEADER_SIZE + MEM_FIELD_TRAILER_SIZE);
/* In the debug version write debugging info to the field */
mem_field_init((byte*)buf, n);
@@ -177,8 +179,10 @@ mem_heap_alloc(
#endif
#ifdef UNIV_SET_MEM_TO_ZERO
+ UNIV_MEM_ALLOC(buf, n);
memset(buf, '\0', n);
#endif
+ UNIV_MEM_ALLOC(buf, n);
return(buf);
}
@@ -267,15 +271,19 @@ mem_heap_free_heap_top(
ut_ad(mem_block_get_start(block) <= mem_block_get_free(block));
/* In the debug version erase block from top up */
-
- mem_erase_buf(old_top, (byte*)block + block->len - old_top);
+ {
+ ulint len = (byte*)block + block->len - old_top;
+ mem_erase_buf(old_top, len);
+ UNIV_MEM_FREE(old_top, len);
+ }
/* Update allocated memory count */
mutex_enter(&mem_hash_mutex);
mem_current_allocated_memory -= (total_size - size);
mutex_exit(&mem_hash_mutex);
-
-#endif
+#else /* UNIV_MEM_DEBUG */
+ UNIV_MEM_FREE(old_top, (byte*)block + block->len - old_top);
+#endif /* UNIV_MEM_DEBUG */
/* If free == start, we may free the block if it is not the first
one */
@@ -369,6 +377,8 @@ mem_heap_free_top(
if ((heap != block) && (mem_block_get_free(block)
== mem_block_get_start(block))) {
mem_heap_block_free(heap, block);
+ } else {
+ UNIV_MEM_FREE((byte*) block + mem_block_get_free(block), n);
}
}
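/* Editor's note on the mem_heap_alloc() hunks above (an inference, not
stated in the patch): the UNIV_MEM_ALLOC before memset() makes the
carved-out region addressable so the zero-fill itself is legal under
Valgrind, while the final unconditional UNIV_MEM_ALLOC re-marks the
buffer as undefined, so reads of bytes the caller never initialized are
still reported even in UNIV_SET_MEM_TO_ZERO builds. */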
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index 5ffcdf7e58c..9eb44d3f4a8 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -94,7 +94,8 @@ log. */
#define OS_FILE_PATH_ERROR 74
#define OS_FILE_AIO_RESOURCES_RESERVED 75 /* wait for OS aio resources
to become available again */
-#define OS_FILE_ERROR_NOT_SPECIFIED 76
+#define OS_FILE_SHARING_VIOLATION 76
+#define OS_FILE_ERROR_NOT_SPECIFIED 77
/* Types for aio operations */
#define OS_FILE_READ 10
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 833d268c9de..273007c2778 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -531,6 +531,15 @@ page_get_free_space_of_empty(
/* out: free space */
ulint comp) /* in: nonzero=compact page format */
__attribute__((const));
+/*****************************************************************
+Calculates free space if a page is emptied. */
+
+ulint
+page_get_free_space_of_empty_noninline(
+/*===================================*/
+ /* out: free space */
+ ulint comp) /* in: nonzero=compact page format */
+ __attribute__((const));
/****************************************************************
Returns the sum of the sizes of the records in the record list
excluding the infimum and supremum records. */
diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic
index 90a35af74dc..95aa65fabba 100644
--- a/storage/innobase/include/rem0rec.ic
+++ b/storage/innobase/include/rem0rec.ic
@@ -795,7 +795,8 @@ UNIV_INLINE
void
rec_offs_set_n_alloc(
/*=================*/
- ulint* offsets, /* in: array for rec_get_offsets() */
+ ulint* offsets, /* out: array for rec_get_offsets(),
+ must be allocated */
ulint n_alloc) /* in: number of elements */
{
ut_ad(offsets);
@@ -1282,7 +1283,8 @@ UNIV_INLINE
void
rec_offs_set_n_fields(
/*==================*/
- ulint* offsets, /* in: array returned by rec_get_offsets() */
+ ulint* offsets, /* in/out: array returned by
+ rec_get_offsets() */
ulint n_fields) /* in: number of fields */
{
ut_ad(offsets);
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 1448efe94fe..aabb7f5f047 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -670,6 +670,7 @@ struct row_prebuilt_struct {
to this heap */
mem_heap_t* old_vers_heap; /* memory heap where a previous
version is built in consistent read */
+ ulonglong last_value; /* last value of AUTO-INC interval */
ulint magic_n2; /* this should be the same as
magic_n */
};
diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h
index 96273a18cd5..4bde648f18e 100644
--- a/storage/innobase/include/row0sel.h
+++ b/storage/innobase/include/row0sel.h
@@ -171,7 +171,17 @@ row_search_check_if_query_cache_permitted(
trx_t* trx, /* in: transaction object */
const char* norm_name); /* in: concatenation of database name,
'/' char, table name */
+/***********************************************************************
+Read the max AUTOINC value from an index. */
+ulint
+row_search_max_autoinc(
+/*===================*/
+ /* out: DB_SUCCESS if all OK else
+ error code */
+ dict_index_t* index, /* in: index to search */
+ const char* col_name, /* in: autoinc column name */
+ ib_longlong* value); /* out: AUTOINC value read */
/* A structure for caching column values for prefetched rows */
struct sel_buf_struct{
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index f8b5367739a..b41593d0a96 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -231,7 +231,7 @@ rw_lock_s_lock_func(
owns an s-lock here, it may end up in a deadlock with another thread
which requests an x-lock here. Therefore, we will forbid recursive
s-locking of a latch: the following assert will warn the programmer
- of the possibility of a tjis kind of deadlock. If we want to implement
+	of the possibility of this kind of deadlock. If we want to implement
safe recursive s-locking, we should keep in a list the thread ids of
the threads which have s-locked a latch. This would use some CPU
time. */
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index fe36b0d1a01..5017c15aaf0 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -371,6 +371,18 @@ trx_is_interrupted(
#define trx_is_interrupted(trx) FALSE
#endif /* !UNIV_HOTBACKUP */
+/***********************************************************************
+Compares the "weight" (or size) of two transactions. The weight of one
+transaction is estimated as the number of altered rows + the number of
+locked rows. Transactions that have edited non-transactional tables are
+considered heavier than ones that have not. */
+
+int
+trx_weight_cmp(
+/*===========*/
+ /* out: <0, 0 or >0; similar to strcmp(3) */
+ trx_t* a, /* in: the first transaction to be compared */
+ trx_t* b); /* in: the second transaction to be compared */
/* Signal to a transaction */
struct trx_sig_struct{
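/* Editor's note: a hedged sketch of the weight estimate described in
the trx_weight_cmp() comment above; the real implementation lives
elsewhere (the declaration suggests trx0trx.c) and the helper name is
hypothetical. undo_no counts the modified/inserted rows of a
transaction (see the trx_struct comment below) and trx_locks holds its
locks. */

static ib_longlong
trx_weight_example(
/*===============*/
			/* out: estimated transaction weight */
	trx_t*	t)	/* in: transaction */
{
	/* altered rows + locked rows; on top of this, transactions
	that edited non-transactional tables are ranked heavier */
	return(ut_conv_dulint_to_longlong(t->undo_no)
	       + (ib_longlong) UT_LIST_GET_LEN(t->trx_locks));
}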
@@ -453,7 +465,8 @@ struct trx_struct{
dulint table_id; /* table id if the preceding field is
TRUE */
/*------------------------------*/
- int active_trans; /* 1 - if a transaction in MySQL
+ unsigned duplicates:2; /* TRX_DUP_IGNORE | TRX_DUP_REPLACE */
+ unsigned active_trans:2; /* 1 - if a transaction in MySQL
is active. 2 - if prepare_commit_mutex
was taken */
void* mysql_thd; /* MySQL thread handle corresponding
@@ -469,31 +482,6 @@ struct trx_struct{
ib_longlong mysql_log_offset;/* if MySQL binlog is used, this field
contains the end offset of the binlog
entry */
- const char* mysql_master_log_file_name;
- /* if the database server is a MySQL
- replication slave, we have here the
- master binlog name up to which
- replication has processed; otherwise
- this is a pointer to a null
- character */
- ib_longlong mysql_master_log_pos;
- /* if the database server is a MySQL
- replication slave, this is the
- position in the log file up to which
- replication has processed */
- /* A MySQL variable mysql_thd->synchronous_repl tells if we have
- to use synchronous replication. See ha_innodb.cc. */
- char* repl_wait_binlog_name;/* NULL, or if synchronous MySQL
- replication is used, the binlog name
- up to which we must communicate the
- binlog to the slave, before returning
- from a commit; this is the same as
- mysql_log_file_name, but we allocate
- and copy the name to a separate buffer
- here */
- ib_longlong repl_wait_binlog_pos;/* see above at
- repl_wait_binlog_name */
-
os_thread_id_t mysql_thread_id;/* id of the MySQL thread associated
with this transaction object */
ulint mysql_process_no;/* since in Linux, 'top' reports
@@ -602,7 +590,7 @@ struct trx_struct{
NULL */
ibool was_chosen_as_deadlock_victim;
/* when the transaction decides to wait
- for a lock, this it sets this to FALSE;
+ for a lock, it sets this to FALSE;
if another transaction chooses this
transaction as a victim in deadlock
resolution, it sets this to TRUE */
@@ -643,7 +631,12 @@ struct trx_struct{
cannot be any activity in the undo
logs! */
dulint undo_no; /* next undo log record number to
- assign */
+ assign; since the undo log is
+ private for a transaction, this
+ is a simple ascending sequence
+ with no gaps; thus it represents
+ the number of modified/inserted
+ rows in a transaction */
trx_savept_t last_sql_stat_start;
/* undo_no when the last sql statement
was started: in case of an error, trx
@@ -663,6 +656,9 @@ struct trx_struct{
trx_undo_arr_t* undo_no_arr; /* array of undo numbers of undo log
records which are currently processed
by a rollback operation */
+ ulint n_autoinc_rows; /* no. of AUTO-INC rows required for
+ an SQL statement. This is useful for
+ multi-row INSERTs */
/*------------------------------*/
char detailed_error[256]; /* detailed error message for last
error, or empty. */
@@ -673,19 +669,19 @@ struct trx_struct{
single operation of a
transaction, e.g., a parallel
query */
-/* Transaction concurrency states */
+/* Transaction concurrency states (trx->conc_state) */
#define TRX_NOT_STARTED 1
#define TRX_ACTIVE 2
#define TRX_COMMITTED_IN_MEMORY 3
#define TRX_PREPARED 4 /* Support for 2PC/XA */
-/* Transaction execution states when trx state is TRX_ACTIVE */
+/* Transaction execution states when trx->conc_state == TRX_ACTIVE */
#define TRX_QUE_RUNNING 1 /* transaction is running */
#define TRX_QUE_LOCK_WAIT 2 /* transaction is waiting for a lock */
#define TRX_QUE_ROLLING_BACK 3 /* transaction is rolling back */
#define TRX_QUE_COMMITTING 4 /* transaction is committing */
-/* Transaction isolation levels */
+/* Transaction isolation levels (trx->isolation_level) */
#define TRX_ISO_READ_UNCOMMITTED 1 /* dirty read: non-locking
SELECTs are performed so that
we do not look at a possible
@@ -720,6 +716,12 @@ struct trx_struct{
converted to LOCK IN SHARE
MODE reads */
+/* Treatment of duplicate values (trx->duplicates; for example, in inserts).
+Multiple flags can be combined with bitwise OR. */
+#define TRX_DUP_IGNORE 1 /* duplicate rows are to be updated */
+#define TRX_DUP_REPLACE 2 /* duplicate rows are to be replaced */
+
+
/* Types of a trx signal */
#define TRX_SIG_NO_SIGNAL 100
#define TRX_SIG_TOTAL_ROLLBACK 1
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index 87849ab42c3..f53c6b01be4 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -222,13 +222,16 @@ trx_undo_lists_init(
Assigns an undo log for a transaction. A new undo log is created or a cached
undo log reused. */
-trx_undo_t*
+ulint
trx_undo_assign_undo(
/*=================*/
- /* out: the undo log, NULL if did not succeed: out of
- space */
- trx_t* trx, /* in: transaction */
- ulint type); /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
+					/* out: DB_SUCCESS if the undo log
+					assignment succeeded; possible
+					error codes are:
+					ER_TOO_MANY_CONCURRENT_TRXS
+					DB_OUT_OF_FILE_SPACE
+					DB_OUT_OF_MEMORY */
+ trx_t* trx, /* in: transaction */
+ ulint type); /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
/**********************************************************************
Sets the state of the undo log segment at a transaction finish. */
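/* Editor's note: a sketch of the caller contract implied by the changed
return type above; the wrapper function is hypothetical. Failure is now
reported through an error code instead of a NULL undo log pointer. */

static ulint
trx_undo_assign_example(
/*====================*/
			/* out: DB_SUCCESS or error code */
	trx_t*	trx)	/* in: transaction */
{
	ulint	err;

	err = trx_undo_assign_undo(trx, TRX_UNDO_INSERT);

	if (err != DB_SUCCESS) {
		/* one of the error codes listed in the declaration
		above; the caller rolls back or reports the error */
		return(err);
	}

	/* ... proceed to write undo log records ... */

	return(DB_SUCCESS);
}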
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 957baa0391f..ba8e6e56219 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -83,6 +83,8 @@ memory is read outside the allocated blocks. */
/* Make a non-inline debug version */
#if 0
+#define UNIV_DEBUG_VALGRIND /* Enable extra
+ Valgrind instrumentation */
#define UNIV_DEBUG /* Enable ut_ad() assertions */
#define UNIV_LIST_DEBUG /* debug UT_LIST_ macros */
#define UNIV_MEM_DEBUG /* detect memory leaks etc */
@@ -214,6 +216,8 @@ typedef __int64 ib_longlong;
typedef longlong ib_longlong;
#endif
+typedef unsigned long long int ullint;
+
#ifndef __WIN__
#if SIZEOF_LONG != SIZEOF_VOIDP
#error "Error: InnoDB's ulint must be of the same size as void*"
@@ -298,5 +302,17 @@ typedef void* os_thread_ret_t;
#include "ut0dbg.h"
#include "ut0ut.h"
#include "db0err.h"
+#ifdef UNIV_DEBUG_VALGRIND
+# include <valgrind/memcheck.h>
+# define UNIV_MEM_VALID(addr, size) VALGRIND_MAKE_MEM_DEFINED(addr, size)
+# define UNIV_MEM_INVALID(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
+# define UNIV_MEM_FREE(addr, size) VALGRIND_MAKE_MEM_NOACCESS(addr, size)
+# define UNIV_MEM_ALLOC(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
+#else
+# define UNIV_MEM_VALID(addr, size) do {} while(0)
+# define UNIV_MEM_INVALID(addr, size) do {} while(0)
+# define UNIV_MEM_FREE(addr, size) do {} while(0)
+# define UNIV_MEM_ALLOC(addr, size) do {} while(0)
+#endif
#endif
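/* Editor's note: a minimal sketch of the intended use of the macros
above; the function and its arguments are hypothetical and <string.h>
is assumed for memset(). Pool memory is handed out as
addressable-but-undefined and marked no-access again on free, so stale
pointers trip Valgrind. */

static void
univ_mem_lifecycle_example(
/*=======================*/
	byte*	buf,	/* in: buffer carved out of a pool */
	ulint	size)	/* in: size of the buffer in bytes */
{
	UNIV_MEM_ALLOC(buf, size);	/* addressable, contents undefined:
					reading before writing is reported */

	memset(buf, 0, size);		/* writing makes the bytes defined */

	UNIV_MEM_FREE(buf, size);	/* no-access: any later use of buf
					is reported as a stale access */
}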
diff --git a/storage/innobase/include/ut0mem.h b/storage/innobase/include/ut0mem.h
index 90c16f4fad5..e56895bc142 100644
--- a/storage/innobase/include/ut0mem.h
+++ b/storage/innobase/include/ut0mem.h
@@ -63,7 +63,7 @@ ut_test_malloc(
/* out: TRUE if succeeded */
ulint n); /* in: try to allocate this many bytes */
/**************************************************************************
-Frees a memory bloock allocated with ut_malloc. */
+Frees a memory block allocated with ut_malloc. */
void
ut_free(
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 8bfc1edd323..825c10d5f11 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -121,6 +121,11 @@ ut_2_power_up(
/* out: first power of 2 which is >= n */
ulint n) /* in: number != 0 */
__attribute__((const));
+
+/* Determine how many bytes (groups of 8 bits) are needed to
+store the given number of bits. */
+#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)
+
/****************************************************************
Sort function for ulint arrays. */
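/* Editor's note: a tiny illustration of the rounding performed by
UT_BITS_IN_BYTES; the function name is hypothetical. The macro replaces
the open-coded (n + 7) / 8 pattern, e.g. in rem0rec.c later in this
patch. */

static void
ut_bits_in_bytes_example(void)
/*==========================*/
{
	ut_a(UT_BITS_IN_BYTES(0) == 0);
	ut_a(UT_BITS_IN_BYTES(1) == 1);	/* a single bit needs a whole byte */
	ut_a(UT_BITS_IN_BYTES(8) == 1);
	ut_a(UT_BITS_IN_BYTES(9) == 2);	/* the ninth bit spills over */
}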
diff --git a/storage/innobase/lock/Makefile.am b/storage/innobase/lock/Makefile.am
deleted file mode 100644
index 4c6caa49853..00000000000
--- a/storage/innobase/lock/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = liblock.a
-
-liblock_a_SOURCES = lock0lock.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/lock/lock0iter.c b/storage/innobase/lock/lock0iter.c
new file mode 100644
index 00000000000..0afa7019c86
--- /dev/null
+++ b/storage/innobase/lock/lock0iter.c
@@ -0,0 +1,90 @@
+/******************************************************
+Lock queue iterator. Can iterate over table and record
+lock queues.
+
+(c) 2007 Innobase Oy
+
+Created July 16, 2007 Vasil Dimov
+*******************************************************/
+
+#define LOCK_MODULE_IMPLEMENTATION
+
+#include "univ.i"
+#include "lock0iter.h"
+#include "lock0lock.h"
+#include "lock0priv.h"
+#include "ut0dbg.h"
+#include "ut0lst.h"
+
+/***********************************************************************
+Initialize lock queue iterator so that it starts to iterate from
+"lock". bit_no specifies the record number within the heap where the
+record is stored. It can be undefined (ULINT_UNDEFINED) in two cases:
+1. If the lock is a table lock, i.e. we have a table lock queue;
+2. If the lock is a record lock and it is a wait lock. In this case
+ bit_no is calculated in this function by using
+ lock_rec_find_set_bit(). There is exactly one bit set in the bitmap
+ of a wait lock. */
+
+void
+lock_queue_iterator_reset(
+/*======================*/
+ lock_queue_iterator_t* iter, /* out: iterator */
+ lock_t* lock, /* in: lock to start from */
+ ulint bit_no) /* in: record number in the
+ heap */
+{
+ iter->current_lock = lock;
+
+ if (bit_no != ULINT_UNDEFINED) {
+
+ iter->bit_no = bit_no;
+ } else {
+
+ switch (lock_get_type(lock)) {
+ case LOCK_TABLE:
+ iter->bit_no = ULINT_UNDEFINED;
+ break;
+ case LOCK_REC:
+ iter->bit_no = lock_rec_find_set_bit(lock);
+ ut_a(iter->bit_no != ULINT_UNDEFINED);
+ break;
+ default:
+ ut_error;
+ }
+ }
+}
+
+/***********************************************************************
+Gets the previous lock in the lock queue, returns NULL if there are no
+more locks (i.e. the current lock is the first one). The iterator is
+moved backwards (if a non-NULL lock is returned). */
+
+lock_t*
+lock_queue_iterator_get_prev(
+/*=========================*/
+ /* out: previous lock or NULL */
+ lock_queue_iterator_t* iter) /* in/out: iterator */
+{
+ lock_t* prev_lock;
+
+ switch (lock_get_type(iter->current_lock)) {
+ case LOCK_REC:
+ prev_lock = lock_rec_get_prev(
+ iter->current_lock, iter->bit_no);
+ break;
+ case LOCK_TABLE:
+ prev_lock = UT_LIST_GET_PREV(
+ un_member.tab_lock.locks, iter->current_lock);
+ break;
+ default:
+ ut_error;
+ }
+
+ if (prev_lock != NULL) {
+
+ iter->current_lock = prev_lock;
+ }
+
+ return(prev_lock);
+}
diff --git a/storage/innobase/lock/lock0lock.c b/storage/innobase/lock/lock0lock.c
index 93a43d9a30f..39cbf83e58e 100644
--- a/storage/innobase/lock/lock0lock.c
+++ b/storage/innobase/lock/lock0lock.c
@@ -6,10 +6,14 @@ The transaction lock system
Created 5/7/1996 Heikki Tuuri
*******************************************************/
+#define LOCK_MODULE_IMPLEMENTATION
+
#include "lock0lock.h"
+#include "lock0priv.h"
#ifdef UNIV_NONINL
#include "lock0lock.ic"
+#include "lock0priv.ic"
#endif
#include "usr0sess.h"
@@ -319,42 +323,6 @@ ibool lock_print_waits = FALSE;
/* The lock system */
lock_sys_t* lock_sys = NULL;
-/* A table lock */
-typedef struct lock_table_struct lock_table_t;
-struct lock_table_struct{
- dict_table_t* table; /* database table in dictionary cache */
- UT_LIST_NODE_T(lock_t)
- locks; /* list of locks on the same table */
-};
-
-/* Record lock for a page */
-typedef struct lock_rec_struct lock_rec_t;
-struct lock_rec_struct{
- ulint space; /* space id */
- ulint page_no; /* page number */
- ulint n_bits; /* number of bits in the lock bitmap */
- /* NOTE: the lock bitmap is placed immediately
- after the lock struct */
-};
-
-/* Lock struct */
-struct lock_struct{
- trx_t* trx; /* transaction owning the lock */
- UT_LIST_NODE_T(lock_t)
- trx_locks; /* list of the locks of the
- transaction */
- ulint type_mode; /* lock type, mode, LOCK_GAP or
- LOCK_REC_NOT_GAP,
- LOCK_INSERT_INTENTION,
- wait flag, ORed */
- hash_node_t hash; /* hash chain node for a record lock */
- dict_index_t* index; /* index for a record lock */
- union {
- lock_table_t tab_lock;/* table lock */
- lock_rec_t rec_lock;/* record lock */
- } un_member;
-};
-
/* We store info on the latest deadlock error to this buffer. InnoDB
Monitor will then fetch it and print */
ibool lock_deadlock_found = FALSE;
@@ -401,20 +369,6 @@ lock_deadlock_recursive(
return LOCK_VICTIM_IS_START */
/*************************************************************************
-Gets the type of a lock. */
-UNIV_INLINE
-ulint
-lock_get_type(
-/*==========*/
- /* out: LOCK_TABLE or LOCK_REC */
- lock_t* lock) /* in: lock */
-{
- ut_ad(lock);
-
- return(lock->type_mode & LOCK_TYPE_MASK);
-}
-
-/*************************************************************************
Gets the nth bit of a record lock. */
UNIV_INLINE
ibool
@@ -611,8 +565,8 @@ UNIV_INLINE
ulint
lock_get_mode(
/*==========*/
- /* out: mode */
- lock_t* lock) /* in: lock */
+ /* out: mode */
+ const lock_t* lock) /* in: lock */
{
ut_ad(lock);
@@ -1017,7 +971,7 @@ lock_rec_has_to_wait(
/*************************************************************************
Checks if a lock request lock1 has to wait for request lock2. */
-static
+
ibool
lock_has_to_wait(
/*=============*/
@@ -1098,7 +1052,7 @@ lock_rec_set_nth_bit(
/**************************************************************************
Looks for a set bit in a record lock bitmap. Returns ULINT_UNDEFINED,
if none found. */
-static
+
ulint
lock_rec_find_set_bit(
/*==================*/
@@ -1390,7 +1344,7 @@ lock_rec_copy(
/*************************************************************************
Gets the previous record lock set on a record. */
-static
+
lock_t*
lock_rec_get_prev(
/*==============*/
@@ -3174,7 +3128,8 @@ lock_deadlock_occurs(
ulint ret;
ulint cost = 0;
- ut_ad(trx && lock);
+ ut_ad(trx);
+ ut_ad(lock);
ut_ad(mutex_own(&kernel_mutex));
retry:
/* We check that adding this trx to the waits-for graph
@@ -3246,7 +3201,9 @@ lock_deadlock_recursive(
trx_t* lock_trx;
ulint ret;
- ut_a(trx && start && wait_lock);
+ ut_a(trx);
+ ut_a(start);
+ ut_a(wait_lock);
ut_ad(mutex_own(&kernel_mutex));
if (trx->deadlock_mark == 1) {
@@ -3357,8 +3314,8 @@ lock_deadlock_recursive(
return(LOCK_VICTIM_IS_START);
}
- if (ut_dulint_cmp(wait_lock->trx->undo_no,
- start->undo_no) >= 0) {
+ if (trx_weight_cmp(wait_lock->trx,
+ start) >= 0) {
/* Our recursion starting point
transaction is 'smaller', let us
choose 'start' as the victim and roll
@@ -3429,6 +3386,10 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(mutex_own(&kernel_mutex));
+ if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
+ ++table->n_waiting_or_granted_auto_inc_locks;
+ }
+
if (type_mode == LOCK_AUTO_INC) {
/* Only one trx can have the lock on the table
at a time: we may use the memory preallocated
@@ -3479,6 +3440,9 @@ lock_table_remove_low(
if (lock == trx->auto_inc_lock) {
trx->auto_inc_lock = NULL;
+
+ ut_a(table->n_waiting_or_granted_auto_inc_locks > 0);
+ --table->n_waiting_or_granted_auto_inc_locks;
}
UT_LIST_REMOVE(trx_locks, trx->trx_locks, lock);
@@ -4423,12 +4387,9 @@ lock_table_queue_validate(
dict_table_t* table) /* in: table */
{
lock_t* lock;
- ibool is_waiting;
ut_ad(mutex_own(&kernel_mutex));
- is_waiting = FALSE;
-
lock = UT_LIST_GET_FIRST(table->locks);
while (lock) {
@@ -4438,13 +4399,10 @@ lock_table_queue_validate(
if (!lock_get_wait(lock)) {
- ut_a(!is_waiting);
-
ut_a(!lock_table_other_has_incompatible(
lock->trx, 0, table,
lock_get_mode(lock)));
} else {
- is_waiting = TRUE;
ut_a(lock_table_has_to_wait_in_queue(lock));
}
diff --git a/storage/innobase/log/Makefile.am b/storage/innobase/log/Makefile.am
deleted file mode 100644
index a40572a64da..00000000000
--- a/storage/innobase/log/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = liblog.a
-
-liblog_a_SOURCES = log0log.c log0recv.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/log/log0log.c b/storage/innobase/log/log0log.c
index e9dedf6aac4..b10c348b24d 100644
--- a/storage/innobase/log/log0log.c
+++ b/storage/innobase/log/log0log.c
@@ -3039,10 +3039,22 @@ loop:
mutex_enter(&kernel_mutex);
- /* Check that there are no longer transactions. We need this wait
- even for the 'very fast' shutdown, because the InnoDB layer may have
- committed or prepared transactions and we don't want to lose
- them. */
+ /* We need the monitor threads to stop before we proceed with a
+ normal shutdown. In case of very fast shutdown, however, we can
+ proceed without waiting for monitor threads. */
+
+ if (srv_fast_shutdown < 2
+ && (srv_error_monitor_active
+ || srv_lock_timeout_and_monitor_active)) {
+
+ mutex_exit(&kernel_mutex);
+
+ goto loop;
+ }
+
+ /* Check that there are no longer transactions. We need this wait even
+ for the 'very fast' shutdown, because the InnoDB layer may have
+ committed or prepared transactions and we don't want to lose them. */
if (trx_n_mysql_transactions > 0
|| UT_LIST_GET_LEN(trx_sys->trx_list) > 0) {
@@ -3163,22 +3175,8 @@ loop:
goto loop;
}
- /* The lock timeout thread should now have exited */
-
- if (srv_lock_timeout_and_monitor_active) {
-
- goto loop;
- }
-
- /* We now let also the InnoDB error monitor thread to exit */
-
srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE;
- if (srv_error_monitor_active) {
-
- goto loop;
- }
-
/* Make some checks that the server really is quiet */
ut_a(srv_n_threads_active[SRV_MASTER] == 0);
ut_a(buf_all_freed());
diff --git a/storage/innobase/log/log0recv.c b/storage/innobase/log/log0recv.c
index ab5f42e3a13..aef58b7b576 100644
--- a/storage/innobase/log/log0recv.c
+++ b/storage/innobase/log/log0recv.c
@@ -57,6 +57,16 @@ ibool recv_needed_recovery = FALSE;
ibool recv_lsn_checks_on = FALSE;
+/* There are two conditions under which we scan the logs: the first
+is normal startup and the second is recovery from an archive.
+This flag is set if we are doing a scan from the last checkpoint during
+startup. If we find log entries that were written after the last
+checkpoint, we know that the server was not cleanly shut down. We must
+then initialize the crash recovery environment before attempting to
+store these entries in the log hash table. */
+ibool recv_log_scan_is_startup_type = FALSE;
+
/* If the following is TRUE, the buffer pool file pages must be invalidated
after recovery and no ibuf operations are allowed; this becomes TRUE if
the log record hash table becomes too full, and log records must be merged
@@ -99,6 +109,16 @@ the recovery failed and the database may be corrupt. */
dulint recv_max_page_lsn;
+/* prototypes */
+
+/***********************************************************
+Initialize the crash recovery environment. May be called only when
+recv_needed_recovery == FALSE. */
+static
+void
+recv_init_crash_recovery(void);
+/*===========================*/
+
/************************************************************
Creates the recovery system. */
@@ -2284,6 +2304,23 @@ recv_scan_log_recs(
if (ut_dulint_cmp(scanned_lsn, recv_sys->scanned_lsn) > 0) {
+			/* We have found more entries. If this scan is
+			of startup type, we must initialize the crash
+			recovery environment before parsing these log
+			records. */
+
+ if (recv_log_scan_is_startup_type
+ && !recv_needed_recovery) {
+
+ fprintf(stderr,
+ "InnoDB: Log scan progressed"
+ " past the checkpoint lsn %lu %lu\n",
+ (ulong) ut_dulint_get_high(
+ recv_sys->scanned_lsn),
+ (ulong) ut_dulint_get_low(
+ recv_sys->scanned_lsn));
+ recv_init_crash_recovery();
+ }
+
/* We were able to find more log data: add it to the
parsing buffer if parse_start_lsn is already
non-zero */
@@ -2405,6 +2442,47 @@ recv_group_scan_log_recs(
#endif /* UNIV_DEBUG */
}
+/***********************************************************
+Initialize the crash recovery environment. May be called only when
+recv_needed_recovery == FALSE. */
+static
+void
+recv_init_crash_recovery(void)
+/*==========================*/
+{
+ ut_a(!recv_needed_recovery);
+
+ recv_needed_recovery = TRUE;
+
+ ut_print_timestamp(stderr);
+
+ fprintf(stderr,
+ " InnoDB: Database was not"
+ " shut down normally!\n"
+ "InnoDB: Starting crash recovery.\n");
+
+ fprintf(stderr,
+ "InnoDB: Reading tablespace information"
+ " from the .ibd files...\n");
+
+ fil_load_single_table_tablespaces();
+
+ /* If we are using the doublewrite method, we will
+ check if there are half-written pages in data files,
+ and restore them from the doublewrite buffer if
+ possible */
+
+ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
+
+ fprintf(stderr,
+ "InnoDB: Restoring possible"
+ " half-written data pages from"
+ " the doublewrite\n"
+ "InnoDB: buffer...\n");
+ trx_sys_doublewrite_init_or_restore_pages(TRUE);
+ }
+}
+
/************************************************************
Recovers from a checkpoint. When this function returns, the database is able
to start processing of new user transactions, but the function
@@ -2532,92 +2610,6 @@ recv_recovery_from_checkpoint_start(
recv_sys->recovered_lsn = checkpoint_lsn;
srv_start_lsn = checkpoint_lsn;
-
- /* NOTE: we always do a 'recovery' at startup, but only if
- there is something wrong we will print a message to the
- user about recovery: */
-
- if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) != 0
- || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) {
-
- if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn)
- < 0) {
- fprintf(stderr,
- "InnoDB: #########################"
- "#################################\n"
- "InnoDB: "
- "WARNING!\n"
- "InnoDB: The log sequence number"
- " in ibdata files is higher\n"
- "InnoDB: than the log sequence number"
- " in the ib_logfiles! Are you sure\n"
- "InnoDB: you are using the right"
- " ib_logfiles to start up"
- " the database?\n"
- "InnoDB: Log sequence number in"
- " ib_logfiles is %lu %lu, log\n"
- "InnoDB: sequence numbers stamped"
- " to ibdata file headers are between\n"
- "InnoDB: %lu %lu and %lu %lu.\n"
- "InnoDB: #########################"
- "#################################\n",
- (ulong) ut_dulint_get_high(
- checkpoint_lsn),
- (ulong) ut_dulint_get_low(
- checkpoint_lsn),
- (ulong) ut_dulint_get_high(
- min_flushed_lsn),
- (ulong) ut_dulint_get_low(
- min_flushed_lsn),
- (ulong) ut_dulint_get_high(
- max_flushed_lsn),
- (ulong) ut_dulint_get_low(
- max_flushed_lsn));
- }
-
- recv_needed_recovery = TRUE;
-
- ut_print_timestamp(stderr);
-
- fprintf(stderr,
- " InnoDB: Database was not"
- " shut down normally!\n"
- "InnoDB: Starting crash recovery.\n");
-
- fprintf(stderr,
- "InnoDB: Reading tablespace information"
- " from the .ibd files...\n");
-
- fil_load_single_table_tablespaces();
-
- /* If we are using the doublewrite method, we will
- check if there are half-written pages in data files,
- and restore them from the doublewrite buffer if
- possible */
-
- if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
-
- fprintf(stderr,
- "InnoDB: Restoring possible"
- " half-written data pages from"
- " the doublewrite\n"
- "InnoDB: buffer...\n");
- trx_sys_doublewrite_init_or_restore_pages(
- TRUE);
- }
-
- ut_print_timestamp(stderr);
-
- fprintf(stderr,
- " InnoDB: Starting log scan"
- " based on checkpoint at\n"
- "InnoDB: log sequence number %lu %lu.\n",
- (ulong) ut_dulint_get_high(checkpoint_lsn),
- (ulong) ut_dulint_get_low(checkpoint_lsn));
- } else {
- /* Init the doublewrite buffer memory structure */
- trx_sys_doublewrite_init_or_restore_pages(FALSE);
- }
}
contiguous_lsn = ut_dulint_align_down(recv_sys->scanned_lsn,
@@ -2670,6 +2662,8 @@ recv_recovery_from_checkpoint_start(
group = UT_LIST_GET_NEXT(log_groups, group);
}
+	/* Set the flag to indicate that we are doing a startup scan. */
+ recv_log_scan_is_startup_type = (type == LOG_CHECKPOINT);
while (group) {
old_scanned_lsn = recv_sys->scanned_lsn;
@@ -2691,6 +2685,69 @@ recv_recovery_from_checkpoint_start(
group = UT_LIST_GET_NEXT(log_groups, group);
}
+ /* Done with startup scan. Clear the flag. */
+ recv_log_scan_is_startup_type = FALSE;
+ if (type == LOG_CHECKPOINT) {
+ /* NOTE: we always do a 'recovery' at startup, but only if
+ there is something wrong we will print a message to the
+ user about recovery: */
+
+ if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) != 0
+ || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) {
+
+ if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn)
+ < 0) {
+ fprintf(stderr,
+ "InnoDB: #########################"
+ "#################################\n"
+ "InnoDB: "
+ "WARNING!\n"
+ "InnoDB: The log sequence number"
+ " in ibdata files is higher\n"
+ "InnoDB: than the log sequence number"
+ " in the ib_logfiles! Are you sure\n"
+ "InnoDB: you are using the right"
+ " ib_logfiles to start up"
+ " the database?\n"
+ "InnoDB: Log sequence number in"
+ " ib_logfiles is %lu %lu, log\n"
+ "InnoDB: sequence numbers stamped"
+ " to ibdata file headers are between\n"
+ "InnoDB: %lu %lu and %lu %lu.\n"
+ "InnoDB: #########################"
+ "#################################\n",
+ (ulong) ut_dulint_get_high(
+ checkpoint_lsn),
+ (ulong) ut_dulint_get_low(
+ checkpoint_lsn),
+ (ulong) ut_dulint_get_high(
+ min_flushed_lsn),
+ (ulong) ut_dulint_get_low(
+ min_flushed_lsn),
+ (ulong) ut_dulint_get_high(
+ max_flushed_lsn),
+ (ulong) ut_dulint_get_low(
+ max_flushed_lsn));
+
+
+ }
+
+ if (!recv_needed_recovery) {
+ fprintf(stderr,
+ "InnoDB: The log sequence number"
+ " in ibdata files does not match\n"
+ "InnoDB: the log sequence number"
+ " in the ib_logfiles!\n");
+ recv_init_crash_recovery();
+ }
+
+ }
+ if (!recv_needed_recovery) {
+ /* Init the doublewrite buffer memory structure */
+ trx_sys_doublewrite_init_or_restore_pages(FALSE);
+ }
+ }
+
/* We currently have only one log group */
if (ut_dulint_cmp(group_scanned_lsn, checkpoint_lsn) < 0) {
ut_print_timestamp(stderr);
@@ -2747,20 +2804,9 @@ recv_recovery_from_checkpoint_start(
recv_synchronize_groups(up_to_date_group);
if (!recv_needed_recovery) {
- if (ut_dulint_cmp(checkpoint_lsn, recv_sys->recovered_lsn)
- != 0) {
- fprintf(stderr,
- "InnoDB: Warning: we did not need to do"
- " crash recovery, but log scan\n"
- "InnoDB: progressed past the checkpoint"
- " lsn %lu %lu up to lsn %lu %lu\n",
- (ulong) ut_dulint_get_high(checkpoint_lsn),
- (ulong) ut_dulint_get_low(checkpoint_lsn),
- (ulong) ut_dulint_get_high(
- recv_sys->recovered_lsn),
- (ulong) ut_dulint_get_low(
- recv_sys->recovered_lsn));
- }
+ ut_a(ut_dulint_cmp(checkpoint_lsn,
+ recv_sys->recovered_lsn) == 0);
+
} else {
srv_start_lsn = recv_sys->recovered_lsn;
}
diff --git a/storage/innobase/mach/Makefile.am b/storage/innobase/mach/Makefile.am
deleted file mode 100644
index 1a59cb3e4d7..00000000000
--- a/storage/innobase/mach/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libmach.a
-
-libmach_a_SOURCES = mach0data.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/mem/Makefile.am b/storage/innobase/mem/Makefile.am
deleted file mode 100644
index 598dbb96124..00000000000
--- a/storage/innobase/mem/Makefile.am
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libmem.a
-
-libmem_a_SOURCES = mem0mem.c mem0pool.c
-
-EXTRA_DIST = mem0dbg.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/mem/mem0mem.c b/storage/innobase/mem/mem0mem.c
index 10b359e8e67..d89a3a55d88 100644
--- a/storage/innobase/mem/mem0mem.c
+++ b/storage/innobase/mem/mem0mem.c
@@ -514,6 +514,7 @@ mem_heap_block_free(
mem_erase_buf((byte*)block, len);
#endif
+ UNIV_MEM_FREE(block, len);
if (init_block) {
/* Do not have to free: do nothing */
diff --git a/storage/innobase/mem/mem0pool.c b/storage/innobase/mem/mem0pool.c
index c010ae61160..27da86a0309 100644
--- a/storage/innobase/mem/mem0pool.c
+++ b/storage/innobase/mem/mem0pool.c
@@ -229,6 +229,8 @@ mem_pool_create(
mem_area_set_size(area, ut_2_exp(i));
mem_area_set_free(area, TRUE);
+ UNIV_MEM_FREE(MEM_AREA_EXTRA_SIZE + (byte*) area,
+ ut_2_exp(i) - MEM_AREA_EXTRA_SIZE);
UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area);
@@ -300,6 +302,7 @@ mem_pool_fill_free_list(
UT_LIST_REMOVE(free_list, pool->free_list[i + 1], area);
area2 = (mem_area_t*)(((byte*)area) + ut_2_exp(i));
+ UNIV_MEM_ALLOC(area2, MEM_AREA_EXTRA_SIZE);
mem_area_set_size(area2, ut_2_exp(i));
mem_area_set_free(area2, TRUE);
@@ -400,6 +403,8 @@ mem_area_alloc(
mutex_exit(&(pool->mutex));
ut_ad(mem_pool_validate(pool));
+ UNIV_MEM_ALLOC(MEM_AREA_EXTRA_SIZE + (byte*)area,
+ ut_2_exp(n) - MEM_AREA_EXTRA_SIZE);
return((void*)(MEM_AREA_EXTRA_SIZE + ((byte*)area)));
}
@@ -482,6 +487,7 @@ mem_area_free(
}
size = mem_area_get_size(area);
+ UNIV_MEM_FREE(ptr, size - MEM_AREA_EXTRA_SIZE);
if (size == 0) {
fprintf(stderr,
diff --git a/storage/innobase/mtr/Makefile.am b/storage/innobase/mtr/Makefile.am
deleted file mode 100644
index 80eb7c907be..00000000000
--- a/storage/innobase/mtr/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libmtr.a
-
-libmtr_a_SOURCES = mtr0mtr.c mtr0log.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/mtr/mtr0log.c b/storage/innobase/mtr/mtr0log.c
index f9704dc2d20..e5d572bbfa7 100644
--- a/storage/innobase/mtr/mtr0log.c
+++ b/storage/innobase/mtr/mtr0log.c
@@ -517,8 +517,9 @@ mlog_parse_index(
n = mach_read_from_2(ptr);
ptr += 2;
n_uniq = mach_read_from_2(ptr);
+ ptr += 2;
ut_ad(n_uniq <= n);
- if (end_ptr < ptr + (n + 1) * 2) {
+ if (end_ptr < ptr + n * 2) {
return(NULL);
}
} else {
@@ -531,18 +532,18 @@ mlog_parse_index(
ind->table = table;
ind->n_uniq = (unsigned int) n_uniq;
if (n_uniq != n) {
+ ut_a(n_uniq + DATA_ROLL_PTR <= n);
ind->type = DICT_CLUSTERED;
}
- /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
- ind->cached = TRUE;
if (comp) {
for (i = 0; i < n; i++) {
- ulint len = mach_read_from_2(ptr += 2);
+ ulint len = mach_read_from_2(ptr);
+ ptr += 2;
/* The high-order bit of len is the NOT NULL flag;
the rest is 0 or 0x7fff for variable-length fields,
and 1..0x7ffe for fixed-length fields. */
dict_mem_table_add_col(
- table, "DUMMY",
+ table, NULL, NULL,
((len + 1) & 0x7fff) <= 1
? DATA_BINARY : DATA_FIXBINARY,
len & 0x8000 ? DATA_NOT_NULL : 0,
@@ -552,8 +553,23 @@ mlog_parse_index(
dict_table_get_nth_col(table, i),
0);
}
- ptr += 2;
+ dict_table_add_system_columns(table, table->heap);
+ if (n_uniq != n) {
+ /* Identify DB_TRX_ID and DB_ROLL_PTR in the index. */
+ ut_a(DATA_TRX_ID_LEN
+ == dict_index_get_nth_col(ind, DATA_TRX_ID - 1
+ + n_uniq)->len);
+ ut_a(DATA_ROLL_PTR_LEN
+ == dict_index_get_nth_col(ind, DATA_ROLL_PTR - 1
+ + n_uniq)->len);
+ ind->fields[DATA_TRX_ID - 1 + n_uniq].col
+ = &table->cols[n + DATA_TRX_ID];
+ ind->fields[DATA_ROLL_PTR - 1 + n_uniq].col
+ = &table->cols[n + DATA_ROLL_PTR];
+ }
}
+ /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
+ ind->cached = TRUE;
*index = ind;
return(ptr);
}
diff --git a/storage/innobase/os/Makefile.am b/storage/innobase/os/Makefile.am
deleted file mode 100644
index d5c45eba54e..00000000000
--- a/storage/innobase/os/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libos.a
-
-libos_a_SOURCES = os0proc.c os0sync.c os0thread.c os0file.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/os/os0file.c b/storage/innobase/os/os0file.c
index c18ba047d4e..78140cc5ecf 100644
--- a/storage/innobase/os/os0file.c
+++ b/storage/innobase/os/os0file.c
@@ -250,6 +250,15 @@ os_file_get_last_error(
"InnoDB: the directory. It may also be"
" you have created a subdirectory\n"
"InnoDB: of the same name as a data file.\n");
+ } else if (err == ERROR_SHARING_VIOLATION
+ || err == ERROR_LOCK_VIOLATION) {
+ fprintf(stderr,
+ "InnoDB: The error means that another program"
+ " is using InnoDB's files.\n"
+ "InnoDB: This might be a backup or antivirus"
+ " software or another instance\n"
+ "InnoDB: of MySQL."
+ " Please close it to get rid of this error.\n");
} else {
fprintf(stderr,
"InnoDB: Some operating system error numbers"
@@ -268,6 +277,9 @@ os_file_get_last_error(
return(OS_FILE_DISK_FULL);
} else if (err == ERROR_FILE_EXISTS) {
return(OS_FILE_ALREADY_EXISTS);
+ } else if (err == ERROR_SHARING_VIOLATION
+ || err == ERROR_LOCK_VIOLATION) {
+ return(OS_FILE_SHARING_VIOLATION);
} else {
return(100 + err);
}
@@ -388,6 +400,10 @@ os_file_handle_error_cond_exit(
|| err == OS_FILE_PATH_ERROR) {
return(FALSE);
+ } else if (err == OS_FILE_SHARING_VIOLATION) {
+
+ os_thread_sleep(10000000); /* 10 sec */
+ return(TRUE);
} else {
if (name) {
fprintf(stderr, "InnoDB: File name %s\n", name);
@@ -440,10 +456,9 @@ os_file_handle_error_no_exit(
#undef USE_FILE_LOCK
#define USE_FILE_LOCK
-#if defined(UNIV_HOTBACKUP) || defined(__WIN__) || defined(__FreeBSD__) || defined(__NETWARE__)
+#if defined(UNIV_HOTBACKUP) || defined(__WIN__) || defined(__NETWARE__)
/* InnoDB Hot Backup does not lock the data files.
* On Windows, mandatory locking is used.
- * On FreeBSD with LinuxThreads, advisory locking does not work properly.
*/
# undef USE_FILE_LOCK
#endif
diff --git a/storage/innobase/page/Makefile.am b/storage/innobase/page/Makefile.am
deleted file mode 100644
index 1a5b202a2c9..00000000000
--- a/storage/innobase/page/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libpage.a
-
-libpage_a_SOURCES = page0page.c page0cur.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c
index 4212df7a631..543cf9e34eb 100644
--- a/storage/innobase/page/page0page.c
+++ b/storage/innobase/page/page0page.c
@@ -209,6 +209,18 @@ page_set_max_trx_id(
}
}
+/*****************************************************************
+Calculates free space if a page is emptied. */
+
+ulint
+page_get_free_space_of_empty_noninline(
+/*===================================*/
+ /* out: free space */
+ ulint comp) /* in: nonzero=compact page format */
+{
+ return(page_get_free_space_of_empty(comp));
+}
+
/****************************************************************
Allocates a block of memory from an index page. */
diff --git a/storage/innobase/pars/Makefile.am b/storage/innobase/pars/Makefile.am
deleted file mode 100644
index b10796c3d5e..00000000000
--- a/storage/innobase/pars/Makefile.am
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libpars.a
-
-noinst_HEADERS = pars0grm.h
-
-libpars_a_SOURCES = pars0grm.c lexyy.c pars0opt.c pars0pars.c pars0sym.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/pars/pars0pars.c b/storage/innobase/pars/pars0pars.c
index 16530494a96..89f6f862995 100644
--- a/storage/innobase/pars/pars0pars.c
+++ b/storage/innobase/pars/pars0pars.c
@@ -1640,7 +1640,8 @@ pars_create_table(
while (column) {
dtype = dfield_get_type(que_node_get_val(column));
- dict_mem_table_add_col(table, column->name, dtype->mtype,
+ dict_mem_table_add_col(table, table->heap,
+ column->name, dtype->mtype,
dtype->prtype, dtype->len);
column->resolved = TRUE;
column->token_type = SYM_COLUMN;
diff --git a/storage/innobase/plug.in b/storage/innobase/plug.in
index 59634523399..b252d471fba 100644
--- a/storage/innobase/plug.in
+++ b/storage/innobase/plug.in
@@ -2,13 +2,10 @@ MYSQL_STORAGE_ENGINE(innobase, innodb, [InnoDB Storage Engine],
[Transactional Tables using InnoDB], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(innobase, [storage/innobase])
MYSQL_PLUGIN_STATIC(innobase, [libinnobase.a])
+MYSQL_PLUGIN_DYNAMIC(innobase, [ha_innodb.la])
MYSQL_PLUGIN_ACTIONS(innobase, [
AC_CHECK_LIB(rt, aio_read, [innodb_system_libs="-lrt"])
AC_SUBST(innodb_system_libs)
- AC_PROG_CC
- AC_PROG_RANLIB
- AC_PROG_INSTALL
- AC_PROG_LIBTOOL
AC_CHECK_HEADERS(aio.h sched.h)
AC_CHECK_SIZEOF(int, 4)
AC_CHECK_SIZEOF(long, 4)
@@ -38,37 +35,5 @@ MYSQL_PLUGIN_ACTIONS(innobase, [
openbsd*)
CFLAGS="$CFLAGS -DUNIV_MUST_NOT_INLINE";;
esac
- AC_CONFIG_FILES(
- storage/innobase/ut/Makefile
- storage/innobase/btr/Makefile
- storage/innobase/buf/Makefile
- storage/innobase/data/Makefile
- storage/innobase/dict/Makefile
- storage/innobase/dyn/Makefile
- storage/innobase/eval/Makefile
- storage/innobase/fil/Makefile
- storage/innobase/fsp/Makefile
- storage/innobase/fut/Makefile
- storage/innobase/ha/Makefile
- storage/innobase/ibuf/Makefile
- storage/innobase/lock/Makefile
- storage/innobase/log/Makefile
- storage/innobase/mach/Makefile
- storage/innobase/mem/Makefile
- storage/innobase/mtr/Makefile
- storage/innobase/os/Makefile
- storage/innobase/page/Makefile
- storage/innobase/pars/Makefile
- storage/innobase/que/Makefile
- storage/innobase/read/Makefile
- storage/innobase/rem/Makefile
- storage/innobase/row/Makefile
- storage/innobase/srv/Makefile
- storage/innobase/sync/Makefile
- storage/innobase/thr/Makefile
- storage/innobase/trx/Makefile
- storage/innobase/handler/Makefile
- storage/innobase/usr/Makefile)
])
-MYSQL_PLUGIN_DEPENDS_ON_MYSQL_INTERNALS(innobase, [handler/ha_innodb.cc])
diff --git a/storage/innobase/que/Makefile.am b/storage/innobase/que/Makefile.am
deleted file mode 100644
index 73f3fb07af4..00000000000
--- a/storage/innobase/que/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libque.a
-
-libque_a_SOURCES = que0que.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/read/Makefile.am b/storage/innobase/read/Makefile.am
deleted file mode 100644
index 1e56a9716c3..00000000000
--- a/storage/innobase/read/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libread.a
-
-libread_a_SOURCES = read0read.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/rem/Makefile.am b/storage/innobase/rem/Makefile.am
deleted file mode 100644
index 1026172b815..00000000000
--- a/storage/innobase/rem/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = librem.a
-
-librem_a_SOURCES = rem0rec.c rem0cmp.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/rem/rem0rec.c b/storage/innobase/rem/rem0rec.c
index 549b5ee8b28..64f8e2d319c 100644
--- a/storage/innobase/rem/rem0rec.c
+++ b/storage/innobase/rem/rem0rec.c
@@ -153,7 +153,6 @@ static
void
rec_init_offsets(
/*=============*/
- /* out: the offsets */
rec_t* rec, /* in: physical record */
dict_index_t* index, /* in: record descriptor */
ulint* offsets)/* in/out: array of offsets;
@@ -189,7 +188,7 @@ rec_init_offsets(
}
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
- lens = nulls - (index->n_nullable + 7) / 8;
+ lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
offs = 0;
null_mask = 1;
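
The recurring expression (index->n_nullable + 7) / 8 computes how many bytes the record header needs to hold one null flag per nullable column; this patch replaces each occurrence with UT_BITS_IN_BYTES(). A minimal sketch of the arithmetic, assuming the macro is the usual ceiling division (its name and every use in this diff suggest exactly that):

#include <assert.h>

/* Hypothetical stand-in for the InnoDB macro (assumed definition):
bytes needed to hold b bits, i.e. ceiling division by 8. */
#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)

int main(void)
{
	assert(UT_BITS_IN_BYTES(0) == 0);	/* no nullable columns */
	assert(UT_BITS_IN_BYTES(1) == 1);	/* 1..8 null bits fit in 1 byte */
	assert(UT_BITS_IN_BYTES(8) == 1);
	assert(UT_BITS_IN_BYTES(9) == 2);	/* the 9th bit needs a 2nd byte */
	return(0);
}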
@@ -304,7 +303,7 @@ rec_get_offsets_func(
/* out: the new offsets */
rec_t* rec, /* in: physical record */
dict_index_t* index, /* in: record descriptor */
- ulint* offsets,/* in: array consisting of offsets[0]
+ ulint* offsets,/* in/out: array consisting of offsets[0]
allocated elements, or an array from
rec_get_offsets(), or NULL */
ulint n_fields,/* in: maximum number of initialized fields
@@ -440,7 +439,7 @@ rec_get_converted_size_new(
dtuple_t* dtuple) /* in: data tuple */
{
ulint size = REC_N_NEW_EXTRA_BYTES
- + (index->n_nullable + 7) / 8;
+ + UT_BITS_IN_BYTES(index->n_nullable);
ulint i;
ulint n_fields;
ut_ad(index && dtuple);
@@ -459,10 +458,10 @@ rec_get_converted_size_new(
break;
case REC_STATUS_INFIMUM:
case REC_STATUS_SUPREMUM:
- /* infimum or supremum record, 8 bytes */
- return(size + 8); /* no extra data needed */
+ /* infimum or supremum record, 8 data bytes */
+ return(REC_N_NEW_EXTRA_BYTES + 8);
default:
- ut_a(0);
+ ut_error;
return(ULINT_UNDEFINED);
}
@@ -476,21 +475,31 @@ rec_get_converted_size_new(
len = dtuple_get_nth_field(dtuple, i)->len;
col = dict_field_get_col(field);
- ut_ad(len != UNIV_SQL_NULL || !(col->prtype & DATA_NOT_NULL));
+ ut_ad(dict_col_type_assert_equal(
+ col, dfield_get_type(dtuple_get_nth_field(
+ dtuple, i))));
if (len == UNIV_SQL_NULL) {
/* No length is stored for NULL fields. */
+ ut_ad(!(col->prtype & DATA_NOT_NULL));
continue;
}
ut_ad(len <= col->len || col->mtype == DATA_BLOB);
- ut_ad(!field->fixed_len || len == field->fixed_len);
if (field->fixed_len) {
+ ut_ad(len == field->fixed_len);
+ /* dict_index_add_col() should guarantee this */
+ ut_ad(!field->prefix_len
+ || field->fixed_len == field->prefix_len);
} else if (len < 128
|| (col->len < 256 && col->mtype != DATA_BLOB)) {
size++;
} else {
+ /* For variable-length columns, we look up the
+ maximum length from the column itself. If this
+ is a prefix index column shorter than 256 bytes,
+ this will waste one byte. */
size += 2;
}
size += len;
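
The branch above encodes the compact-format rule for length bytes: a variable-length field stores its length in one byte when the value is under 128 bytes and the column maximum fits in one byte (maximum length below 256 and not a BLOB), otherwise in two. A restatement of just that decision as an illustrative helper, not an InnoDB function:

/* Returns how many length bytes the compact record format spends on
one variable-length field, mirroring the if/else chain above. */
static unsigned int
length_bytes(unsigned int len, unsigned int col_max, int is_blob)
{
	if (len < 128 || (col_max < 256 && !is_blob)) {
		return(1);
	}

	return(2);
}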
@@ -586,7 +595,7 @@ rec_set_nth_field_extern_bit_new(
we do not write to log about the change */
{
byte* nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
- byte* lens = nulls - (index->n_nullable + 7) / 8;
+ byte* lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
ulint i;
ulint n_fields;
ulint null_mask = 1;
@@ -744,7 +753,11 @@ rec_convert_dtuple_to_rec_old(
/* Calculate the offset of the origin in the physical record */
rec = buf + rec_get_converted_extra_size(data_size, n_fields);
-
+#ifdef UNIV_DEBUG
+ /* Suppress Valgrind warnings of ut_ad()
+ in mach_write_to_1(), mach_write_to_2() et al. */
+ memset(buf, 0xff, rec - buf + data_size);
+#endif /* UNIV_DEBUG */
/* Store the number of fields */
rec_set_n_fields_old(rec, n_fields);
@@ -875,7 +888,7 @@ rec_convert_dtuple_to_rec_new(
/* Calculate the offset of the origin in the physical record.
We must loop over all fields to do this. */
- rec += (index->n_nullable + 7) / 8;
+ rec += UT_BITS_IN_BYTES(index->n_nullable);
for (i = 0; i < n_fields; i++) {
if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
@@ -892,6 +905,11 @@ rec_convert_dtuple_to_rec_new(
len = dfield_get_len(field);
fixed_len = dict_index_get_nth_field(index, i)->fixed_len;
+ ut_ad(dict_col_type_assert_equal(
+ dict_field_get_col(dict_index_get_nth_field(
+ index, i)),
+ dfield_get_type(field)));
+
if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) {
if (len == UNIV_SQL_NULL)
continue;
@@ -915,7 +933,7 @@ rec_convert_dtuple_to_rec_new(
init:
end = rec;
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
- lens = nulls - (index->n_nullable + 7) / 8;
+ lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
/* clear the SQL-null flags */
memset (lens + 1, 0, nulls - lens);
@@ -1172,7 +1190,7 @@ rec_copy_prefix_to_buf(
}
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
- lens = nulls - (index->n_nullable + 7) / 8;
+ lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
UNIV_PREFETCH_R(lens);
prefix_len = 0;
null_mask = 1;
diff --git a/storage/innobase/row/Makefile.am b/storage/innobase/row/Makefile.am
deleted file mode 100644
index 6c1f960055d..00000000000
--- a/storage/innobase/row/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = librow.a
-
-librow_a_SOURCES = row0ins.c row0mysql.c row0purge.c row0row.c row0sel.c\
- row0uins.c row0umod.c row0undo.c row0upd.c row0vers.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c
index 1fba0abcdaf..ad14b927170 100644
--- a/storage/innobase/row/row0ins.c
+++ b/storage/innobase/row/row0ins.c
@@ -51,21 +51,6 @@ innobase_invalidate_query_cache(
ulint full_name_len); /* in: full name length where also the null
chars count */
-/**********************************************************************
-This function returns true if
-
-1) SQL-query in the current thread
-is either REPLACE or LOAD DATA INFILE REPLACE.
-
-2) SQL-query in the current thread
-is INSERT ON DUPLICATE KEY UPDATE.
-
-NOTE that /mysql/innobase/row/row0ins.c must contain the
-prototype for this function ! */
-
-ibool
-innobase_query_is_update(void);
-
/*************************************************************************
Creates an insert node struct. */
@@ -448,7 +433,11 @@ row_ins_cascade_calc_update_vec(
ulint i;
ulint j;
- ut_a(node && foreign && cascade && table && index);
+ ut_a(node);
+ ut_a(foreign);
+ ut_a(cascade);
+ ut_a(table);
+ ut_a(index);
/* Calculate the appropriate update vector which will set the fields
in the child index record to the same value (possibly padded with
@@ -791,7 +780,10 @@ row_ins_foreign_check_on_constraint(
trx_t* trx;
mem_heap_t* tmp_heap = NULL;
- ut_a(thr && foreign && pcur && mtr);
+ ut_a(thr);
+ ut_a(foreign);
+ ut_a(pcur);
+ ut_a(mtr);
trx = thr_get_trx(thr);
@@ -1308,7 +1300,8 @@ run_again:
goto exit_func;
}
- ut_a(check_table && check_index);
+ ut_a(check_table);
+ ut_a(check_index);
if (check_table != table) {
/* We already have a LOCK_IX on table, but not necessarily
@@ -1336,11 +1329,9 @@ run_again:
/* Scan index records and check if there is a matching record */
for (;;) {
- page_t* page;
rec = btr_pcur_get_rec(&pcur);
- page = buf_frame_align(rec);
- if (rec == page_get_infimum_rec(page)) {
+ if (page_rec_is_infimum(rec)) {
goto next_rec;
}
@@ -1348,7 +1339,7 @@ run_again:
offsets = rec_get_offsets(rec, check_index,
offsets, ULINT_UNDEFINED, &heap);
- if (rec == page_get_supremum_rec(page)) {
+ if (page_rec_is_supremum(rec)) {
err = row_ins_set_shared_rec_lock(
LOCK_ORDINARY, rec, check_index, offsets, thr);
@@ -1654,6 +1645,7 @@ row_ins_scan_sec_index_for_duplicate(
btr_pcur_t pcur;
ulint err = DB_SUCCESS;
ibool moved;
+ unsigned allow_duplicates;
mtr_t mtr;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
@@ -1684,12 +1676,14 @@ row_ins_scan_sec_index_for_duplicate(
btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr);
+ allow_duplicates = thr_get_trx(thr)->duplicates & TRX_DUP_IGNORE;
+
/* Scan index records and check if there is a duplicate */
for (;;) {
rec = btr_pcur_get_rec(&pcur);
- if (rec == page_get_infimum_rec(buf_frame_align(rec))) {
+ if (page_rec_is_infimum(rec)) {
goto next_rec;
}
@@ -1697,7 +1691,7 @@ row_ins_scan_sec_index_for_duplicate(
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
- if (innobase_query_is_update()) {
+ if (allow_duplicates) {
/* If the SQL-query will update or replace
duplicate key we will take X-lock for
@@ -1826,7 +1820,7 @@ row_ins_duplicate_error_in_clust(
sure that in roll-forward we get the same duplicate
errors as in original execution */
- if (innobase_query_is_update()) {
+ if (trx->duplicates & TRX_DUP_IGNORE) {
/* If the SQL-query will update or replace
duplicate key we will take X-lock for
@@ -1864,7 +1858,7 @@ row_ins_duplicate_error_in_clust(
offsets = rec_get_offsets(rec, cursor->index, offsets,
ULINT_UNDEFINED, &heap);
- if (innobase_query_is_update()) {
+ if (trx->duplicates & TRX_DUP_IGNORE) {
/* If the SQL-query will update or replace
duplicate key we will take X-lock for
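
The replaced innobase_query_is_update() was a callback into the MySQL layer that re-inspected the current statement on every duplicate check; the new test reads the TRX_DUP_IGNORE bit that the handler layer records in trx->duplicates. In row_ins_scan_sec_index_for_duplicate() the result is additionally hoisted into the local allow_duplicates before the scan loop, so the flag is evaluated once per scan rather than once per record.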
diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c
index 7c9427db0d2..b8d201e3da2 100644
--- a/storage/innobase/row/row0mysql.c
+++ b/storage/innobase/row/row0mysql.c
@@ -476,7 +476,8 @@ handle_new_error:
/* MySQL will roll back the latest SQL statement */
} else if (err == DB_ROW_IS_REFERENCED
|| err == DB_NO_REFERENCED_ROW
- || err == DB_CANNOT_ADD_CONSTRAINT) {
+ || err == DB_CANNOT_ADD_CONSTRAINT
+ || err == DB_TOO_MANY_CONCURRENT_TRXS) {
if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
@@ -654,6 +655,8 @@ row_create_prebuilt(
prebuilt->old_vers_heap = NULL;
+ prebuilt->last_value = 0;
+
return(prebuilt);
}
@@ -2893,6 +2896,8 @@ next_rec:
dict_table_change_id_in_cache(table, new_id);
}
+ /* MySQL calls ha_innobase::reset_auto_increment() which does
+ the same thing. */
dict_table_autoinc_initialize(table, 0);
dict_update_statistics(table);
diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c
index efa129d6211..08e50817db9 100644
--- a/storage/innobase/row/row0row.c
+++ b/storage/innobase/row/row0row.c
@@ -142,20 +142,15 @@ row_build_index_entry(
dfield_copy(dfield, dfield2);
/* If a column prefix index, take only the prefix */
- if (ind_field->prefix_len) {
- if (dfield_get_len(dfield2) != UNIV_SQL_NULL) {
+ if (ind_field->prefix_len > 0
+ && dfield_get_len(dfield2) != UNIV_SQL_NULL) {
- storage_len = dtype_get_at_most_n_mbchars(
- col->prtype,
- col->mbminlen, col->mbmaxlen,
- ind_field->prefix_len,
- dfield_get_len(dfield2),
- dfield2->data);
-
- dfield_set_len(dfield, storage_len);
- }
+ storage_len = dtype_get_at_most_n_mbchars(
+ col->prtype, col->mbminlen, col->mbmaxlen,
+ ind_field->prefix_len,
+ dfield_get_len(dfield2), dfield2->data);
- dfield_get_type(dfield)->len = ind_field->prefix_len;
+ dfield_set_len(dfield, storage_len);
}
}
@@ -478,7 +473,9 @@ row_build_row_ref_in_tuple(
ulint* offsets = offsets_;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
- ut_a(ref && index && rec);
+ ut_a(ref);
+ ut_a(index);
+ ut_a(rec);
if (UNIV_UNLIKELY(!index->table)) {
fputs("InnoDB: table ", stderr);
diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c
index a3199055b54..fdf6aa46351 100644
--- a/storage/innobase/row/row0sel.c
+++ b/storage/innobase/row/row0sel.c
@@ -3619,6 +3619,32 @@ shortcut_fails_too_big_rec:
pcur, 0, &mtr);
pcur->trx_if_known = trx;
+
+ rec = btr_pcur_get_rec(pcur);
+
+ if (!moves_up
+ && !page_rec_is_supremum(rec)
+ && set_also_gap_locks
+ && !(srv_locks_unsafe_for_binlog
+ || trx->isolation_level == TRX_ISO_READ_COMMITTED)
+ && prebuilt->select_lock_type != LOCK_NONE) {
+
+ /* Try to place a gap lock on the next index record
+ to prevent phantoms in ORDER BY ... DESC queries */
+
+ offsets = rec_get_offsets(page_rec_get_next(rec),
+ index, offsets,
+ ULINT_UNDEFINED, &heap);
+ err = sel_set_rec_lock(page_rec_get_next(rec),
+ index, offsets,
+ prebuilt->select_lock_type,
+ LOCK_GAP, thr);
+
+ if (err != DB_SUCCESS) {
+
+ goto lock_wait_or_error;
+ }
+ }
} else {
if (mode == PAGE_CUR_G) {
btr_pcur_open_at_index_side(
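
Concretely: a descending scan starts at the high end of its range, so without this lock the gap above the first record visited is unprotected, and a concurrent transaction could insert a row there that a repeated ORDER BY ... DESC query would then see as a phantom. Taking a LOCK_GAP on the successor record closes that window.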
@@ -4493,3 +4519,149 @@ row_search_check_if_query_cache_permitted(
return(ret);
}
+
+/***********************************************************************
+Read the AUTOINC column from the current row. */
+static
+ib_longlong
+row_search_autoinc_read_column(
+/*===========================*/
+ /* out: value read from the column */
+ dict_index_t* index, /* in: index to read from */
+ const rec_t* rec, /* in: current rec */
+ ulint col_no, /* in: column number */
+ ibool unsigned_type) /* in: signed or unsigned flag */
+{
+ ulint len;
+ const byte* data;
+ ib_longlong value;
+ mem_heap_t* heap = NULL;
+ byte dest[sizeof(value)];
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ ulint* offsets = offsets_;
+
+ *offsets_ = sizeof offsets_ / sizeof *offsets_;
+
+ /* TODO: We have to cast away the const of rec for now. This needs
+ to be fixed later.*/
+ offsets = rec_get_offsets(
+ (rec_t*) rec, index, offsets, ULINT_UNDEFINED, &heap);
+
+ /* TODO: We have to cast away the const of rec for now. This needs
+ to be fixed later.*/
+ data = rec_get_nth_field((rec_t*)rec, offsets, col_no, &len);
+
+ ut_a(len != UNIV_SQL_NULL);
+ ut_a(len <= sizeof value);
+
+ /* Copy integer data and restore sign bit */
+ if (unsigned_type || (data[0] & 128))
+ memset(dest, 0x00, sizeof(dest));
+ else
+ memset(dest, 0xff, sizeof(dest));
+
+ memcpy(dest + (sizeof(value) - len), data, len);
+
+ if (!unsigned_type)
+ dest[sizeof(value) - len] ^= 128;
+
+ /* The assumption here is that the AUTOINC value can't be negative.*/
+ value = (((ib_longlong) mach_read_from_4(dest)) << 32) |
+ ((ib_longlong) mach_read_from_4(dest + 4));
+
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+
+ ut_a(value >= 0);
+
+ return(value);
+}
+
+/***********************************************************************
+Get the last row. */
+static
+const rec_t*
+row_search_autoinc_get_rec(
+/*=======================*/
+ /* out: current rec or NULL */
+ btr_pcur_t* pcur, /* in: the current cursor */
+ mtr_t* mtr) /* in: mini transaction */
+{
+ do {
+ const rec_t* rec = btr_pcur_get_rec(pcur);
+
+ if (page_rec_is_user_rec(rec)) {
+ return(rec);
+ }
+ } while (btr_pcur_move_to_prev(pcur, mtr));
+
+ return(NULL);
+}
+
+/***********************************************************************
+Read the max AUTOINC value from an index. */
+
+ulint
+row_search_max_autoinc(
+/*===================*/
+ /* out: DB_SUCCESS if all OK, otherwise
+ an error code; DB_RECORD_NOT_FOUND if the
+ column name can't be found in the index */

+ dict_index_t* index, /* in: index to search */
+ const char* col_name, /* in: name of autoinc column */
+ ib_longlong* value) /* out: AUTOINC value read */
+{
+ ulint i;
+ ulint n_cols;
+ dict_field_t* dfield = NULL;
+ ulint error = DB_SUCCESS;
+
+ n_cols = dict_index_get_n_ordering_defined_by_user(index);
+
+ /* Search the index for the AUTOINC column name */
+ for (i = 0; i < n_cols; ++i) {
+ dfield = dict_index_get_nth_field(index, i);
+
+ if (strcmp(col_name, dfield->name) == 0) {
+ break;
+ }
+ }
+
+ *value = 0;
+
+ /* Must find the AUTOINC column name */
+ if (i < n_cols && dfield) {
+ mtr_t mtr;
+ btr_pcur_t pcur;
+
+ mtr_start(&mtr);
+
+ /* Open at the high/right end (FALSE), and INIT
+ cursor (TRUE) */
+ btr_pcur_open_at_index_side(
+ FALSE, index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
+
+ if (page_get_n_recs(btr_pcur_get_page(&pcur)) > 0) {
+ const rec_t* rec;
+
+ rec = row_search_autoinc_get_rec(&pcur, &mtr);
+
+ if (rec != NULL) {
+ ibool unsigned_type = (
+ dfield->col->prtype & DATA_UNSIGNED);
+
+ *value = row_search_autoinc_read_column(
+ index, rec, i, unsigned_type);
+ }
+ }
+
+ btr_pcur_close(&pcur);
+
+ mtr_commit(&mtr);
+ } else {
+ error = DB_RECORD_NOT_FOUND;
+ }
+
+ return(error);
+}
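
The byte shuffling in row_search_autoinc_read_column() above relies on InnoDB's on-disk integer format: big-endian bytes, with the sign bit of signed types inverted so that memcmp() order equals numeric order. A self-contained sketch of the same decode, with hypothetical helper names and a plain loop in place of the two mach_read_from_4() calls:

#include <assert.h>
#include <string.h>

typedef unsigned char byte;

/* Decode a stored integer column of `len` bytes (len <= 8) into a
host value, mirroring the function above: sign-extend, then restore
the real sign bit for signed types. Illustrative only. */
static long long
autoinc_decode(const byte* data, unsigned long len, int unsigned_type)
{
	byte		dest[8];
	unsigned long	i;
	long long	value = 0;

	/* Negative signed values have a 0 sign bit on disk. */
	if (unsigned_type || (data[0] & 128)) {
		memset(dest, 0x00, sizeof(dest));
	} else {
		memset(dest, 0xff, sizeof(dest));
	}

	memcpy(dest + (sizeof(dest) - len), data, len);

	if (!unsigned_type) {
		dest[sizeof(dest) - len] ^= 128;  /* restore the sign bit */
	}

	for (i = 0; i < sizeof(dest); i++) {
		value = (value << 8) | dest[i];
	}

	return(value);
}

int main(void)
{
	/* A 4-byte signed INT storing 5 is 0x80000005 on disk
	(sign bit inverted). */
	byte	on_disk[4] = { 0x80, 0x00, 0x00, 0x05 };

	assert(autoinc_decode(on_disk, 4, 0) == 5);
	return(0);
}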
diff --git a/storage/innobase/srv/Makefile.am b/storage/innobase/srv/Makefile.am
deleted file mode 100644
index e0b5b911b04..00000000000
--- a/storage/innobase/srv/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libsrv.a
-
-libsrv_a_SOURCES = srv0srv.c srv0que.c srv0start.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c
index 72e8fe751d0..82b55789be2 100644
--- a/storage/innobase/srv/srv0srv.c
+++ b/storage/innobase/srv/srv0srv.c
@@ -47,6 +47,7 @@ Created 10/8/1995 Heikki Tuuri
#include "dict0boot.h"
#include "srv0start.h"
#include "row0mysql.h"
+#include "ha_prototypes.h"
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
affects only FOREIGN KEY definition parsing */
@@ -180,6 +181,16 @@ dulint srv_archive_recovery_limit_lsn;
ulint srv_lock_wait_timeout = 1024 * 1024 * 1024;
+/* This parameter is used to throttle the number of insert buffers that are
+merged in a batch. By increasing this parameter on a faster disk you can
+possibly reduce the number of I/O operations performed to complete the
+merge operation. The value of this parameter is used as is by the
+background loop when the system is idle (low load), on a busy system
+the parameter is scaled down by a factor of 4, this is to avoid putting
+a heavier load on the I/O sub system. */
+
+ulong srv_insert_buffer_batch_size = 20;
+
char* srv_file_flush_method_str = NULL;
ulint srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
ulint srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
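
With the default of 20 this keeps the old behaviour: the busy-system call sites below pass srv_insert_buffer_batch_size / 4 = 5 pages, matching the previously hard-coded 5, while the idle background loop passes the full 20, matching the previously hard-coded 20.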
@@ -896,7 +907,7 @@ srv_init(void)
/* create dummy table and index for old-style infimum and supremum */
table = dict_mem_table_create("SYS_DUMMY1",
DICT_HDR_SPACE, 1, 0);
- dict_mem_table_add_col(table, "DUMMY", DATA_CHAR,
+ dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR,
DATA_ENGLISH | DATA_NOT_NULL, 8);
srv_sys->dummy_ind1 = dict_mem_index_create(
@@ -907,7 +918,7 @@ srv_init(void)
/* create dummy table and index for new-style infimum and supremum */
table = dict_mem_table_create("SYS_DUMMY2",
DICT_HDR_SPACE, 1, DICT_TF_COMPACT);
- dict_mem_table_add_col(table, "DUMMY", DATA_CHAR,
+ dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR,
DATA_ENGLISH | DATA_NOT_NULL, 8);
srv_sys->dummy_ind2 = dict_mem_index_create(
"SYS_DUMMY2", "SYS_DUMMY2", DICT_HDR_SPACE, 0, 1);
@@ -977,6 +988,17 @@ srv_conc_enter_innodb(
srv_conc_slot_t* slot = NULL;
ulint i;
+ if (trx->mysql_thd != NULL
+ && thd_is_replication_slave_thread(trx->mysql_thd)) {
+
+ /* TODO Do something more interesting (based on a config
+ parameter). Some users want to give the replication
+ thread very low priority; see http://bugs.mysql.com/25078.
+ This can be done by introducing an
+ innodb_replication_delay(ms) config parameter. */
+ return;
+ }
+
/* If trx has 'free tickets' to enter the engine left, then use one
such ticket */
@@ -1017,7 +1039,7 @@ retry:
if (!has_slept && !trx->has_search_latch
&& NULL == UT_LIST_GET_FIRST(trx->trx_locks)) {
- has_slept = TRUE; /* We let is sleep only once to avoid
+ has_slept = TRUE; /* We let it sleep only once to avoid
starvation */
srv_conc_n_waiting_threads++;
@@ -1130,7 +1152,7 @@ srv_conc_force_enter_innodb(
srv_conc_n_threads++;
trx->declared_to_be_inside_innodb = TRUE;
- trx->n_tickets_to_enter_innodb = 0;
+ trx->n_tickets_to_enter_innodb = 1;
os_fast_mutex_unlock(&srv_conc_mutex);
}
@@ -1152,6 +1174,12 @@ srv_conc_force_exit_innodb(
return;
}
+ if (trx->mysql_thd != NULL
+ && thd_is_replication_slave_thread(trx->mysql_thd)) {
+
+ return;
+ }
+
if (trx->declared_to_be_inside_innodb == FALSE) {
return;
@@ -1853,6 +1881,7 @@ srv_lock_timeout_and_monitor_thread(
double time_elapsed;
time_t current_time;
time_t last_table_monitor_time;
+ time_t last_tablespace_monitor_time;
time_t last_monitor_time;
ibool some_waits;
double wait_time;
@@ -1865,6 +1894,7 @@ srv_lock_timeout_and_monitor_thread(
UT_NOT_USED(arg);
srv_last_monitor_time = time(NULL);
last_table_monitor_time = time(NULL);
+ last_tablespace_monitor_time = time(NULL);
last_monitor_time = time(NULL);
loop:
srv_lock_timeout_and_monitor_active = TRUE;
@@ -1901,9 +1931,9 @@ loop:
}
if (srv_print_innodb_tablespace_monitor
- && difftime(current_time, last_table_monitor_time) > 60) {
-
- last_table_monitor_time = time(NULL);
+ && difftime(current_time,
+ last_tablespace_monitor_time) > 60) {
+ last_tablespace_monitor_time = time(NULL);
fputs("========================"
"========================\n",
@@ -2100,7 +2130,7 @@ loop:
os_thread_sleep(2000000);
- if (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) {
+ if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) {
goto loop;
}
@@ -2270,7 +2300,8 @@ loop:
+ buf_pool->n_pages_written;
if (n_pend_ios < 3 && (n_ios - n_ios_old < 5)) {
srv_main_thread_op_info = "doing insert buffer merge";
- ibuf_contract_for_n_pages(TRUE, 5);
+ ibuf_contract_for_n_pages(
+ TRUE, srv_insert_buffer_batch_size / 4);
srv_main_thread_op_info = "flushing log";
@@ -2331,7 +2362,7 @@ loop:
even if the server were active */
srv_main_thread_op_info = "doing insert buffer merge";
- ibuf_contract_for_n_pages(TRUE, 5);
+ ibuf_contract_for_n_pages(TRUE, srv_insert_buffer_batch_size / 4);
srv_main_thread_op_info = "flushing log";
log_buffer_flush_to_disk();
@@ -2469,7 +2500,8 @@ background_loop:
if (srv_fast_shutdown && srv_shutdown_state > 0) {
n_bytes_merged = 0;
} else {
- n_bytes_merged = ibuf_contract_for_n_pages(TRUE, 20);
+ n_bytes_merged = ibuf_contract_for_n_pages(
+ TRUE, srv_insert_buffer_batch_size);
}
srv_main_thread_op_info = "reserving kernel mutex";
diff --git a/storage/innobase/srv/srv0start.c b/storage/innobase/srv/srv0start.c
index 25f6f05e878..dac84e1410d 100644
--- a/storage/innobase/srv/srv0start.c
+++ b/storage/innobase/srv/srv0start.c
@@ -1025,6 +1025,12 @@ innobase_start_or_create_for_mysql(void)
"InnoDB: !!!!!!!! UNIV_DEBUG switched on !!!!!!!!!\n");
#endif
+#ifdef UNIV_IBUF_DEBUG
+ fprintf(stderr,
+ "InnoDB: !!!!!!!! UNIV_IBUF_DEBUG switched on !!!!!!!!!\n"
+ "InnoDB: Crash recovery will fail with UNIV_IBUF_DEBUG\n");
+#endif
+
#ifdef UNIV_SYNC_DEBUG
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_SYNC_DEBUG switched on !!!!!!!!!\n");
diff --git a/storage/innobase/sync/Makefile.am b/storage/innobase/sync/Makefile.am
deleted file mode 100644
index 7cf274b64e8..00000000000
--- a/storage/innobase/sync/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003-2004 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libsync.a
-
-libsync_a_SOURCES = sync0arr.c sync0rw.c sync0sync.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/sync/sync0arr.c b/storage/innobase/sync/sync0arr.c
index e45cd48a6b4..82b08a890e0 100644
--- a/storage/innobase/sync/sync0arr.c
+++ b/storage/innobase/sync/sync0arr.c
@@ -670,7 +670,9 @@ sync_array_detect_deadlock(
ibool ret;
rw_lock_debug_t*debug;
- ut_a(arr && start && cell);
+ ut_a(arr);
+ ut_a(start);
+ ut_a(cell);
ut_ad(cell->wait_object);
ut_ad(os_thread_get_curr_id() == start->thread);
ut_ad(depth < 100);
diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c
index 34b45e2c1c3..4db780c8b3f 100644
--- a/storage/innobase/sync/sync0rw.c
+++ b/storage/innobase/sync/sync0rw.c
@@ -15,16 +15,34 @@ Created 9/11/1995 Heikki Tuuri
#include "mem0mem.h"
#include "srv0srv.h"
+/* number of system calls made during shared latching */
ulint rw_s_system_call_count = 0;
+
+/* number of spin waits on rw-latches,
+resulted during shared (read) locks */
ulint rw_s_spin_wait_count = 0;
+
+/* number of OS waits on rw-latches,
+resulted during shared (read) locks */
ulint rw_s_os_wait_count = 0;
+/* number of unlocks (that unlock shared locks),
+set only when UNIV_SYNC_PERF_STAT is defined */
ulint rw_s_exit_count = 0;
+/* number of system calls made during exclusive latching */
ulint rw_x_system_call_count = 0;
+
+/* number of spin waits on rw-latches,
+resulted during exclusive (write) locks */
ulint rw_x_spin_wait_count = 0;
+
+/* number of OS waits on rw-latches,
+resulted during exclusive (write) locks */
ulint rw_x_os_wait_count = 0;
+/* number of unlocks (that unlock exclusive locks),
+set only when UNIV_SYNC_PERF_STAT is defined */
ulint rw_x_exit_count = 0;
/* The global list of rw-locks */
diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c
index 672e1f93aad..bf3f4d1ff20 100644
--- a/storage/innobase/sync/sync0sync.c
+++ b/storage/innobase/sync/sync0sync.c
@@ -115,6 +115,7 @@ ulint mutex_system_call_count = 0;
/* Number of spin waits on mutexes: for performance monitoring */
+/* round=one iteration of a spin loop */
ulint mutex_spin_round_count = 0;
ulint mutex_spin_wait_count = 0;
ulint mutex_os_wait_count = 0;
diff --git a/storage/innobase/thr/Makefile.am b/storage/innobase/thr/Makefile.am
deleted file mode 100644
index febcdf3e1a3..00000000000
--- a/storage/innobase/thr/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libthr.a
-
-libthr_a_SOURCES = thr0loc.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/trx/Makefile.am b/storage/innobase/trx/Makefile.am
deleted file mode 100644
index f9722454ef5..00000000000
--- a/storage/innobase/trx/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libtrx.a
-
-libtrx_a_SOURCES = trx0purge.c trx0rec.c trx0roll.c trx0rseg.c\
- trx0sys.c trx0trx.c trx0undo.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c
index 69e858fe71d..50f8b011463 100644
--- a/storage/innobase/trx/trx0rec.c
+++ b/storage/innobase/trx/trx0rec.c
@@ -1024,6 +1024,7 @@ trx_undo_report_row_operation(
ibool is_insert;
trx_rseg_t* rseg;
mtr_t mtr;
+ ulint err = DB_SUCCESS;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -1035,7 +1036,7 @@ trx_undo_report_row_operation(
*roll_ptr = ut_dulint_zero;
- return(DB_SUCCESS);
+ return(err);
}
ut_ad(thr);
@@ -1053,7 +1054,7 @@ trx_undo_report_row_operation(
if (trx->insert_undo == NULL) {
- trx_undo_assign_undo(trx, TRX_UNDO_INSERT);
+ err = trx_undo_assign_undo(trx, TRX_UNDO_INSERT);
}
undo = trx->insert_undo;
@@ -1063,7 +1064,7 @@ trx_undo_report_row_operation(
if (trx->update_undo == NULL) {
- trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
+ err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
}
@@ -1071,11 +1072,11 @@ trx_undo_report_row_operation(
is_insert = FALSE;
}
- if (undo == NULL) {
- /* Did not succeed: out of space */
+ if (err != DB_SUCCESS) {
+ /* Did not succeed: return the error encountered */
mutex_exit(&(trx->undo_mutex));
- return(DB_OUT_OF_FILE_SPACE);
+ return(err);
}
page_no = undo->last_page_no;
@@ -1107,7 +1108,9 @@ trx_undo_report_row_operation(
if (offset == 0) {
/* The record did not fit on the page. We erase the
end segment of the undo log page and write a log
- record of it to to ensure deterministic contents. */
+ record of it: this is to ensure that in the debug
+ version the replicate page constructed using the log
+ records stays identical to the original page */
trx_undo_erase_page_end(undo_page, &mtr);
}
@@ -1163,7 +1166,7 @@ trx_undo_report_row_operation(
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
- return(DB_SUCCESS);
+ return(err);
}
/*============== BUILDING PREVIOUS VERSION OF A RECORD ===============*/
diff --git a/storage/innobase/trx/trx0sys.c b/storage/innobase/trx/trx0sys.c
index 307a03bfbc3..40348dd4199 100644
--- a/storage/innobase/trx/trx0sys.c
+++ b/storage/innobase/trx/trx0sys.c
@@ -646,6 +646,7 @@ trx_sys_update_mysql_binlog_offset(
MLOG_4BYTES, mtr);
}
+#ifdef UNIV_HOTBACKUP
/*********************************************************************
Prints to stderr the MySQL binlog info in the system header if the
magic number shows it valid. */
@@ -677,6 +678,7 @@ trx_sys_print_mysql_binlog_offset_from_page(
+ TRX_SYS_MYSQL_LOG_NAME);
}
}
+#endif /* UNIV_HOTBACKUP */
/*********************************************************************
Stores the MySQL binlog offset info in the trx system header if
@@ -868,7 +870,16 @@ trx_sysf_create(
trx_sysf_rseg_set_page_no(sys_header, i, FIL_NULL, mtr);
}
- /* The remaining area (up to the page trailer) is uninitialized. */
+ /* The remaining area (up to the page trailer) is uninitialized.
+ Silence Valgrind warnings about it. */
+ UNIV_MEM_VALID(sys_header + (TRX_SYS_RSEGS
+ + TRX_SYS_N_RSEGS * TRX_SYS_RSEG_SLOT_SIZE
+ + TRX_SYS_RSEG_SPACE),
+ (UNIV_PAGE_SIZE - FIL_PAGE_DATA_END
+ - (TRX_SYS_RSEGS
+ + TRX_SYS_N_RSEGS * TRX_SYS_RSEG_SLOT_SIZE
+ + TRX_SYS_RSEG_SPACE))
+ + page - sys_header);
/* Create the first rollback segment in the SYSTEM tablespace */
page_no = trx_rseg_header_create(TRX_SYS_SPACE, ULINT_MAX, &slot_no,
diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c
index cdea3e9c477..a278ad51984 100644
--- a/storage/innobase/trx/trx0trx.c
+++ b/storage/innobase/trx/trx0trx.c
@@ -25,6 +25,7 @@ Created 3/26/1996 Heikki Tuuri
#include "btr0sea.h"
#include "os0proc.h"
#include "trx0xa.h"
+#include "ha_prototypes.h"
/* Copy of the prototype for innobase_mysql_print_thd: this
copy MUST be equal to the one in mysql/sql/ha_innodb.cc ! */
@@ -130,17 +131,14 @@ trx_create(
trx->mysql_thd = NULL;
trx->mysql_query_str = NULL;
+ trx->active_trans = 0;
+ trx->duplicates = 0;
trx->n_mysql_tables_in_use = 0;
trx->mysql_n_tables_locked = 0;
trx->mysql_log_file_name = NULL;
trx->mysql_log_offset = 0;
- trx->mysql_master_log_file_name = "";
- trx->mysql_master_log_pos = 0;
-
- trx->repl_wait_binlog_name = NULL;
- trx->repl_wait_binlog_pos = 0;
mutex_create(&trx->undo_mutex, SYNC_TRX_UNDO);
@@ -192,6 +190,8 @@ trx_create(
memset(&trx->xid, 0, sizeof(trx->xid));
trx->xid.formatID = -1;
+ trx->n_autoinc_rows = 0;
+
trx_reset_new_rec_lock_info(trx);
return(trx);
@@ -320,11 +320,6 @@ trx_free(
trx_undo_arr_free(trx->undo_no_arr);
}
- if (trx->repl_wait_binlog_name != NULL) {
-
- mem_free(trx->repl_wait_binlog_name);
- }
-
ut_a(UT_LIST_GET_LEN(trx->signals) == 0);
ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0);
@@ -804,14 +799,6 @@ trx_commit_off_kernel(
trx->mysql_log_file_name = NULL;
}
- if (trx->mysql_master_log_file_name[0] != '\0') {
- /* This database server is a MySQL replication slave */
- trx_sys_update_mysql_binlog_offset(
- trx->mysql_master_log_file_name,
- trx->mysql_master_log_pos,
- TRX_SYS_MYSQL_MASTER_LOG_INFO, &mtr);
- }
-
/* The following call commits the mini-transaction, making the
whole transaction committed in the file-based world, at this
log sequence number. The transaction becomes 'durable' when
@@ -1567,19 +1554,21 @@ trx_commit_for_mysql(
the transaction object does not have an InnoDB session object, and we
set the dummy session that we use for all MySQL transactions. */
- mutex_enter(&kernel_mutex);
-
if (trx->sess == NULL) {
/* Open a dummy session */
if (!trx_dummy_sess) {
- trx_dummy_sess = sess_open();
+ mutex_enter(&kernel_mutex);
+
+ if (!trx_dummy_sess) {
+ trx_dummy_sess = sess_open();
+ }
+
+ mutex_exit(&kernel_mutex);
}
trx->sess = trx_dummy_sess;
}
-
- mutex_exit(&kernel_mutex);
trx_start_if_not_started(trx);
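
The reordering above is classic double-checked initialization: the unsynchronized test skips the kernel mutex on every commit once trx_dummy_sess exists, and the re-test under the mutex handles the race where two threads see NULL at the same time. A minimal pthread sketch of the pattern, with illustrative names; like the patch, it assumes aligned pointer stores are atomic on the target platform:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t	kernel_mutex = PTHREAD_MUTEX_INITIALIZER;
static void*		dummy_sess = NULL;

static void* sess_open_stub(void) { static int s; return(&s); }

void*
get_dummy_sess(void)
{
	if (dummy_sess == NULL) {	/* cheap unsynchronized check */
		pthread_mutex_lock(&kernel_mutex);

		if (dummy_sess == NULL) { /* re-check under the mutex */
			dummy_sess = sess_open_stub();
		}

		pthread_mutex_unlock(&kernel_mutex);
	}

	return(dummy_sess);
}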
@@ -1771,6 +1760,61 @@ trx_print(
}
}
+/***********************************************************************
+Compares the "weight" (or size) of two transactions. The weight of one
+transaction is estimated as the number of altered rows + the number of
+locked rows. Transactions that have edited non-transactional tables are
+considered heavier than ones that have not. */
+
+int
+trx_weight_cmp(
+/*===========*/
+ /* out: <0, 0 or >0; similar to strcmp(3) */
+ trx_t* a, /* in: the first transaction to be compared */
+ trx_t* b) /* in: the second transaction to be compared */
+{
+ ibool a_notrans_edit;
+ ibool b_notrans_edit;
+
+ /* If mysql_thd is NULL for a transaction we assume that it has
+ not edited non-transactional tables. */
+
+ a_notrans_edit = a->mysql_thd != NULL
+ && thd_has_edited_nontrans_tables(a->mysql_thd);
+
+ b_notrans_edit = b->mysql_thd != NULL
+ && thd_has_edited_nontrans_tables(b->mysql_thd);
+
+ if (a_notrans_edit && !b_notrans_edit) {
+
+ return(1);
+ }
+
+ if (!a_notrans_edit && b_notrans_edit) {
+
+ return(-1);
+ }
+
+ /* Either both had edited non-transactional tables or both had
+ not; we fall back to comparing the number of altered/locked
+ rows. */
+
+#if 0
+ fprintf(stderr,
+ "%s TRX_WEIGHT(a): %lld+%lu, TRX_WEIGHT(b): %lld+%lu\n",
+ __func__,
+ ut_conv_dulint_to_longlong(a->undo_no),
+ UT_LIST_GET_LEN(a->trx_locks),
+ ut_conv_dulint_to_longlong(b->undo_no),
+ UT_LIST_GET_LEN(b->trx_locks));
+#endif
+
+#define TRX_WEIGHT(t) \
+ ut_dulint_add((t)->undo_no, UT_LIST_GET_LEN((t)->trx_locks))
+
+ return(ut_dulint_cmp(TRX_WEIGHT(a), TRX_WEIGHT(b)));
+}
+
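A toy version of the comparison, using plain integers where the real code uses InnoDB's dulint arithmetic (illustrative types and names only): a transaction that edited non-transactional tables always outweighs one that did not; otherwise weight is rows altered plus rows locked.

#include <assert.h>

struct toy_trx {
	long long	undo_no;	/* rows altered */
	long long	n_locks;	/* rows locked */
	int		nontrans_edit;	/* edited a non-transactional table? */
};

static int
toy_trx_weight_cmp(const struct toy_trx* a, const struct toy_trx* b)
{
	long long	wa;
	long long	wb;

	if (a->nontrans_edit != b->nontrans_edit) {
		return(a->nontrans_edit ? 1 : -1);
	}

	wa = a->undo_no + a->n_locks;
	wb = b->undo_no + b->n_locks;

	return((wa > wb) - (wa < wb));
}

int main(void)
{
	struct toy_trx	small = { 2, 3, 0 };	/* weight 5 */
	struct toy_trx	big = { 100, 50, 0 };	/* weight 150 */
	struct toy_trx	myisam = { 1, 0, 1 };	/* touched MyISAM: heaviest */

	assert(toy_trx_weight_cmp(&small, &big) < 0);
	assert(toy_trx_weight_cmp(&myisam, &big) > 0);
	return(0);
}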
/********************************************************************
Prepares a transaction. */
@@ -1889,7 +1933,7 @@ Does the transaction prepare for MySQL. */
ulint
trx_prepare_for_mysql(
-/*====-=============*/
+/*==================*/
/* out: 0 or error number */
trx_t* trx) /* in: trx handle */
{
diff --git a/storage/innobase/trx/trx0undo.c b/storage/innobase/trx/trx0undo.c
index 831e337f513..64e5ad3c9a8 100644
--- a/storage/innobase/trx/trx0undo.c
+++ b/storage/innobase/trx/trx0undo.c
@@ -373,26 +373,31 @@ trx_undo_page_init(
/*******************************************************************
Creates a new undo log segment in file. */
static
-page_t*
+ulint
trx_undo_seg_create(
/*================*/
- /* out: segment header page x-latched, NULL
- if no space left */
+ /* out: DB_SUCCESS if page creation OK;
+ possible error codes are:
+ DB_TOO_MANY_CONCURRENT_TRXS
+ DB_OUT_OF_FILE_SPACE */
trx_rseg_t* rseg __attribute__((unused)),/* in: rollback segment */
trx_rsegf_t* rseg_hdr,/* in: rollback segment header, page
x-latched */
ulint type, /* in: type of the segment: TRX_UNDO_INSERT or
TRX_UNDO_UPDATE */
ulint* id, /* out: slot index within rseg header */
+ page_t** undo_page,
+ /* out: segment header page x-latched, NULL
+ if there was an error */
mtr_t* mtr) /* in: mtr */
{
ulint slot_no;
ulint space;
- page_t* undo_page;
trx_upagef_t* page_hdr;
trx_usegf_t* seg_hdr;
ulint n_reserved;
ibool success;
+ ulint err = DB_SUCCESS;
ut_ad(mtr && id && rseg_hdr);
ut_ad(mutex_own(&(rseg->mutex)));
@@ -410,7 +415,7 @@ trx_undo_seg_create(
"InnoDB: many active transactions"
" running concurrently?\n");
- return(NULL);
+ return(DB_TOO_MANY_CONCURRENT_TRXS);
}
space = buf_frame_get_space_id(rseg_hdr);
@@ -419,30 +424,30 @@ trx_undo_seg_create(
mtr);
if (!success) {
- return(NULL);
+ return(DB_OUT_OF_FILE_SPACE);
}
/* Allocate a new file segment for the undo log */
- undo_page = fseg_create_general(space, 0,
+ *undo_page = fseg_create_general(space, 0,
TRX_UNDO_SEG_HDR
+ TRX_UNDO_FSEG_HEADER, TRUE, mtr);
fil_space_release_free_extents(space, n_reserved);
- if (undo_page == NULL) {
+ if (*undo_page == NULL) {
/* No space left */
- return(NULL);
+ return(DB_OUT_OF_FILE_SPACE);
}
#ifdef UNIV_SYNC_DEBUG
- buf_page_dbg_add_level(undo_page, SYNC_TRX_UNDO_PAGE);
+ buf_page_dbg_add_level(*undo_page, SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */
- page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
- seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
+ page_hdr = *undo_page + TRX_UNDO_PAGE_HDR;
+ seg_hdr = *undo_page + TRX_UNDO_SEG_HDR;
- trx_undo_page_init(undo_page, type, mtr);
+ trx_undo_page_init(*undo_page, type, mtr);
mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE,
TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE,
@@ -456,10 +461,11 @@ trx_undo_seg_create(
page_hdr + TRX_UNDO_PAGE_NODE, mtr);
trx_rsegf_set_nth_undo(rseg_hdr, slot_no,
- buf_frame_get_page_no(undo_page), mtr);
+ buf_frame_get_page_no(*undo_page), mtr);
+
*id = slot_no;
- return(undo_page);
+ return(err);
}
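
trx_undo_seg_create() shows the refactoring pattern applied throughout this file: a function that returned a pointer, with NULL covering every failure, now returns an error code and hands the object back through an out-parameter, so callers can tell DB_TOO_MANY_CONCURRENT_TRXS apart from DB_OUT_OF_FILE_SPACE. A minimal sketch of the pattern with illustrative names only:

#include <stdlib.h>

enum toy_err { OK, OUT_OF_MEMORY, TOO_MANY_SLOTS };

struct obj { int id; };

/* Before: struct obj* make_obj(int) -- NULL meant "some failure".
After: the error code names the cause, *out carries the object. */
static enum toy_err
make_obj(int slot, struct obj** out)
{
	if (slot < 0) {
		return(TOO_MANY_SLOTS);		/* distinct cause #1 */
	}

	*out = malloc(sizeof(**out));

	if (*out == NULL) {
		return(OUT_OF_MEMORY);		/* distinct cause #2 */
	}

	(*out)->id = slot;
	return(OK);
}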
/**************************************************************************
@@ -1387,6 +1393,11 @@ trx_undo_mem_create(
undo = mem_alloc(sizeof(trx_undo_t));
+ if (undo == NULL) {
+
+ return(NULL);
+ }
+
undo->id = id;
undo->type = type;
undo->state = TRX_UNDO_ACTIVE;
@@ -1464,11 +1475,15 @@ trx_undo_mem_free(
/**************************************************************************
Creates a new undo log. */
static
-trx_undo_t*
+ulint
trx_undo_create(
/*============*/
- /* out: undo log object, NULL if did not
- succeed: out of space */
+ /* out: DB_SUCCESS if successful in creating
+ the new undo log object; possible error
+ codes are:
+ DB_TOO_MANY_CONCURRENT_TRXS
+ DB_OUT_OF_FILE_SPACE
+ DB_OUT_OF_MEMORY */
trx_t* trx, /* in: transaction */
trx_rseg_t* rseg, /* in: rollback segment memory copy */
ulint type, /* in: type of the log: TRX_UNDO_INSERT or
@@ -1476,34 +1491,37 @@ trx_undo_create(
dulint trx_id, /* in: id of the trx for which the undo log
is created */
XID* xid, /* in: X/Open transaction identification*/
+ trx_undo_t** undo, /* out: the new undo log object, undefined
+ if it did not succeed */
mtr_t* mtr) /* in: mtr */
{
trx_rsegf_t* rseg_header;
ulint page_no;
ulint offset;
ulint id;
- trx_undo_t* undo;
page_t* undo_page;
+ ulint err;
ut_ad(mutex_own(&(rseg->mutex)));
if (rseg->curr_size == rseg->max_size) {
- return(NULL);
+ return(DB_OUT_OF_FILE_SPACE);
}
rseg->curr_size++;
rseg_header = trx_rsegf_get(rseg->space, rseg->page_no, mtr);
- undo_page = trx_undo_seg_create(rseg, rseg_header, type, &id, mtr);
+ err = trx_undo_seg_create(rseg, rseg_header, type, &id,
+ &undo_page, mtr);
- if (undo_page == NULL) {
+ if (err != DB_SUCCESS) {
/* Did not succeed */
rseg->curr_size--;
- return(NULL);
+ return(err);
}
page_no = buf_frame_get_page_no(undo_page);
@@ -1515,9 +1533,14 @@ trx_undo_create(
undo_page + offset, mtr);
}
- undo = trx_undo_mem_create(rseg, id, type, trx_id, xid,
+ *undo = trx_undo_mem_create(rseg, id, type, trx_id, xid,
page_no, offset);
- return(undo);
+ if (*undo == NULL) {
+
+ err = DB_OUT_OF_MEMORY;
+ }
+
+ return(err);
}
/*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/
@@ -1634,17 +1657,20 @@ trx_undo_mark_as_dict_operation(
Assigns an undo log for a transaction. A new undo log is created or a cached
undo log reused. */
-trx_undo_t*
+ulint
trx_undo_assign_undo(
/*=================*/
- /* out: the undo log, NULL if did not succeed: out of
- space */
- trx_t* trx, /* in: transaction */
- ulint type) /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
+ /* out: DB_SUCCESS if the undo log was
+ assigned successfully; possible error codes:
+ DB_TOO_MANY_CONCURRENT_TRXS,
+ DB_OUT_OF_FILE_SPACE, DB_OUT_OF_MEMORY */
+ trx_t* trx, /* in: transaction */
+ ulint type) /* in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
{
trx_rseg_t* rseg;
trx_undo_t* undo;
mtr_t mtr;
+ ulint err = DB_SUCCESS;
ut_ad(trx);
ut_ad(trx->rseg);
@@ -1662,15 +1688,11 @@ trx_undo_assign_undo(
undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, &trx->xid,
&mtr);
if (undo == NULL) {
- undo = trx_undo_create(trx, rseg, type, trx->id, &trx->xid,
- &mtr);
- if (undo == NULL) {
- /* Did not succeed */
+ err = trx_undo_create(trx, rseg, type, trx->id, &trx->xid,
+ &undo, &mtr);
+ if (err != DB_SUCCESS) {
- mutex_exit(&(rseg->mutex));
- mtr_commit(&mtr);
-
- return(NULL);
+ goto func_exit;
}
}
@@ -1688,10 +1710,11 @@ trx_undo_assign_undo(
trx_undo_mark_as_dict_operation(trx, undo, &mtr);
}
+func_exit:
mutex_exit(&(rseg->mutex));
mtr_commit(&mtr);
- return(undo);
+ return(err);
}
/**********************************************************************
diff --git a/storage/innobase/usr/Makefile.am b/storage/innobase/usr/Makefile.am
deleted file mode 100644
index ea485022f71..00000000000
--- a/storage/innobase/usr/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libusr.a
-
-libusr_a_SOURCES = usr0sess.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/ut/Makefile.am b/storage/innobase/ut/Makefile.am
deleted file mode 100644
index d79184759c1..00000000000
--- a/storage/innobase/ut/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (C) 2001, 2003 MySQL AB & Innobase Oy
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-include ../include/Makefile.i
-
-noinst_LIBRARIES = libut.a
-
-libut_a_SOURCES = ut0byte.c ut0dbg.c ut0mem.c ut0rnd.c ut0ut.c ut0vec.c ut0list.c ut0wqueue.c
-
-EXTRA_PROGRAMS =
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/storage/innobase/ut/ut0mem.c b/storage/innobase/ut/ut0mem.c
index 4fd515c35e6..b466a5f6872 100644
--- a/storage/innobase/ut/ut0mem.c
+++ b/storage/innobase/ut/ut0mem.c
@@ -162,6 +162,8 @@ retry:
#endif
}
+ UNIV_MEM_ALLOC(ret, n + sizeof(ut_mem_block_t));
+
((ut_mem_block_t*)ret)->size = n + sizeof(ut_mem_block_t);
((ut_mem_block_t*)ret)->magic_n = UT_MEM_MAGIC_N;
diff --git a/storage/innobase/ut/ut0ut.c b/storage/innobase/ut/ut0ut.c
index bc6778f4c2f..389063ad821 100644
--- a/storage/innobase/ut/ut0ut.c
+++ b/storage/innobase/ut/ut0ut.c
@@ -14,6 +14,7 @@ Created 5/11/1994 Heikki Tuuri
#include <stdarg.h>
#include <string.h>
+#include <ctype.h>
#include "ut0sort.h"
#include "trx0trx.h"
diff --git a/storage/myisam/CMakeLists.txt b/storage/myisam/CMakeLists.txt
index ad0efe4a4e4..9d91bf0560a 100644..100755
--- a/storage/myisam/CMakeLists.txt
+++ b/storage/myisam/CMakeLists.txt
@@ -21,7 +21,8 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(myisam ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c ft_stem.c
+
+SET(MYISAM_SOURCES ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c ft_stem.c
ha_myisam.cc
ft_stopwords.c ft_update.c mi_cache.c mi_changed.c mi_check.c
mi_checksum.c mi_close.c mi_create.c mi_dbug.c mi_delete.c
@@ -33,21 +34,27 @@ ADD_LIBRARY(myisam ft_boolean_search.c ft_nlq_search.c ft_parser.c ft_static.c f
mi_unique.c mi_update.c mi_write.c rt_index.c rt_key.c rt_mbr.c
rt_split.c sort.c sp_key.c ft_eval.h myisamdef.h rt_index.h mi_rkey.c)
-ADD_EXECUTABLE(myisam_ftdump myisam_ftdump.c)
-TARGET_LINK_LIBRARIES(myisam_ftdump myisam mysys dbug strings zlib wsock32)
+IF(NOT SOURCE_SUBLIBS)
+
+ ADD_LIBRARY(myisam ${MYISAM_SOURCES})
+
+ ADD_EXECUTABLE(myisam_ftdump myisam_ftdump.c)
+ TARGET_LINK_LIBRARIES(myisam_ftdump myisam mysys debug dbug strings zlib wsock32)
+
+ ADD_EXECUTABLE(myisamchk myisamchk.c)
+ TARGET_LINK_LIBRARIES(myisamchk myisam mysys debug dbug strings zlib wsock32)
-ADD_EXECUTABLE(myisamchk myisamchk.c)
-TARGET_LINK_LIBRARIES(myisamchk myisam mysys dbug strings zlib wsock32)
+ ADD_EXECUTABLE(myisamlog myisamlog.c)
+ TARGET_LINK_LIBRARIES(myisamlog myisam mysys debug dbug strings zlib wsock32)
-ADD_EXECUTABLE(myisamlog myisamlog.c)
-TARGET_LINK_LIBRARIES(myisamlog myisam mysys dbug strings zlib wsock32)
+ ADD_EXECUTABLE(myisampack myisampack.c)
+ TARGET_LINK_LIBRARIES(myisampack myisam mysys debug dbug strings zlib wsock32)
-ADD_EXECUTABLE(myisampack myisampack.c)
-TARGET_LINK_LIBRARIES(myisampack myisam mysys dbug strings zlib wsock32)
+ IF(EMBED_MANIFESTS)
+ MYSQL_EMBED_MANIFEST("myisam_ftdump" "asInvoker")
+ MYSQL_EMBED_MANIFEST("myisamchk" "asInvoker")
+ MYSQL_EMBED_MANIFEST("myisamlog" "asInvoker")
+ MYSQL_EMBED_MANIFEST("myisampack" "asInvoker")
+ ENDIF(EMBED_MANIFESTS)
-IF(EMBED_MANIFESTS)
- MYSQL_EMBED_MANIFEST("myisam_ftdump" "asInvoker")
- MYSQL_EMBED_MANIFEST("myisamchk" "asInvoker")
- MYSQL_EMBED_MANIFEST("myisamlog" "asInvoker")
- MYSQL_EMBED_MANIFEST("myisampack" "asInvoker")
-ENDIF(EMBED_MANIFESTS)
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
index 68076d7e401..15f4e1e1d34 100644
--- a/storage/myisam/ft_boolean_search.c
+++ b/storage/myisam/ft_boolean_search.c
@@ -23,8 +23,14 @@
inside plus subtree. max_docid could be used by any word in plus
subtree, but it could be updated by plus-word only.
+ The fulltext "smarter index merge" optimization assumes that
+ the rows it gets are ordered by doc_id. That is not the case
+ when we search for a word with a truncation operator, which
+ may return rows in random order. Thus we must not use the
+ "smarter index merge" optimization with "trunc-words".
+
The idea is: there is no need to search for docid smaller than
- biggest docid inside current plus subtree.
+ biggest docid inside current plus subtree or any upper plus subtree.
Examples:
+word1 word2
@@ -36,6 +42,13 @@
+(word1 -word2) +(+word3 word4)
share same max_docid
max_docid updated by word3
+ +word1 word2 (+word3 word4 (+word5 word6))
+ three subexpressions (including the top-level one);
+ each has its own max_docid, updated by its plus word.
+ But for the search, word6 uses
+ max(word1.max_docid, word3.max_docid, word5.max_docid),
+ while word4 uses, accordingly,
+ max(word1.max_docid, word3.max_docid).
*/
#define FT_CORE
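
The comment's rule, that a word consults the max_docid of its own plus-subtree and of every enclosing plus-subtree, is implemented further down by the loop added to _ft2_search(). A sketch of that walk with illustrative types (the real code uses set_if_bigger() on the FTB_EXPR chain):

#include <stddef.h>

struct expr {
	struct expr*		up;
	unsigned long long	max_docid;
};

/* Walk from a word's innermost plus-subtree up to the root, taking
the largest max_docid seen along the way. */
static unsigned long long
effective_max_docid(const struct expr* e)
{
	unsigned long long	m = 0;

	for (; e != NULL; e = e->up) {
		if (e->max_docid > m) {
			m = e->max_docid;
		}
	}

	return(m);
}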
@@ -104,14 +117,14 @@ typedef struct st_ftb_word
/* ^^^^^^^^^^^^^^^^^^ FTB_{EXPR,WORD} common section */
my_off_t docid[2]; /* for index search and for scan */
my_off_t key_root;
- my_off_t *max_docid;
+ FTB_EXPR *max_docid_expr;
MI_KEYDEF *keyinfo;
struct st_ftb_word *prev;
float weight;
uint ndepth;
uint len;
uchar off;
- byte word[1];
+ uchar word[1];
} FTB_WORD;
typedef struct st_ft_info
@@ -161,7 +174,7 @@ typedef struct st_my_ftb_param
{
FTB *ftb;
FTB_EXPR *ftbe;
- byte *up_quot;
+ uchar *up_quot;
uint depth;
} MY_FTB_PARAM;
@@ -208,13 +221,13 @@ static int ftb_query_add_word(MYSQL_FTPARSER_PARAM *param,
for (tmp_expr= ftb_param->ftbe; tmp_expr->up; tmp_expr= tmp_expr->up)
if (! (tmp_expr->flags & FTB_FLAG_YES))
break;
- ftbw->max_docid= &tmp_expr->max_docid;
+ ftbw->max_docid_expr= tmp_expr;
/* fall through */
case FT_TOKEN_STOPWORD:
if (! ftb_param->up_quot) break;
phrase_word= (FT_WORD *)alloc_root(&ftb_param->ftb->mem_root, sizeof(FT_WORD));
tmp_element= (LIST *)alloc_root(&ftb_param->ftb->mem_root, sizeof(LIST));
- phrase_word->pos= word;
+ phrase_word->pos= (uchar*) word;
phrase_word->len= word_len;
tmp_element->data= (void *)phrase_word;
ftb_param->ftbe->phrase= list_add(ftb_param->ftbe->phrase, tmp_element);
@@ -240,7 +253,7 @@ static int ftb_query_add_word(MYSQL_FTPARSER_PARAM *param,
if (info->yesno > 0) ftbe->up->ythresh++;
ftb_param->ftbe= ftbe;
ftb_param->depth++;
- ftb_param->up_quot= info->quot;
+ ftb_param->up_quot= (uchar*) info->quot;
break;
case FT_TOKEN_RIGHT_PAREN:
if (ftb_param->ftbe->document)
@@ -274,20 +287,20 @@ static int ftb_parse_query_internal(MYSQL_FTPARSER_PARAM *param,
MY_FTB_PARAM *ftb_param= param->mysql_ftparam;
MYSQL_FTPARSER_BOOLEAN_INFO info;
CHARSET_INFO *cs= ftb_param->ftb->charset;
- char **start= &query;
- char *end= query + len;
+ uchar **start= (uchar**) &query;
+ uchar *end= (uchar*) query + len;
FT_WORD w;
info.prev= ' ';
info.quot= 0;
while (ft_get_word(cs, start, end, &w, &info))
- param->mysql_add_word(param, w.pos, w.len, &info);
+ param->mysql_add_word(param, (char*) w.pos, w.len, &info);
return(0);
}
-static void _ftb_parse_query(FTB *ftb, byte *query, uint len,
- struct st_mysql_ftparser *parser)
+static int _ftb_parse_query(FTB *ftb, uchar *query, uint len,
+ struct st_mysql_ftparser *parser)
{
MYSQL_FTPARSER_PARAM *param;
MY_FTB_PARAM ftb_param;
@@ -295,9 +308,9 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len,
DBUG_ASSERT(parser);
if (ftb->state != UNINITIALIZED)
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
if (! (param= ftparser_call_initializer(ftb->info, ftb->keynr, 0)))
- DBUG_VOID_RETURN;
+ DBUG_RETURN(1);
ftb_param.ftb= ftb;
ftb_param.depth= 0;
@@ -308,12 +321,11 @@ static void _ftb_parse_query(FTB *ftb, byte *query, uint len,
param->mysql_add_word= ftb_query_add_word;
param->mysql_ftparam= (void *)&ftb_param;
param->cs= ftb->charset;
- param->doc= query;
+ param->doc= (char*) query;
param->length= len;
param->flags= 0;
param->mode= MYSQL_FTPARSER_FULL_BOOLEAN_INFO;
- parser->parse(param);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(parser->parse(param));
}
@@ -331,7 +343,7 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
my_bool can_go_down;
MI_INFO *info=ftb->info;
uint off, extra=HA_FT_WLEN+info->s->base.rec_reflength;
- byte *lastkey_buf=ftbw->word+ftbw->off;
+ uchar *lastkey_buf=ftbw->word+ftbw->off;
LINT_INIT(off);
if (ftbw->flags & FTB_FLAG_TRUNC)
@@ -348,11 +360,17 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
else
{
uint sflag= SEARCH_BIGGER;
- if (ftbw->docid[0] < *ftbw->max_docid)
+ my_off_t max_docid=0;
+ FTB_EXPR *tmp;
+
+ for (tmp= ftbw->max_docid_expr; tmp; tmp= tmp->up)
+ set_if_bigger(max_docid, tmp->max_docid);
+
+ if (ftbw->docid[0] < max_docid)
{
sflag|= SEARCH_SAME;
_mi_dpointer(info, (uchar *)(ftbw->word + ftbw->len + HA_FT_WLEN),
- *ftbw->max_docid);
+ max_docid);
}
r=_mi_search(info, ftbw->keyinfo, (uchar*) lastkey_buf,
USE_WHOLE_KEY, sflag, ftbw->key_root);
@@ -431,8 +449,8 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
memcpy(lastkey_buf+off, info->lastkey, info->lastkey_length);
}
ftbw->docid[0]=info->lastpos;
- if (ftbw->flags & FTB_FLAG_YES)
- *ftbw->max_docid= info->lastpos;
+ if (ftbw->flags & FTB_FLAG_YES && !(ftbw->flags & FTB_FLAG_TRUNC))
+ ftbw->max_docid_expr->max_docid= info->lastpos;
return 0;
}
@@ -475,7 +493,8 @@ static void _ftb_init_index_search(FT_INFO *ftb)
ftbe->up->flags|= FTB_FLAG_TRUNC, ftbe=ftbe->up)
{
if (ftbe->flags & FTB_FLAG_NO || /* 2 */
- ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */
+ ftbe->up->ythresh - ftbe->up->yweaks >
+ (uint) test(ftbe->flags & FTB_FLAG_YES)) /* 1 */
{
FTB_EXPR *top_ftbe=ftbe->up;
ftbw->docid[0]=HA_OFFSET_ERROR;
@@ -505,7 +524,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
}
-FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
+FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, uchar *query,
uint query_len, CHARSET_INFO *cs)
{
FTB *ftb;
@@ -538,21 +557,22 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
ftbe->phrase= NULL;
ftbe->document= 0;
ftb->root=ftbe;
- _ftb_parse_query(ftb, query, query_len, keynr == NO_SUCH_KEY ?
- &ft_default_parser :
- info->s->keyinfo[keynr].parser);
+ if (unlikely(_ftb_parse_query(ftb, query, query_len,
+ keynr == NO_SUCH_KEY ? &ft_default_parser :
+ info->s->keyinfo[keynr].parser)))
+ goto err;
/*
Hack: instead of init_queue, we'll use reinit queue to be able
to alloc queue with alloc_root()
*/
- if (! (ftb->queue.root= (byte **)alloc_root(&ftb->mem_root,
+ if (! (ftb->queue.root= (uchar **)alloc_root(&ftb->mem_root,
(ftb->queue.max_elements + 1) *
sizeof(void *))))
goto err;
reinit_queue(&ftb->queue, ftb->queue.max_elements, 0, 0,
- (int (*)(void*, byte*, byte*))FTB_WORD_cmp, 0);
+ (int (*)(void*, uchar*, uchar*))FTB_WORD_cmp, 0);
for (ftbw= ftb->last_word; ftbw; ftbw= ftbw->prev)
- queue_insert(&ftb->queue, (byte *)ftbw);
+ queue_insert(&ftb->queue, (uchar *)ftbw);
ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root,
sizeof(FTB_WORD *)*ftb->queue.elements);
memcpy(ftb->list, ftb->queue.root+1, sizeof(FTB_WORD *)*ftb->queue.elements);
@@ -563,7 +583,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
return ftb;
err:
free_root(& ftb->mem_root, MYF(0));
- my_free((gptr)ftb,MYF(0));
+ my_free((uchar*)ftb,MYF(0));
return 0;
}
@@ -586,7 +606,7 @@ static int ftb_phrase_add_word(MYSQL_FTPARSER_PARAM *param,
MY_FTB_PHRASE_PARAM *phrase_param= param->mysql_ftparam;
FT_WORD *w= (FT_WORD *)phrase_param->document->data;
LIST *phrase, *document;
- w->pos= word;
+ w->pos= (uchar*) word;
w->len= word_len;
phrase_param->document= phrase_param->document->prev;
if (phrase_param->phrase_length > phrase_param->document_length)
@@ -616,12 +636,13 @@ static int ftb_check_phrase_internal(MYSQL_FTPARSER_PARAM *param,
{
FT_WORD word;
MY_FTB_PHRASE_PARAM *phrase_param= param->mysql_ftparam;
- const char *docend= document + len;
- while (ft_simple_get_word(phrase_param->cs, &document, docend, &word, FALSE))
+ const uchar *docend= (uchar*) document + len;
+ while (ft_simple_get_word(phrase_param->cs, (uchar**) &document, docend,
+ &word, FALSE))
{
- param->mysql_add_word(param, word.pos, word.len, 0);
+ param->mysql_add_word(param, (char*) word.pos, word.len, 0);
if (phrase_param->match)
- return 1;
+ break;
}
return 0;
}
@@ -639,9 +660,10 @@ static int ftb_check_phrase_internal(MYSQL_FTPARSER_PARAM *param,
RETURN VALUE
1 is returned if phrase found, 0 else.
+    -1 is returned if an error occurs.
*/
-static int _ftb_check_phrase(FTB *ftb, const byte *document, uint len,
+static int _ftb_check_phrase(FTB *ftb, const uchar *document, uint len,
FTB_EXPR *ftbe, struct st_mysql_ftparser *parser)
{
MY_FTB_PHRASE_PARAM ftb_param;
@@ -663,16 +685,17 @@ static int _ftb_check_phrase(FTB *ftb, const byte *document, uint len,
param->mysql_add_word= ftb_phrase_add_word;
param->mysql_ftparam= (void *)&ftb_param;
param->cs= ftb->charset;
- param->doc= (byte *)document;
+ param->doc= (char *) document;
param->length= len;
param->flags= 0;
param->mode= MYSQL_FTPARSER_WITH_STOPWORDS;
- parser->parse(param);
+ if (unlikely(parser->parse(param)))
+ return -1;
DBUG_RETURN(ftb_param.match ? 1 : 0);
}
-static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig)
+static int _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_orig)
{
FT_SEG_ITERATOR ftsi;
FTB_EXPR *ftbe;
@@ -704,17 +727,19 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_
weight=ftbe->cur_weight*ftbe->weight;
if (mode && ftbe->phrase)
{
- int not_found=1;
+ int found= 0;
memcpy(&ftsi, ftsi_orig, sizeof(ftsi));
- while (_mi_ft_segiterator(&ftsi) && not_found)
+ while (_mi_ft_segiterator(&ftsi) && !found)
{
if (!ftsi.pos)
continue;
- not_found = ! _ftb_check_phrase(ftb, ftsi.pos, ftsi.len,
- ftbe, parser);
+ found= _ftb_check_phrase(ftb, ftsi.pos, ftsi.len, ftbe, parser);
+ if (unlikely(found < 0))
+ return 1;
}
- if (not_found) break;
+ if (!found)
+ break;
} /* ftbe->quot */
}
else
@@ -746,6 +771,7 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_
weight*= ftbe->weight;
}
}
+ return 0;
}
@@ -778,7 +804,11 @@ int ft_boolean_read_next(FT_INFO *ftb, char *record)
{
while (curdoc == (ftbw=(FTB_WORD *)queue_top(& ftb->queue))->docid[0])
{
- _ftb_climb_the_tree(ftb, ftbw, 0);
+ if (unlikely(_ftb_climb_the_tree(ftb, ftbw, 0)))
+ {
+ my_errno= HA_ERR_OUT_OF_MEM;
+ goto err;
+ }
/* update queue */
_ft2_search(ftb, ftbw, 0);
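
_ftb_climb_the_tree used to be void; it now returns non-zero when _ftb_check_phrase reports a parser failure, and ft_boolean_read_next maps that to HA_ERR_OUT_OF_MEM, a coarse but available errno, presumably because the anticipated failure inside this callback chain is allocation. The tri-state convention (1 found, 0 not found, -1 error) folds into a plain failure flag one level up; a sketch with illustrative names:

    /* 'check' mimics _ftb_check_phrase's 1 / 0 / -1 contract. */
    static int check(int fail, int match)
    {
      if (fail)
        return -1;                /* error */
      return match ? 1 : 0;       /* found / not found */
    }

    static int climb(int fail, int match)
    {
      int found = check(fail, match);
      if (found < 0)
        return 1;                 /* propagate failure to the caller */
      /* 'found' would drive the weight accumulation here */
      return 0;
    }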
@@ -800,10 +830,11 @@ int ft_boolean_read_next(FT_INFO *ftb, char *record)
/* Clear all states, except that the table was updated */
info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
- if (!(*info->read_record)(info,curdoc,record))
+ if (!(*info->read_record)(info,curdoc, (uchar*) record))
{
info->update|= HA_STATE_AKTIV; /* Record is read */
- if (ftb->with_scan && ft_boolean_find_relevance(ftb,record,0)==0)
+ if (ftb->with_scan &&
+ ft_boolean_find_relevance(ftb,(uchar*) record,0)==0)
continue; /* no match */
my_errno=0;
goto err;
@@ -854,7 +885,8 @@ static int ftb_find_relevance_add_word(MYSQL_FTPARSER_PARAM *param,
if (ftbw->docid[1] == ftb->info->lastpos)
continue;
ftbw->docid[1]= ftb->info->lastpos;
- _ftb_climb_the_tree(ftb, ftbw, ftb_param->ftsi);
+ if (unlikely(_ftb_climb_the_tree(ftb, ftbw, ftb_param->ftsi)))
+ return 1;
}
return(0);
}
@@ -865,15 +897,15 @@ static int ftb_find_relevance_parse(MYSQL_FTPARSER_PARAM *param,
{
MY_FTB_FIND_PARAM *ftb_param= param->mysql_ftparam;
FT_INFO *ftb= ftb_param->ftb;
- char *end= doc + len;
+ uchar *end= (uchar*) doc + len;
FT_WORD w;
- while (ft_simple_get_word(ftb->charset, &doc, end, &w, TRUE))
- param->mysql_add_word(param, w.pos, w.len, 0);
+ while (ft_simple_get_word(ftb->charset, (uchar**) &doc, end, &w, TRUE))
+ param->mysql_add_word(param, (char*) w.pos, w.len, 0);
return(0);
}
-float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
+float ft_boolean_find_relevance(FT_INFO *ftb, uchar *record, uint length)
{
FTB_EXPR *ftbe;
FT_SEG_ITERATOR ftsi, ftsi2;
@@ -924,9 +956,10 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
{
if (!ftsi.pos)
continue;
- param->doc= (byte *)ftsi.pos;
+ param->doc= (char *)ftsi.pos;
param->length= ftsi.len;
- parser->parse(param);
+ if (unlikely(parser->parse(param)))
+ return 0;
}
ftbe=ftb->root;
if (ftbe->docid[1]==docid && ftbe->cur_weight>0 &&
@@ -948,7 +981,7 @@ void ft_boolean_close_search(FT_INFO *ftb)
delete_tree(& ftb->no_dupes);
}
free_root(& ftb->mem_root, MYF(0));
- my_free((gptr)ftb,MYF(0));
+ my_free((uchar*)ftb,MYF(0));
}
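
Most of the churn in these fulltext files is the retirement of the old byte and gptr aliases in favour of plain uchar*/char*. Beyond consistency, unsigned bytes genuinely matter in this code: tokenizer bytes index charset classification tables, and a plain (possibly signed) char sign-extends. A two-line illustration:

    #include <stdio.h>

    int main(void)
    {
      char          c = (char) 0xE9;   /* may sign-extend to -23 */
      unsigned char u = 0xE9;          /* always 233             */
      printf("%d %u\n", (int) c, (unsigned) u);
      return 0;
    }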
diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c
index 5c6f66897ee..282fa6751d8 100644
--- a/storage/myisam/ft_nlq_search.c
+++ b/storage/myisam/ft_nlq_search.c
@@ -83,7 +83,7 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
word->weight=LWS_FOR_QUERY;
- keylen=_ft_make_key(info,aio->keynr,(char*) keybuff,word,0);
+ keylen=_ft_make_key(info,aio->keynr,keybuff,word,0);
keylen-=HA_FT_WLEN;
doc_cnt=0;
@@ -189,7 +189,7 @@ static int walk_and_push(FT_SUPERDOC *from,
DBUG_ENTER("walk_and_copy");
from->doc.weight+=from->tmp_weight*from->word_ptr->weight;
set_if_smaller(best->elements, ft_query_expansion_limit-1);
- queue_insert(best, (byte *)& from->doc);
+ queue_insert(best, (uchar *)& from->doc);
DBUG_RETURN(0);
}
@@ -201,8 +201,8 @@ static int FT_DOC_cmp(void *unused __attribute__((unused)),
}
-FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, byte *query,
- uint query_len, uint flags, byte *record)
+FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query,
+ uint query_len, uint flags, uchar *record)
{
TREE wtree;
ALL_IN_ONE aio;
@@ -257,8 +257,12 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, byte *query,
{
info->update|= HA_STATE_AKTIV;
ftparser_param->flags= MYSQL_FTFLAGS_NEED_COPY;
- _mi_ft_parse(&wtree, info, keynr, record, ftparser_param,
- &wtree.mem_root);
+ if (unlikely(_mi_ft_parse(&wtree, info, keynr, record, ftparser_param,
+ &wtree.mem_root)))
+ {
+ delete_queue(&best);
+ goto err;
+ }
}
}
delete_queue(&best);
@@ -313,7 +317,7 @@ int ft_nlq_read_next(FT_INFO *handler, char *record)
info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
info->lastpos=handler->doc[handler->curdoc].dpos;
- if (!(*info->read_record)(info,info->lastpos,record))
+ if (!(*info->read_record)(info,info->lastpos,(uchar*) record))
{
info->update|= HA_STATE_AKTIV; /* Record is read */
return 0;
@@ -323,7 +327,7 @@ int ft_nlq_read_next(FT_INFO *handler, char *record)
float ft_nlq_find_relevance(FT_INFO *handler,
- byte *record __attribute__((unused)),
+ uchar *record __attribute__((unused)),
uint length __attribute__((unused)))
{
int a,b,c;
@@ -352,7 +356,7 @@ float ft_nlq_find_relevance(FT_INFO *handler,
void ft_nlq_close_search(FT_INFO *handler)
{
- my_free((gptr)handler,MYF(0));
+ my_free((uchar*)handler,MYF(0));
}
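
Note the cleanup ordering in the _mi_ft_parse failure path above: the best queue is set up with init_queue(), not carved out of a MEM_ROOT, so it has to be torn down explicitly with delete_queue() before jumping to the shared error label, which evidently does not know about it (otherwise the patch would not need the extra call). The general rule, release what the pool does not own before the pooled-cleanup label, in a self-contained sketch with hypothetical names:

    #include <stdlib.h>

    struct queue { void **slots; };

    static int queue_init(struct queue *q, size_t n)
    { return (q->slots = malloc(n * sizeof *q->slots)) == NULL; }

    static void queue_delete(struct queue *q)
    { free(q->slots); q->slots = NULL; }

    static int build(struct queue *q, int parse_fails)
    {
      if (queue_init(q, 16))
        return 1;
      if (parse_fails)
      {
        queue_delete(q);   /* the shared error label doesn't own this */
        goto err;
      }
      return 0;
    err:
      /* pool-backed cleanup would happen here */
      return 1;
    }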
diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c
index 5992d9c118e..df2423aa50f 100644
--- a/storage/myisam/ft_parser.c
+++ b/storage/myisam/ft_parser.c
@@ -78,12 +78,12 @@ FT_WORD * ft_linearize(TREE *wtree, MEM_ROOT *mem_root)
DBUG_RETURN(wlist);
}
-my_bool ft_boolean_check_syntax_string(const byte *str)
+my_bool ft_boolean_check_syntax_string(const uchar *str)
{
uint i, j;
if (!str ||
- (strlen(str)+1 != sizeof(ft_boolean_syntax)) ||
+ (strlen((char*) str)+1 != sizeof(ft_boolean_syntax)) ||
(str[0] != ' ' && str[1] != ' '))
return 1;
for (i=0; i<sizeof(ft_boolean_syntax); i++)
@@ -106,12 +106,13 @@ my_bool ft_boolean_check_syntax_string(const byte *str)
3 - right bracket
4 - stopword found
*/
-byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
- FT_WORD *word, MYSQL_FTPARSER_BOOLEAN_INFO *param)
+uchar ft_get_word(CHARSET_INFO *cs, uchar **start, uchar *end,
+ FT_WORD *word, MYSQL_FTPARSER_BOOLEAN_INFO *param)
{
- byte *doc=*start;
+ uchar *doc=*start;
int ctype;
- uint mwc, length, mbl;
+ uint mwc, length;
+ int mbl;
param->yesno=(FTB_YES==' ') ? 1 : (param->quot != 0);
param->weight_adjust= param->wasign= 0;
@@ -119,14 +120,14 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
while (doc<end)
{
- for (; doc < end; doc+= (mbl > 0 ? mbl : 1))
+ for (; doc < end; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
break;
if (*doc == FTB_RQUOT && param->quot)
{
- param->quot=doc;
+ param->quot= (char*) doc;
*start=doc+1;
param->type= FT_TOKEN_RIGHT_PAREN;
goto ret;
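
The loop step doc += (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)) is the substantive fix in these tokenizer hunks. As I understand the charset API, cs->cset->ctype() returns the positive byte length of a character it classified, a negative length for a multi-byte sequence it scanned but did not classify, and 0 when it could not scan at all; the old step advanced a single byte whenever mbl was not positive, landing in the middle of multi-byte sequences. A self-contained sketch with a stub scanner standing in for ctype():

    #include <stdio.h>

    /* Stub: treats 0xC3 0xA9 (UTF-8 U+00E9) as a 2-byte non-word
       character, everything else as 1 byte, 0 at end of input. */
    static int stub_ctype(const unsigned char *p, const unsigned char *end)
    {
      if (p >= end)
        return 0;
      if (*p == 0xC3 && end - p >= 2)
        return -2;
      return 1;
    }

    int main(void)
    {
      const unsigned char buf[] = { 'a', 0xC3, 0xA9, 'b' };
      const unsigned char *doc = buf, *end = buf + sizeof buf;
      while (doc < end)
      {
        int mbl = stub_ctype(doc, end);
        printf("offset %d: mbl=%d\n", (int) (doc - buf), mbl);
        doc += (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1));
      }
      return 0;
    }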
@@ -137,7 +138,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
{
/* param->prev=' '; */
*start=doc+1;
- if (*doc == FTB_LQUOT) param->quot=*start;
+ if (*doc == FTB_LQUOT)
+ param->quot= (char*) *start;
param->type= (*doc == FTB_RBR ? FT_TOKEN_RIGHT_PAREN : FT_TOKEN_LEFT_PAREN);
goto ret;
}
@@ -157,7 +159,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
}
mwc=length=0;
- for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1))
+ for (word->pos= doc; doc < end; length++,
+ doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
@@ -172,7 +175,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
if ((param->trunc=(doc<end && *doc == FTB_TRUNC)))
doc++;
- if (((length >= ft_min_word_len && !is_stopword(word->pos, word->len))
+ if (((length >= ft_min_word_len && !is_stopword((char*) word->pos,
+ word->len))
|| param->trunc) && length < ft_max_word_len)
{
*start=doc;
@@ -188,7 +192,8 @@ byte ft_get_word(CHARSET_INFO *cs, byte **start, byte *end,
}
if (param->quot)
{
- param->quot=*start=doc;
+ *start= doc;
+ param->quot= (char*) doc;
param->type= 3; /* FT_RBR */
goto ret;
}
@@ -196,17 +201,18 @@ ret:
return param->type;
}
-byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end,
- FT_WORD *word, my_bool skip_stopwords)
+uchar ft_simple_get_word(CHARSET_INFO *cs, uchar **start, const uchar *end,
+ FT_WORD *word, my_bool skip_stopwords)
{
- byte *doc= *start;
- uint mwc, length, mbl;
+ uchar *doc= *start;
+ uint mwc, length;
+ int mbl;
int ctype;
DBUG_ENTER("ft_simple_get_word");
do
{
- for (;; doc+= (mbl > 0 ? mbl : 1))
+ for (;; doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
if (doc >= end)
DBUG_RETURN(0);
@@ -216,7 +222,8 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end,
}
mwc= length= 0;
- for (word->pos= doc; doc < end; length++, doc+= (mbl > 0 ? mbl : 1))
+ for (word->pos= doc; doc < end; length++,
+ doc+= (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1)))
{
mbl= cs->cset->ctype(cs, &ctype, (uchar*)doc, (uchar*)end);
if (true_word_char(ctype, *doc))
@@ -231,7 +238,7 @@ byte ft_simple_get_word(CHARSET_INFO *cs, byte **start, const byte *end,
if (skip_stopwords == FALSE ||
(length >= ft_min_word_len && length < ft_max_word_len &&
- !is_stopword(word->pos, word->len)))
+ !is_stopword((char*) word->pos, word->len)))
{
*start= doc;
DBUG_RETURN(1);
@@ -260,14 +267,14 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param,
wtree= ft_param->wtree;
if (param->flags & MYSQL_FTFLAGS_NEED_COPY)
{
- byte *ptr;
+ uchar *ptr;
DBUG_ASSERT(wtree->with_delete == 0);
- ptr= (byte *)alloc_root(ft_param->mem_root, word_len);
+ ptr= (uchar *)alloc_root(ft_param->mem_root, word_len);
memcpy(ptr, word, word_len);
w.pos= ptr;
}
else
- w.pos= word;
+ w.pos= (uchar*) word;
w.len= word_len;
if (!tree_insert(wtree, &w, 0, wtree->custom_arg))
{
@@ -279,24 +286,25 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param,
static int ft_parse_internal(MYSQL_FTPARSER_PARAM *param,
- char *doc, int doc_len)
+ char *doc_arg, int doc_len)
{
- byte *end=doc+doc_len;
+ uchar *doc= (uchar*) doc_arg;
+ uchar *end= doc + doc_len;
MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam;
TREE *wtree= ft_param->wtree;
FT_WORD w;
DBUG_ENTER("ft_parse_internal");
while (ft_simple_get_word(wtree->custom_arg, &doc, end, &w, TRUE))
- if (param->mysql_add_word(param, w.pos, w.len, 0))
+ if (param->mysql_add_word(param, (char*) w.pos, w.len, 0))
DBUG_RETURN(1);
DBUG_RETURN(0);
}
-int ft_parse(TREE *wtree, byte *doc, int doclen,
- struct st_mysql_ftparser *parser,
- MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root)
+int ft_parse(TREE *wtree, uchar *doc, int doclen,
+ struct st_mysql_ftparser *parser,
+ MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root)
{
MY_FT_PARSER_PARAM my_param;
DBUG_ENTER("ft_parse");
@@ -309,7 +317,7 @@ int ft_parse(TREE *wtree, byte *doc, int doclen,
param->mysql_add_word= ft_add_word;
param->mysql_ftparam= &my_param;
param->cs= wtree->custom_arg;
- param->doc= doc;
+ param->doc= (char*) doc;
param->length= doclen;
param->mode= MYSQL_FTPARSER_SIMPLE_MODE;
DBUG_RETURN(parser->parse(param));
@@ -387,7 +395,9 @@ MYSQL_FTPARSER_PARAM *ftparser_call_initializer(MI_INFO *info,
mysql_add_word == 0 - parser is not initialized
mysql_add_word != 0 - parser is initialized, or no
initialization needed. */
- info->ftparser_param[ftparser_nr].mysql_add_word= (void *)1;
+ info->ftparser_param[ftparser_nr].mysql_add_word=
+ (int (*)(struct st_mysql_ftparser_param *, char *, int,
+ MYSQL_FTPARSER_BOOLEAN_INFO *)) 1;
if (parser->init && parser->init(&info->ftparser_param[ftparser_nr]))
return 0;
}
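
The last hunk replaces the (void *)1 sentinel with a cast to the full mysql_add_word function-pointer type. The stored value is still only a "parser is initialized" marker and is never called while it holds the sentinel; spelling out the target type avoids the non-portable (and in C++ ill-formed) conversion from void* to a function pointer. A sketch of the idiom with illustrative types:

    typedef int (*add_word_fn)(void *param, char *word, int len);

    #define PARSER_INITIALIZED ((add_word_fn) 1)  /* compared, never called */

    struct parser_slot { add_word_fn add_word; };

    static int parser_ready(const struct parser_slot *s)
    {
      return s->add_word != 0;   /* sentinel or real callback: both count */
    }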
diff --git a/storage/myisam/ft_static.c b/storage/myisam/ft_static.c
index 34608be1721..610c20eede6 100644
--- a/storage/myisam/ft_static.c
+++ b/storage/myisam/ft_static.c
@@ -56,8 +56,8 @@ const struct _ft_vft _ft_vft_boolean = {
FT_INFO *ft_init_search(uint flags, void *info, uint keynr,
- byte *query, uint query_len, CHARSET_INFO *cs,
- byte *record)
+ uchar *query, uint query_len, CHARSET_INFO *cs,
+ uchar *record)
{
FT_INFO *res;
if (flags & FT_BOOL)
diff --git a/storage/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c
index 63732ebadc9..59866d9a351 100644
--- a/storage/myisam/ft_stopwords.c
+++ b/storage/myisam/ft_stopwords.c
@@ -38,7 +38,7 @@ static void FT_STOPWORD_free(FT_STOPWORD *w, TREE_FREE action,
void *arg __attribute__((unused)))
{
if (action == free_free)
- my_free((gptr) w->pos, MYF(0));
+ my_free((uchar*) w->pos, MYF(0));
}
static int ft_add_stopword(const char *w)
@@ -65,7 +65,7 @@ int ft_init_stopwords()
{
File fd;
uint len;
- byte *buffer, *start, *end;
+ uchar *buffer, *start, *end;
FT_WORD w;
int error=-1;
@@ -82,7 +82,7 @@ int ft_init_stopwords()
end=start+len;
while (ft_simple_get_word(default_charset_info, &start, end, &w, TRUE))
{
- if (ft_add_stopword(my_strndup(w.pos, w.len, MYF(0))))
+ if (ft_add_stopword(my_strndup((char*) w.pos, w.len, MYF(0))))
goto err1;
}
error=0;
diff --git a/storage/myisam/ft_update.c b/storage/myisam/ft_update.c
index e176d550b1d..e3e4c62158f 100644
--- a/storage/myisam/ft_update.c
+++ b/storage/myisam/ft_update.c
@@ -20,7 +20,7 @@
#include "ftdefs.h"
#include <math.h>
-void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const byte *record,
+void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const uchar *record,
FT_SEG_ITERATOR *ftsi)
{
DBUG_ENTER("_mi_ft_segiterator_init");
@@ -31,7 +31,7 @@ void _mi_ft_segiterator_init(MI_INFO *info, uint keynr, const byte *record,
DBUG_VOID_RETURN;
}
-void _mi_ft_segiterator_dummy_init(const byte *record, uint len,
+void _mi_ft_segiterator_dummy_init(const uchar *record, uint len,
FT_SEG_ITERATOR *ftsi)
{
DBUG_ENTER("_mi_ft_segiterator_dummy_init");
@@ -94,7 +94,7 @@ uint _mi_ft_segiterator(register FT_SEG_ITERATOR *ftsi)
/* parses a document i.e. calls ft_parse for every keyseg */
-uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const byte *record,
+uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const uchar *record,
MYSQL_FTPARSER_PARAM *param, MEM_ROOT *mem_root)
{
FT_SEG_ITERATOR ftsi;
@@ -108,13 +108,13 @@ uint _mi_ft_parse(TREE *parsed, MI_INFO *info, uint keynr, const byte *record,
while (_mi_ft_segiterator(&ftsi))
{
if (ftsi.pos)
- if (ft_parse(parsed, (byte *)ftsi.pos, ftsi.len, parser, param, mem_root))
+ if (ft_parse(parsed, (uchar *)ftsi.pos, ftsi.len, parser, param, mem_root))
DBUG_RETURN(1);
}
DBUG_RETURN(0);
}
-FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const byte *record,
+FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const uchar *record,
MEM_ROOT *mem_root)
{
TREE ptree;
@@ -130,7 +130,7 @@ FT_WORD *_mi_ft_parserecord(MI_INFO *info, uint keynr, const byte *record,
DBUG_RETURN(ft_linearize(&ptree, mem_root));
}
-static int _mi_ft_store(MI_INFO *info, uint keynr, byte *keybuf,
+static int _mi_ft_store(MI_INFO *info, uint keynr, uchar *keybuf,
FT_WORD *wlist, my_off_t filepos)
{
uint key_length;
@@ -145,7 +145,7 @@ static int _mi_ft_store(MI_INFO *info, uint keynr, byte *keybuf,
DBUG_RETURN(0);
}
-static int _mi_ft_erase(MI_INFO *info, uint keynr, byte *keybuf,
+static int _mi_ft_erase(MI_INFO *info, uint keynr, uchar *keybuf,
FT_WORD *wlist, my_off_t filepos)
{
uint key_length, err=0;
@@ -168,7 +168,7 @@ static int _mi_ft_erase(MI_INFO *info, uint keynr, byte *keybuf,
#define THOSE_TWO_DAMN_KEYS_ARE_REALLY_DIFFERENT 1
#define GEE_THEY_ARE_ABSOLUTELY_IDENTICAL 0
-int _mi_ft_cmp(MI_INFO *info, uint keynr, const byte *rec1, const byte *rec2)
+int _mi_ft_cmp(MI_INFO *info, uint keynr, const uchar *rec1, const uchar *rec2)
{
FT_SEG_ITERATOR ftsi1, ftsi2;
CHARSET_INFO *cs=info->s->keyinfo[keynr].seg->charset;
@@ -190,8 +190,8 @@ int _mi_ft_cmp(MI_INFO *info, uint keynr, const byte *rec1, const byte *rec2)
/* update a document entry */
-int _mi_ft_update(MI_INFO *info, uint keynr, byte *keybuf,
- const byte *oldrec, const byte *newrec, my_off_t pos)
+int _mi_ft_update(MI_INFO *info, uint keynr, uchar *keybuf,
+ const uchar *oldrec, const uchar *newrec, my_off_t pos)
{
int error= -1;
FT_WORD *oldlist,*newlist, *old_word, *new_word;
@@ -241,7 +241,7 @@ err:
/* adds a document to the collection */
-int _mi_ft_add(MI_INFO *info, uint keynr, byte *keybuf, const byte *record,
+int _mi_ft_add(MI_INFO *info, uint keynr, uchar *keybuf, const uchar *record,
my_off_t pos)
{
int error= -1;
@@ -260,7 +260,7 @@ int _mi_ft_add(MI_INFO *info, uint keynr, byte *keybuf, const byte *record,
/* removes a document from the collection */
-int _mi_ft_del(MI_INFO *info, uint keynr, byte *keybuf, const byte *record,
+int _mi_ft_del(MI_INFO *info, uint keynr, uchar *keybuf, const uchar *record,
my_off_t pos)
{
int error= -1;
@@ -276,10 +276,10 @@ int _mi_ft_del(MI_INFO *info, uint keynr, byte *keybuf, const byte *record,
DBUG_RETURN(error);
}
-uint _ft_make_key(MI_INFO *info, uint keynr, byte *keybuf, FT_WORD *wptr,
+uint _ft_make_key(MI_INFO *info, uint keynr, uchar *keybuf, FT_WORD *wptr,
my_off_t filepos)
{
- byte buf[HA_FT_MAXBYTELEN+16];
+ uchar buf[HA_FT_MAXBYTELEN+16];
DBUG_ENTER("_ft_make_key");
#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT
diff --git a/storage/myisam/ftdefs.h b/storage/myisam/ftdefs.h
index 26f5e4f266e..22443807b87 100644
--- a/storage/myisam/ftdefs.h
+++ b/storage/myisam/ftdefs.h
@@ -96,44 +96,44 @@
#define FTB_RQUOT (ft_boolean_syntax[11])
typedef struct st_ft_word {
- byte * pos;
+ uchar * pos;
uint len;
double weight;
} FT_WORD;
int is_stopword(char *word, uint len);
-uint _ft_make_key(MI_INFO *, uint , byte *, FT_WORD *, my_off_t);
+uint _ft_make_key(MI_INFO *, uint , uchar *, FT_WORD *, my_off_t);
-byte ft_get_word(CHARSET_INFO *, byte **, byte *, FT_WORD *,
- MYSQL_FTPARSER_BOOLEAN_INFO *);
-byte ft_simple_get_word(CHARSET_INFO *, byte **, const byte *,
- FT_WORD *, my_bool);
+uchar ft_get_word(CHARSET_INFO *, uchar **, uchar *, FT_WORD *,
+ MYSQL_FTPARSER_BOOLEAN_INFO *);
+uchar ft_simple_get_word(CHARSET_INFO *, uchar **, const uchar *,
+ FT_WORD *, my_bool);
typedef struct _st_ft_seg_iterator {
uint num, len;
HA_KEYSEG *seg;
- const byte *rec, *pos;
+ const uchar *rec, *pos;
} FT_SEG_ITERATOR;
-void _mi_ft_segiterator_init(MI_INFO *, uint, const byte *, FT_SEG_ITERATOR *);
-void _mi_ft_segiterator_dummy_init(const byte *, uint, FT_SEG_ITERATOR *);
+void _mi_ft_segiterator_init(MI_INFO *, uint, const uchar *, FT_SEG_ITERATOR *);
+void _mi_ft_segiterator_dummy_init(const uchar *, uint, FT_SEG_ITERATOR *);
uint _mi_ft_segiterator(FT_SEG_ITERATOR *);
void ft_parse_init(TREE *, CHARSET_INFO *);
-int ft_parse(TREE *, byte *, int, struct st_mysql_ftparser *parser,
+int ft_parse(TREE *, uchar *, int, struct st_mysql_ftparser *parser,
MYSQL_FTPARSER_PARAM *, MEM_ROOT *);
FT_WORD * ft_linearize(TREE *, MEM_ROOT *);
-FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const byte *, MEM_ROOT *);
-uint _mi_ft_parse(TREE *, MI_INFO *, uint, const byte *,
+FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const uchar *, MEM_ROOT *);
+uint _mi_ft_parse(TREE *, MI_INFO *, uint, const uchar *,
MYSQL_FTPARSER_PARAM *, MEM_ROOT *);
-FT_INFO *ft_init_nlq_search(MI_INFO *, uint, byte *, uint, uint, byte *);
-FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint, CHARSET_INFO *);
+FT_INFO *ft_init_nlq_search(MI_INFO *, uint, uchar *, uint, uint, uchar *);
+FT_INFO *ft_init_boolean_search(MI_INFO *, uint, uchar *, uint, CHARSET_INFO *);
extern const struct _ft_vft _ft_vft_nlq;
int ft_nlq_read_next(FT_INFO *, char *);
-float ft_nlq_find_relevance(FT_INFO *, byte *, uint);
+float ft_nlq_find_relevance(FT_INFO *, uchar *, uint);
void ft_nlq_close_search(FT_INFO *);
float ft_nlq_get_relevance(FT_INFO *);
my_off_t ft_nlq_get_docid(FT_INFO *);
@@ -141,7 +141,7 @@ void ft_nlq_reinit_search(FT_INFO *);
extern const struct _ft_vft _ft_vft_boolean;
int ft_boolean_read_next(FT_INFO *, char *);
-float ft_boolean_find_relevance(FT_INFO *, byte *, uint);
+float ft_boolean_find_relevance(FT_INFO *, uchar *, uint);
void ft_boolean_close_search(FT_INFO *);
float ft_boolean_get_relevance(FT_INFO *);
my_off_t ft_boolean_get_docid(FT_INFO *);
diff --git a/storage/myisam/fulltext.h b/storage/myisam/fulltext.h
index bea2fa96969..856e93e034d 100644
--- a/storage/myisam/fulltext.h
+++ b/storage/myisam/fulltext.h
@@ -29,9 +29,9 @@
extern const HA_KEYSEG ft_keysegs[FT_SEGS];
-int _mi_ft_cmp(MI_INFO *, uint, const byte *, const byte *);
-int _mi_ft_add(MI_INFO *, uint, byte *, const byte *, my_off_t);
-int _mi_ft_del(MI_INFO *, uint, byte *, const byte *, my_off_t);
+int _mi_ft_cmp(MI_INFO *, uint, const uchar *, const uchar *);
+int _mi_ft_add(MI_INFO *, uint, uchar *, const uchar *, my_off_t);
+int _mi_ft_del(MI_INFO *, uint, uchar *, const uchar *, my_off_t);
uint _mi_ft_convert_to_ft2(MI_INFO *, uint, uchar *);
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index f5086e05487..81690d8feee 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -84,6 +84,14 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
}
length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
name);
+  /*
+    TODO: switch from protocol to push_warning here. The main reason we
+    haven't done it yet is parallel repair, due to the following trace:
+    mi_check_print_msg/push_warning/sql_alloc/my_pthread_getspecific_ptr.
+
+    Also, we likely need to lock a mutex here (in both cases, with protocol
+    and with push_warning).
+  */
protocol->prepare_for_resend();
protocol->store(name, length, system_charset_info);
protocol->store(param->op_name, system_charset_info);
@@ -121,7 +129,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
{
uint i, j, recpos, minpos, fieldpos, temp_length, length;
enum ha_base_keytype type= HA_KEYTYPE_BINARY;
- byte *record;
+ uchar *record;
KEY *pos;
MI_KEYDEF *keydef;
MI_COLUMNDEF *recinfo, *recinfo_pos;
@@ -474,6 +482,7 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
@@ -531,7 +540,7 @@ int ha_myisam::net_read_dump(NET* net)
error= -1;
goto err;
}
- if (my_write(data_fd, (byte*)net->read_pos, (uint) packet_len,
+ if (my_write(data_fd, (uchar*)net->read_pos, (uint) packet_len,
MYF(MY_WME|MY_FNABP)))
{
error = errno;
@@ -550,7 +559,7 @@ int ha_myisam::dump(THD* thd, int fd)
uint blocksize = share->blocksize;
my_off_t bytes_to_read = share->state.state.data_file_length;
int data_fd = file->dfile;
- byte * buf = (byte*) my_malloc(blocksize, MYF(MY_WME));
+ uchar *buf = (uchar*) my_malloc(blocksize, MYF(MY_WME));
if (!buf)
return ENOMEM;
@@ -558,7 +567,7 @@ int ha_myisam::dump(THD* thd, int fd)
my_seek(data_fd, 0L, MY_SEEK_SET, MYF(MY_WME));
for (; bytes_to_read > 0;)
{
- uint bytes = my_read(data_fd, buf, blocksize, MYF(MY_WME));
+ size_t bytes = my_read(data_fd, buf, blocksize, MYF(MY_WME));
if (bytes == MY_FILE_ERROR)
{
error = errno;
@@ -575,7 +584,7 @@ int ha_myisam::dump(THD* thd, int fd)
}
else
{
- if (my_net_write(net, (char*) buf, bytes))
+ if (my_net_write(net, buf, bytes))
{
error = errno ? errno : EPIPE;
goto err;
@@ -586,53 +595,19 @@ int ha_myisam::dump(THD* thd, int fd)
if (fd < 0)
{
- if (my_net_write(net, "", 0))
+ if (my_net_write(net, (uchar*) "", 0))
error = errno ? errno : EPIPE;
net_flush(net);
}
err:
- my_free((gptr) buf, MYF(0));
+ my_free((uchar*) buf, MYF(0));
return error;
}
#endif /* HAVE_REPLICATION */
-bool ha_myisam::check_if_locking_is_allowed(uint sql_command,
- ulong type, TABLE *table,
- uint count, uint current,
- uint *system_count,
- bool called_by_privileged_thread)
-{
- /*
- To be able to open and lock for reading system tables like 'mysql.proc',
- when we already have some tables opened and locked, and avoid deadlocks
- we have to disallow write-locking of these tables with any other tables.
- */
- if (table->s->system_table &&
- table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE)
- (*system_count)++;
-
- /* 'current' is an index, that's why '<=' below. */
- if (*system_count > 0 && *system_count <= current)
- {
- my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0));
- return FALSE;
- }
-
- /*
- Deny locking of the log tables, which is incompatible with
- concurrent insert. Unless called from a logger THD (general_log_thd
- or slow_log_thd) or by a privileged thread.
- */
- if (!called_by_privileged_thread)
- return check_if_log_table_locking_is_allowed(sql_command, type, table);
-
- return TRUE;
-}
-
- /* Name is here without an extension */
-
+/* Name is here without an extension */
int ha_myisam::open(const char *name, int mode, uint test_if_locked)
{
MI_KEYDEF *keyinfo;
@@ -694,10 +669,10 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked)
for (i= 0; i < table->s->keys; i++)
{
- struct st_plugin_int *parser= table->key_info[i].parser;
+ plugin_ref parser= table->key_info[i].parser;
if (table->key_info[i].flags & HA_USES_PARSER)
file->s->keyinfo[i].parser=
- (struct st_mysql_ftparser *)parser->plugin->info;
+ (struct st_mysql_ftparser *)plugin_decl(parser)->info;
table->key_info[i].block_size= file->s->keyinfo[i].block_length;
}
my_errno= 0;
@@ -710,7 +685,7 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked)
recinfo must be freed.
*/
if (recinfo)
- my_free((gptr) recinfo, MYF(0));
+ my_free((uchar*) recinfo, MYF(0));
return my_errno;
}
@@ -721,9 +696,9 @@ int ha_myisam::close(void)
return mi_close(tmp);
}
-int ha_myisam::write_row(byte * buf)
+int ha_myisam::write_row(uchar *buf)
{
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_write_count);
/* If we have a timestamp column, update it to the current time */
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
@@ -1037,8 +1012,8 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
param.sort_buffer_length= check_opt->sort_buffer_size;
if ((error= repair(thd,param,1)) && param.retry_repair)
{
- sql_print_warning("Warning: Optimize table got errno %d, retrying",
- my_errno);
+ sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying",
+ my_errno, param.db_name, param.table_name);
param.testflag&= ~T_REP_BY_SORT;
error= repair(thd,param,1);
}
@@ -1216,11 +1191,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
table->keys_in_use_for_query.clear_all();
if (table_list->process_index_hints(table))
- {
- errmsg= thd->net.last_error;
- error= HA_ADMIN_FAILED;
- goto err;
- }
+ DBUG_RETURN(HA_ADMIN_FAILED);
map= ~(ulonglong) 0;
if (!table->keys_in_use_for_query.is_clear_all())
/* use all keys if there's no list specified by the user through hints */
@@ -1235,7 +1206,6 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
error= HA_ADMIN_CORRUPT;
}
- err:
if (error != HA_ADMIN_OK)
{
/* Send error to user */
@@ -1263,17 +1233,14 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
ulonglong map;
TABLE_LIST *table_list= table->pos_in_table_list;
my_bool ignore_leaves= table_list->ignore_leaves;
+ char buf[ERRMSGSIZE+20];
DBUG_ENTER("ha_myisam::preload_keys");
table->keys_in_use_for_query.clear_all();
if (table_list->process_index_hints(table))
- {
- errmsg= thd->net.last_error;
- error= HA_ADMIN_FAILED;
- goto err;
- }
+ DBUG_RETURN(HA_ADMIN_FAILED);
map= ~(ulonglong) 0;
/* Check validity of the index references */
@@ -1294,7 +1261,6 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
errmsg= "Failed to allocate buffer";
break;
default:
- char buf[ERRMSGSIZE+20];
my_snprintf(buf, ERRMSGSIZE,
"Failed to read from index file (errno: %d)", my_errno);
errmsg= buf;
@@ -1427,8 +1393,8 @@ int ha_myisam::enable_indexes(uint mode)
param.tmpdir=&mysql_tmpdir_list;
if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
{
- sql_print_warning("Warning: Enabling keys got errno %d, retrying",
- my_errno);
+ sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, retrying",
+ my_errno, param.db_name, param.table_name);
/* Repairing by sort failed. Now try standard repair method. */
param.testflag&= ~(T_REP_BY_SORT | T_QUICK);
error= (repair(thd,param,0) != HA_ADMIN_OK);
@@ -1437,8 +1403,10 @@ int ha_myisam::enable_indexes(uint mode)
might have been set by the first repair. They can still be seen
with SHOW WARNINGS then.
*/
+#ifndef EMBEDDED_LIBRARY
if (! error)
thd->clear_error();
+#endif /* EMBEDDED_LIBRARY */
}
info(HA_STATUS_CONST);
thd_proc_info(thd, save_proc_info);
@@ -1493,7 +1461,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd= current_thd;
ulong size= min(thd->variables.read_buff_size,
- table->s->avg_row_length*rows);
+ (ulong) (table->s->avg_row_length*rows));
DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
(ulong) rows, size));
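
The (ulong) cast in start_bulk_insert keeps both min() operands the same type: avg_row_length * rows is computed in the wider type of rows, so without the cast the conditional expression is promoted and then narrowed silently on assignment to size. Here the narrowing is deliberate and harmless for a read-buffer heuristic, since the value is clamped by read_buff_size anyway. A sketch, assuming the usual #define min(a,b) ((a) < (b) ? (a) : (b)):

    #include <stdio.h>

    #define my_min(a,b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
      unsigned long long need = 1ULL << 40;   /* avg_row_length * rows */
      unsigned long      cap  = 131072;       /* read_buff_size        */
      /* Same-type operands: no promotion, the narrowing is explicit. */
      unsigned long size = my_min(cap, (unsigned long) need);
      printf("%lu\n", size);
      return 0;
    }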
@@ -1594,102 +1562,95 @@ bool ha_myisam::is_crashed() const
(my_disable_locking && file->s->state.open_count));
}
-int ha_myisam::update_row(const byte * old_data, byte * new_data)
+int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
{
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
return mi_update(file,old_data,new_data);
}
-int ha_myisam::delete_row(const byte * buf)
+int ha_myisam::delete_row(const uchar *buf)
{
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_delete_count);
return mi_delete(file,buf);
}
-int ha_myisam::index_read(byte *buf, const byte *key, key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_myisam::index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_read_idx(byte *buf, uint index, const byte *key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_read_last(byte *buf, const byte *key,
- key_part_map keypart_map)
+int ha_myisam::index_read_last_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map)
{
DBUG_ENTER("ha_myisam::index_read_last");
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
DBUG_RETURN(error);
}
-int ha_myisam::index_next(byte * buf)
+int ha_myisam::index_next(uchar *buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
int error=mi_rnext(file,buf,active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_prev(byte * buf)
+int ha_myisam::index_prev(uchar *buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_prev_count);
int error=mi_rprev(file,buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_first(byte * buf)
+int ha_myisam::index_first(uchar *buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_first_count);
int error=mi_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_last(byte * buf)
+int ha_myisam::index_last(uchar *buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_last_count);
int error=mi_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::index_next_same(byte * buf,
- const byte *key __attribute__((unused)),
+int ha_myisam::index_next_same(uchar *buf,
+ const uchar *key __attribute__((unused)),
uint length __attribute__((unused)))
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
int error=mi_rnext_same(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -1703,30 +1664,28 @@ int ha_myisam::rnd_init(bool scan)
return mi_reset(file); // Free buffers
}
-int ha_myisam::rnd_next(byte *buf)
+int ha_myisam::rnd_next(uchar *buf)
{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=mi_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisam::restart_rnd_next(byte *buf, byte *pos)
+int ha_myisam::restart_rnd_next(uchar *buf, uchar *pos)
{
return rnd_pos(buf,pos);
}
-int ha_myisam::rnd_pos(byte * buf, byte *pos)
+int ha_myisam::rnd_pos(uchar *buf, uchar *pos)
{
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length));
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-void ha_myisam::position(const byte* record)
+void ha_myisam::position(const uchar *record)
{
my_off_t row_position= mi_position(file);
my_store_ptr(ref, ref_length, row_position);
@@ -1897,6 +1856,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE)
create_flags|= HA_CREATE_TMP_TABLE;
+ if (ha_create_info->options & HA_CREATE_KEEP_FILES)
+ create_flags|= HA_CREATE_KEEP_FILES;
if (options & HA_OPTION_PACK_RECORD)
create_flags|= HA_PACK_RECORD;
if (options & HA_OPTION_CHECKSUM)
@@ -1911,7 +1872,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
records, recinfo,
0, (MI_UNIQUEDEF*) 0,
&create_info, create_flags);
- my_free((gptr) recinfo, MYF(0));
+ my_free((uchar*) recinfo, MYF(0));
DBUG_RETURN(error);
}
@@ -1929,7 +1890,7 @@ void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
{
ulonglong nr;
int error;
- byte key[MI_MAX_KEY_LENGTH];
+ uchar key[MI_MAX_KEY_LENGTH];
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
@@ -2002,7 +1963,7 @@ ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key,
}
-int ha_myisam::ft_read(byte * buf)
+int ha_myisam::ft_read(uchar *buf)
{
int error;
@@ -2083,3 +2044,78 @@ mysql_declare_plugin(myisam)
}
mysql_declare_plugin_end;
+
+#ifdef HAVE_QUERY_CACHE
+/**
+ @brief Register a named table with a call back function to the query cache.
+
+ @param thd The thread handle
+ @param table_key A pointer to the table name in the table cache
+ @param key_length The length of the table name
+  @param[out] engine_callback The pointer to the storage engine callback
+ function, currently 0
+ @param[out] engine_data Engine data will be set to 0.
+
+ @note Despite the name of this function, it is used to check each statement
+ before it is cached and not to register a table or callback function.
+
+ @see handler::register_query_cache_table
+
+ @return The error code. The engine_data and engine_callback will be set to 0.
+ @retval TRUE Success
+  @retval FALSE An error occurred
+*/
+
+my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name,
+ uint table_name_len,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data)
+{
+ /*
+    No callback function is needed to determine if a cached statement
+ is valid or not.
+ */
+ *engine_callback= 0;
+
+ /*
+ No engine data is needed.
+ */
+ *engine_data= 0;
+
+ /*
+ If a concurrent INSERT has happened just before the currently processed
+ SELECT statement, the total size of the table is unknown.
+
+    To determine whether the table size is known, the current thread's
+    snapshot of the table size is compared with the actual table size.
+
+    If the table size is unknown, the SELECT statement can't be cached.
+ */
+ ulonglong actual_data_file_length;
+ ulonglong current_data_file_length;
+
+ /*
+ POSIX visibility rules specify that "2. Whatever memory values a
+ thread can see when it unlocks a mutex <...> can also be seen by any
+    thread that later locks the same mutex". In this particular case, the
+    concurrent insert thread modified data_file_length in MYISAM_SHARE
+    before it unlocked (or even locked) structure_guard_mutex. So here we
+    are guaranteed to see at least that value once we have locked the same
+    mutex. We may still see a later value (modified by some other thread),
+    but that is fine: we only want to know whether the variable changed;
+    the actual new value doesn't matter.
+ */
+ actual_data_file_length= file->s->state.state.data_file_length;
+ current_data_file_length= file->save_state.data_file_length;
+
+ if (current_data_file_length != actual_data_file_length)
+ {
+ /* Don't cache current statement. */
+ return FALSE;
+ }
+
+ /* It is ok to try to cache current statement. */
+ return TRUE;
+}
+#endif
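
register_query_cache_table boils down to a single equality test: the handler's saved snapshot of data_file_length against the live value in the shared state. Equal means no concurrent INSERT has grown the table under this statement, so the result may be cached; unequal means the visible table size is unsettled and caching is refused. A structural sketch with illustrative names:

    struct share_state  { unsigned long long data_file_length; };
    struct handler_view { const struct share_state *live;
                          unsigned long long snapshot; };

    static int statement_cacheable(const struct handler_view *h)
    {
      /* A concurrent INSERT moves the live length past our snapshot. */
      return h->live->data_file_length == h->snapshot;
    }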
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index bb439e9914d..e8594fc9039 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -60,26 +60,22 @@ class ha_myisam: public handler
uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
- virtual bool check_if_locking_is_allowed(uint sql_command,
- ulong type, TABLE *table,
- uint count, uint current,
- uint *system_count,
- bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte *buf, const byte *key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_idx(byte *buf, uint index, const byte *key,
- key_part_map keypart_map, enum ha_rkey_function find_flag);
- int index_read_last(byte *buf, const byte *key, key_part_map keypart_map);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int index_next_same(byte *buf, const byte *key, uint keylen);
+ int write_row(uchar * buf);
+ int update_row(const uchar * old_data, uchar * new_data);
+ int delete_row(const uchar * buf);
+ int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_idx_map(uchar *buf, uint index, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
+ int index_next(uchar * buf);
+ int index_prev(uchar * buf);
+ int index_first(uchar * buf);
+ int index_last(uchar * buf);
+ int index_next_same(uchar *buf, const uchar *key, uint keylen);
int ft_init()
{
if (!ft_handler)
@@ -90,15 +86,15 @@ class ha_myisam: public handler
FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
{
return ft_init_search(flags,file,inx,
- (byte *)key->ptr(), key->length(), key->charset(),
+ (uchar *)key->ptr(), key->length(), key->charset(),
table->record[0]);
}
- int ft_read(byte *buf);
+ int ft_read(uchar *buf);
int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int restart_rnd_next(byte *buf, byte *pos);
- void position(const byte *record);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ int restart_rnd_next(uchar *buf, uchar *pos);
+ void position(const uchar *record);
int info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
@@ -137,4 +133,11 @@ class ha_myisam: public handler
int dump(THD* thd, int fd);
int net_read_dump(NET* net);
#endif
+#ifdef HAVE_QUERY_CACHE
+ my_bool register_query_cache_table(THD *thd, char *table_key,
+ uint key_length,
+ qc_engine_callback
+ *engine_callback,
+ ulonglong *engine_data);
+#endif
};
diff --git a/storage/myisam/mi_cache.c b/storage/myisam/mi_cache.c
index 59c9b2c8812..d6dcc431a8d 100644
--- a/storage/myisam/mi_cache.c
+++ b/storage/myisam/mi_cache.c
@@ -35,12 +35,12 @@
#include "myisamdef.h"
-int _mi_read_cache(IO_CACHE *info, byte *buff, my_off_t pos, uint length,
+int _mi_read_cache(IO_CACHE *info, uchar *buff, my_off_t pos, uint length,
int flag)
{
uint read_length,in_buff_length;
my_off_t offset;
- char *in_buff_pos;
+ uchar *in_buff_pos;
DBUG_ENTER("_mi_read_cache");
if (pos < info->pos_in_file)
@@ -61,7 +61,7 @@ int _mi_read_cache(IO_CACHE *info, byte *buff, my_off_t pos, uint length,
(my_off_t) (info->read_end - info->request_pos))
{
in_buff_pos=info->request_pos+(uint) offset;
- in_buff_length= min(length,(uint) (info->read_end-in_buff_pos));
+ in_buff_length= min(length, (size_t) (info->read_end-in_buff_pos));
memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length);
if (!(length-=in_buff_length))
DBUG_RETURN(0);
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 7a4d47954a5..fe6b716877c 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -83,12 +83,12 @@ static int sort_delete_record(MI_SORT_PARAM *sort_param);
/*static int flush_pending_blocks(MI_CHECK *param);*/
static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks,
uint buffer_length);
-static ha_checksum mi_byte_checksum(const byte *buf, uint length);
+static ha_checksum mi_byte_checksum(const uchar *buf, uint length);
static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share);
void myisamchk_init(MI_CHECK *param)
{
- bzero((gptr) param,sizeof(*param));
+ bzero((uchar*) param,sizeof(*param));
param->opt_follow_links=1;
param->keys_in_use= ~(ulonglong) 0;
param->search_after_block=HA_OFFSET_ERROR;
@@ -173,7 +173,7 @@ int chk_del(MI_CHECK *param, register MI_INFO *info, uint test_flag)
printf(" %9s",llstr(next_link,buff));
if (next_link >= info->state->data_file_length)
goto wrong;
- if (my_pread(info->dfile,(char*) buff,delete_link_length,
+ if (my_pread(info->dfile, (uchar*) buff,delete_link_length,
next_link,MYF(MY_NABP)))
{
if (test_flag & T_VERBOSE) puts("");
@@ -250,7 +250,8 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr)
my_off_t next_link;
uint block_size=(nr+1)*MI_MIN_KEY_BLOCK_LENGTH;
ha_rows records;
- char llbuff[21], llbuff2[21], *buff;
+ char llbuff[21], llbuff2[21];
+ uchar *buff;
DBUG_ENTER("check_k_link");
DBUG_PRINT("enter", ("block_size: %u", block_size));
@@ -296,7 +297,7 @@ static int check_k_link(MI_CHECK *param, register MI_INFO *info, uint nr)
*/
if (!(buff=key_cache_read(info->s->key_cache,
info->s->kfile, next_link, DFLT_INIT_HITS,
- (byte*) info->buff, MI_MIN_KEY_BLOCK_LENGTH,
+ (uchar*) info->buff, MI_MIN_KEY_BLOCK_LENGTH,
MI_MIN_KEY_BLOCK_LENGTH, 1)))
{
/* purecov: begin tested */
@@ -335,7 +336,7 @@ int chk_size(MI_CHECK *param, register MI_INFO *info)
flush_key_blocks(info->s->key_cache,
info->s->kfile, FLUSH_FORCE_WRITE);
- size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0));
+ size= my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE));
if ((skr=(my_off_t) info->state->key_file_length) != size)
{
/* Don't give error if file generated by myisampack */
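
The MY_THREADSAFE flag added to these my_seek() calls presumably tells mysys that the descriptor may be shared between threads, where a bare seek is unsafe: on a shared fd, pairing lseek() with read() is a classic race, because another thread can move the file offset between the two calls. POSIX pread() is the usual cure, doing both atomically at an explicit offset:

    #include <unistd.h>

    /* Positioned read: no shared file-offset update, hence no race. */
    static ssize_t read_at(int fd, void *buf, size_t n, off_t pos)
    {
      return pread(fd, buf, n, pos);
    }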
@@ -531,7 +532,7 @@ int chk_key(MI_CHECK *param, register MI_INFO *info)
/* Check that there isn't a row with auto_increment = 0 in the table */
mi_extra(info,HA_EXTRA_KEYREAD,0);
bzero(info->lastkey,keyinfo->seg->length);
- if (!mi_rkey(info, info->rec_buff, key, (const byte*) info->lastkey,
+ if (!mi_rkey(info, info->rec_buff, key, (const uchar*) info->lastkey,
(key_part_map)1, HA_READ_KEY_EXACT))
{
/* Don't count this as a real warning, as myisamchk can't correct it */
@@ -595,7 +596,8 @@ static int chk_index_down(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
{
/* purecov: begin tested */
/* Give it a chance to fit in the real file size. */
- my_off_t max_length= my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(0));
+ my_off_t max_length= my_seek(info->s->kfile, 0L, MY_SEEK_END,
+ MYF(MY_THREADSAFE));
mi_check_print_error(param, "Invalid key block position: %s "
"key block size: %u file_length: %s",
llstr(page, llbuff), keyinfo->block_length,
@@ -740,7 +742,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
char llbuff[22];
uint diff_pos[2];
DBUG_ENTER("chk_index");
- DBUG_DUMP("buff",(byte*) buff,mi_getint(buff));
+ DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff));
/* TODO: implement appropriate check for RTree keys */
if (keyinfo->flag & HA_SPATIAL)
@@ -798,8 +800,8 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
(flag=ha_key_cmp(keyinfo->seg,info->lastkey,key,key_length,
comp_flag, diff_pos)) >=0)
{
- DBUG_DUMP("old",(byte*) info->lastkey, info->lastkey_length);
- DBUG_DUMP("new",(byte*) key, key_length);
+ DBUG_DUMP("old",(uchar*) info->lastkey, info->lastkey_length);
+ DBUG_DUMP("new",(uchar*) key, key_length);
DBUG_DUMP("new_in_page",(char*) old_keypos,(uint) (keypos-old_keypos));
if (comp_flag & SEARCH_FIND && flag == 0)
@@ -831,7 +833,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
key);
}
}
- (*key_checksum)+= mi_byte_checksum((byte*) key,
+ (*key_checksum)+= mi_byte_checksum((uchar*) key,
key_length- info->s->rec_reflength);
record= _mi_dpos(info,0,key+key_length);
if (keyinfo->flag & HA_FULLTEXT) /* special handling for ft2 */
@@ -869,7 +871,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
DBUG_PRINT("test",("page: %s record: %s filelength: %s",
llstr(page,llbuff),llstr(record,llbuff2),
llstr(info->state->data_file_length,llbuff3)));
- DBUG_DUMP("key",(byte*) key,key_length);
+ DBUG_DUMP("key",(uchar*) key,key_length);
DBUG_DUMP("new_in_page",(char*) old_keypos,(uint) (keypos-old_keypos));
goto err;
}
@@ -881,10 +883,10 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
llstr(page,llbuff), used_length, (keypos - buff));
goto err;
}
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(0);
err:
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(1);
} /* chk_index */
@@ -939,7 +941,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
ha_rows records,del_blocks;
my_off_t used,empty,pos,splits,start_recpos,
del_length,link_used,start_block;
- byte *record,*to;
+ uchar *record,*to;
char llbuff[22],llbuff2[22],llbuff3[22];
ha_checksum intern_record_checksum;
ha_checksum key_checksum[MI_MAX_POSSIBLE_KEY];
@@ -956,7 +958,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
puts("- check record links");
}
- if (!(record= (byte*) my_malloc(info->s->base.pack_reclength,MYF(0))))
+ if (!(record= (uchar*) my_malloc(info->s->base.pack_reclength,MYF(0))))
{
mi_check_print_error(param,"Not enough memory for record");
DBUG_RETURN(-1);
@@ -991,7 +993,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
goto err2;
switch (info->s->data_file_type) {
case STATIC_RECORD:
- if (my_b_read(&param->read_cache,(byte*) record,
+ if (my_b_read(&param->read_cache,(uchar*) record,
info->s->base.pack_reclength))
goto err;
start_recpos=pos;
@@ -1011,7 +1013,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
block_info.next_filepos=pos;
do
{
- if (_mi_read_cache(&param->read_cache,(byte*) block_info.header,
+ if (_mi_read_cache(&param->read_cache,(uchar*) block_info.header,
(start_block=block_info.next_filepos),
sizeof(block_info.header),
(flag ? 0 : READING_NEXT) | READING_HEADER))
@@ -1115,7 +1117,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
got_error=1;
break;
}
- if (_mi_read_cache(&param->read_cache,(byte*) to,block_info.filepos,
+ if (_mi_read_cache(&param->read_cache,(uchar*) to,block_info.filepos,
(uint) block_info.data_len,
flag == 1 ? READING_NEXT : 0))
goto err;
@@ -1176,7 +1178,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
pos=block_info.filepos+block_info.block_len;
break;
case COMPRESSED_RECORD:
- if (_mi_read_cache(&param->read_cache,(byte*) block_info.header, pos,
+ if (_mi_read_cache(&param->read_cache,(uchar*) block_info.header, pos,
info->s->pack.ref_length, READING_NEXT))
goto err;
start_recpos=pos;
@@ -1193,7 +1195,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
got_error=1;
break;
}
- if (_mi_read_cache(&param->read_cache,(byte*) info->rec_buff,
+ if (_mi_read_cache(&param->read_cache,(uchar*) info->rec_buff,
block_info.filepos, block_info.rec_len, READING_NEXT))
goto err;
if (_mi_pack_rec_unpack(info, &info->bit_buff, record,
@@ -1253,7 +1255,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
}
}
else
- key_checksum[key]+=mi_byte_checksum((byte*) info->lastkey,
+ key_checksum[key]+=mi_byte_checksum((uchar*) info->lastkey,
key_length);
}
}
@@ -1363,12 +1365,12 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
printf("Lost space: %12s Linkdata: %10s\n",
llstr(empty,llbuff),llstr(link_used,llbuff2));
}
- my_free((gptr) record,MYF(0));
+ my_free((uchar*) record,MYF(0));
DBUG_RETURN (error);
err:
mi_check_print_error(param,"got error: %d when reading datafile at record: %s",my_errno, llstr(records,llbuff));
err2:
- my_free((gptr) record,MYF(0));
+ my_free((uchar*) record,MYF(0));
param->testflag|=T_RETRY_WITHOUT_QUICK;
DBUG_RETURN(1);
} /* chk_data_link */
@@ -1378,7 +1380,7 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
/* Save new datafile-name in temp_filename */
int mi_repair(MI_CHECK *param, register MI_INFO *info,
- my_string name, int rep_quick)
+ char * name, int rep_quick)
{
int error,got_error;
uint i;
@@ -1427,7 +1429,7 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
MYF(MY_WME | MY_WAIT_IF_FULL)))
goto err;
info->opt_flag|=WRITE_CACHE_USED;
- if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength,
+ if (!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength,
MYF(0))) ||
!mi_alloc_rec_buff(info, -1, &sort_param.rec_buff))
{
@@ -1514,7 +1516,7 @@ int mi_repair(MI_CHECK *param, register MI_INFO *info,
{
if (my_errno != HA_ERR_FOUND_DUPP_KEY)
goto err;
- DBUG_DUMP("record",(byte*) sort_param.record,share->base.pack_reclength);
+ DBUG_DUMP("record",(uchar*) sort_param.record,share->base.pack_reclength);
mi_check_print_info(param,"Duplicate key %2d for record at %10s against new record at %10s",
info->errkey+1,
llstr(sort_param.start_recpos,llbuff),
@@ -1660,7 +1662,7 @@ static int writekeys(MI_SORT_PARAM *sort_param)
register uint i;
uchar *key;
MI_INFO *info= sort_param->sort_info->info;
- byte *buff= sort_param->record;
+ uchar *buff= sort_param->record;
my_off_t filepos= sort_param->filepos;
DBUG_ENTER("writekeys");
@@ -1671,7 +1673,7 @@ static int writekeys(MI_SORT_PARAM *sort_param)
{
if (info->s->keyinfo[i].flag & HA_FULLTEXT )
{
- if (_mi_ft_add(info,i,(char*) key,buff,filepos))
+ if (_mi_ft_add(info, i, key, buff, filepos))
goto err;
}
#ifdef HAVE_SPATIAL
@@ -1702,7 +1704,7 @@ static int writekeys(MI_SORT_PARAM *sort_param)
{
if (info->s->keyinfo[i].flag & HA_FULLTEXT)
{
- if (_mi_ft_del(info,i,(char*) key,buff,filepos))
+ if (_mi_ft_del(info,i, key,buff,filepos))
break;
}
else
@@ -1724,7 +1726,7 @@ static int writekeys(MI_SORT_PARAM *sort_param)
/* Change all key-pointers that points to a records */
-int movepoint(register MI_INFO *info, byte *record, my_off_t oldpos,
+int movepoint(register MI_INFO *info, uchar *record, my_off_t oldpos,
my_off_t newpos, uint prot_key)
{
register uint i;
@@ -1801,7 +1803,7 @@ int flush_blocks(MI_CHECK *param, KEY_CACHE *key_cache, File file)
/* Sort index for more efficent reads */
-int mi_sort_index(MI_CHECK *param, register MI_INFO *info, my_string name)
+int mi_sort_index(MI_CHECK *param, register MI_INFO *info, char * name)
{
reg2 uint key;
reg1 MI_KEYDEF *keyinfo;
@@ -1944,7 +1946,7 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
("From page: %ld, keyoffset: %lu used_length: %d",
(ulong) pagepos, (ulong) (keypos - buff),
(int) used_length));
- DBUG_DUMP("buff",(byte*) buff,used_length);
+ DBUG_DUMP("buff",(uchar*) buff,used_length);
goto err;
}
}
@@ -1973,17 +1975,17 @@ static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
/* Fill block with zero and write it to the new index file */
length=mi_getint(buff);
- bzero((byte*) buff+length,keyinfo->block_length-length);
- if (my_pwrite(new_file,(byte*) buff,(uint) keyinfo->block_length,
+ bzero((uchar*) buff+length,keyinfo->block_length-length);
+ if (my_pwrite(new_file,(uchar*) buff,(uint) keyinfo->block_length,
new_page_pos,MYF(MY_NABP | MY_WAIT_IF_FULL)))
{
mi_check_print_error(param,"Can't write indexblock, error: %d",my_errno);
goto err;
}
- my_afree((gptr) buff);
+ my_afree((uchar*) buff);
DBUG_RETURN(0);
err:
- my_afree((gptr) buff);
+ my_afree((uchar*) buff);
DBUG_RETURN(1);
} /* sort_one_index */
@@ -2054,13 +2056,13 @@ int filecopy(MI_CHECK *param, File to,File from,my_off_t start,
VOID(my_seek(from,start,MY_SEEK_SET,MYF(0)));
while (length > buff_length)
{
- if (my_read(from,(byte*) buff,buff_length,MYF(MY_NABP)) ||
- my_write(to,(byte*) buff,buff_length,param->myf_rw))
+ if (my_read(from,(uchar*) buff,buff_length,MYF(MY_NABP)) ||
+ my_write(to,(uchar*) buff,buff_length,param->myf_rw))
goto err;
length-= buff_length;
}
- if (my_read(from,(byte*) buff,(uint) length,MYF(MY_NABP)) ||
- my_write(to,(byte*) buff,(uint) length,param->myf_rw))
+ if (my_read(from,(uchar*) buff,(uint) length,MYF(MY_NABP)) ||
+ my_write(to,(uchar*) buff,(uint) length,param->myf_rw))
goto err;
if (buff != tmp_buff)
my_free(buff,MYF(0));
@@ -2141,7 +2143,7 @@ int mi_repair_by_sort(MI_CHECK *param, register MI_INFO *info,
info->opt_flag|=WRITE_CACHE_USED;
info->rec_cache.file=info->dfile; /* for sort_delete_record */
- if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength,
+ if (!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength,
MYF(0))) ||
!mi_alloc_rec_buff(info, -1, &sort_param.rec_buff))
{
@@ -2454,8 +2456,8 @@ err:
my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff),
MYF(MY_ALLOW_ZERO_PTR));
my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
VOID(end_io_cache(&param->read_cache));
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
@@ -2741,7 +2743,7 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
sort_param[i].filepos=new_header_length;
sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length;
- sort_param[i].record= (((char *)(sort_param+share->base.keys))+
+ sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+
(share->base.pack_reclength * i));
if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff))
{
@@ -2987,9 +2989,9 @@ err:
pthread_cond_destroy (&sort_info.cond);
pthread_mutex_destroy(&sort_info.mutex);
- my_free((gptr) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
- my_free((gptr) sort_param,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) sort_info.ft_buf, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) sort_param,MYF(MY_ALLOW_ZERO_PTR));
my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
VOID(end_io_cache(&param->read_cache));
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
@@ -3119,7 +3121,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
int parallel_flag;
uint found_record,b_type,left_length;
my_off_t pos;
- byte *to;
+ uchar *to;
MI_BLOCK_INFO block_info;
SORT_INFO *sort_info=sort_param->sort_info;
MI_CHECK *param=sort_info->param;
@@ -3197,7 +3199,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
llstr(param->search_after_block,llbuff),
llstr(sort_param->start_recpos,llbuff2));
if (_mi_read_cache(&sort_param->read_cache,
- (byte*) block_info.header,pos,
+ (uchar*) block_info.header,pos,
MI_BLOCK_INFO_HEADER_LENGTH,
(! found_record ? READING_NEXT : 0) |
parallel_flag | READING_HEADER))
@@ -3461,7 +3463,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
case COMPRESSED_RECORD:
for (searching=0 ;; searching=1, sort_param->pos++)
{
- if (_mi_read_cache(&sort_param->read_cache,(byte*) block_info.header,
+ if (_mi_read_cache(&sort_param->read_cache,(uchar*) block_info.header,
sort_param->pos,
share->pack.ref_length,READING_NEXT))
DBUG_RETURN(-1);
@@ -3489,7 +3491,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
llstr(sort_param->pos,llbuff));
continue;
}
- if (_mi_read_cache(&sort_param->read_cache,(byte*) sort_param->rec_buff,
+ if (_mi_read_cache(&sort_param->read_cache,(uchar*) sort_param->rec_buff,
block_info.filepos, block_info.rec_len,
READING_NEXT))
{
@@ -3545,8 +3547,8 @@ int sort_write_record(MI_SORT_PARAM *sort_param)
int flag;
uint length;
ulong block_length,reclength;
- byte *from;
- byte block_buff[8];
+ uchar *from;
+ uchar block_buff[8];
SORT_INFO *sort_info=sort_param->sort_info;
MI_CHECK *param=sort_info->param;
MI_INFO *info=sort_info->info;
@@ -3585,7 +3587,7 @@ int sort_write_record(MI_SORT_PARAM *sort_param)
DBUG_RETURN(1);
sort_info->buff_length=reclength;
}
- from=sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER);
+ from= sort_info->buff+ALIGN_SIZE(MI_MAX_DYN_BLOCK_HEADER);
}
/* We can use info->checksum here as only one thread calls this. */
info->checksum=mi_checksum(info,sort_param->record);
@@ -3622,7 +3624,7 @@ int sort_write_record(MI_SORT_PARAM *sort_param)
length+= save_pack_length((uint) share->pack.version,
block_buff + length, info->blob_length);
if (my_b_write(&info->rec_cache,block_buff,length) ||
- my_b_write(&info->rec_cache,(byte*) sort_param->rec_buff,reclength))
+ my_b_write(&info->rec_cache,(uchar*) sort_param->rec_buff,reclength))
{
mi_check_print_error(param,"%d when writing to datafile",my_errno);
DBUG_RETURN(1);
@@ -3922,7 +3924,7 @@ static int sort_insert_key(MI_SORT_PARAM *sort_param,
/* Fill block with end-zero and write filled block */
mi_putint(anc_buff,key_block->last_length,nod_flag);
- bzero((byte*) anc_buff+key_block->last_length,
+ bzero((uchar*) anc_buff+key_block->last_length,
keyinfo->block_length- key_block->last_length);
key_file_length=info->state->key_file_length;
if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
@@ -3934,10 +3936,10 @@ static int sort_insert_key(MI_SORT_PARAM *sort_param,
if (_mi_write_keypage(info, keyinfo, filepos, DFLT_INIT_HITS, anc_buff))
DBUG_RETURN(1);
}
- else if (my_pwrite(info->s->kfile,(byte*) anc_buff,
+ else if (my_pwrite(info->s->kfile,(uchar*) anc_buff,
(uint) keyinfo->block_length,filepos, param->myf_rw))
DBUG_RETURN(1);
- DBUG_DUMP("buff",(byte*) anc_buff,mi_getint(anc_buff));
+ DBUG_DUMP("buff",(uchar*) anc_buff,mi_getint(anc_buff));
/* Write separator-key to block in next level */
if (sort_insert_key(sort_param,key_block+1,key_block->lastkey,filepos))
@@ -4028,7 +4030,7 @@ int flush_pending_blocks(MI_SORT_PARAM *sort_param)
if (nod_flag)
_mi_kpointer(info,key_block->end_pos,filepos);
key_file_length=info->state->key_file_length;
- bzero((byte*) key_block->buff+length, keyinfo->block_length-length);
+ bzero((uchar*) key_block->buff+length, keyinfo->block_length-length);
if ((filepos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
DBUG_RETURN(1);
@@ -4039,10 +4041,10 @@ int flush_pending_blocks(MI_SORT_PARAM *sort_param)
DFLT_INIT_HITS, key_block->buff))
DBUG_RETURN(1);
}
- else if (my_pwrite(info->s->kfile,(byte*) key_block->buff,
+ else if (my_pwrite(info->s->kfile,(uchar*) key_block->buff,
(uint) keyinfo->block_length,filepos, myf_rw))
DBUG_RETURN(1);
- DBUG_DUMP("buff",(byte*) key_block->buff,length);
+ DBUG_DUMP("buff",(uchar*) key_block->buff,length);
nod_flag=1;
}
info->s->state.key_root[sort_param->key]=filepos; /* Last is root for tree */
@@ -4080,10 +4082,10 @@ int test_if_almost_full(MI_INFO *info)
{
if (info->s->options & HA_OPTION_COMPRESS_RECORD)
return 0;
- return (my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0))/10*9 >
- (my_off_t) (info->s->base.max_key_file_length) ||
- my_seek(info->dfile,0L,MY_SEEK_END,MYF(0))/10*9 >
- (my_off_t) info->s->base.max_data_file_length);
+ return my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)) / 10 * 9 >
+ (my_off_t) info->s->base.max_key_file_length ||
+ my_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)) / 10 * 9 >
+ (my_off_t) info->s->base.max_data_file_length;
}
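
The rewritten return above asks whether nine-tenths of the current file size exceeds the configured maximum, dividing before multiplying so the my_off_t arithmetic cannot overflow. A minimal sketch of the same test on a plain file descriptor — my_seek() and the MYF(MY_THREADSAFE) flag are MySQL portability wrappers, so POSIX lseek() stands in here:

#include <sys/types.h>
#include <unistd.h>

/* Sketch only: same size/10*9 > limit comparison as test_if_almost_full(). */
static int file_almost_full(int fd, off_t max_length)
{
  off_t end= lseek(fd, 0L, SEEK_END);      /* current file size */
  if (end == (off_t) -1)
    return 0;                              /* seek failed: report not full */
  return end / 10 * 9 > max_length;        /* divide first to avoid overflow */
}
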
/* Recreate table with bigger, more generously allocated record-data */
@@ -4113,34 +4115,34 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
(param->testflag & T_UNPACK);
if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys)))
DBUG_RETURN(0);
- memcpy((byte*) keyinfo,(byte*) share.keyinfo,
+ memcpy((uchar*) keyinfo,(uchar*) share.keyinfo,
(size_t) (sizeof(MI_KEYDEF)*share.base.keys));
key_parts= share.base.all_key_parts;
if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)*
(key_parts+share.base.keys))))
{
- my_afree((gptr) keyinfo);
+ my_afree((uchar*) keyinfo);
DBUG_RETURN(1);
}
if (!(recdef=(MI_COLUMNDEF*)
my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1))))
{
- my_afree((gptr) keyinfo);
- my_afree((gptr) keysegs);
+ my_afree((uchar*) keyinfo);
+ my_afree((uchar*) keysegs);
DBUG_RETURN(1);
}
if (!(uniquedef=(MI_UNIQUEDEF*)
my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1))))
{
- my_afree((gptr) recdef);
- my_afree((gptr) keyinfo);
- my_afree((gptr) keysegs);
+ my_afree((uchar*) recdef);
+ my_afree((uchar*) keyinfo);
+ my_afree((uchar*) keysegs);
DBUG_RETURN(1);
}
/* Copy the column definitions */
- memcpy((byte*) recdef,(byte*) share.rec,
+ memcpy((uchar*) recdef,(uchar*) share.rec,
(size_t) (sizeof(MI_COLUMNDEF)*(share.base.fields+1)));
for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++)
{
@@ -4152,7 +4154,7 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
}
/* Change the new key to point at the saved key segments */
- memcpy((byte*) keysegs,(byte*) share.keyparts,
+ memcpy((uchar*) keysegs,(uchar*) share.keyparts,
(size_t) (sizeof(HA_KEYSEG)*(key_parts+share.base.keys+
share.state.header.uniques)));
keyseg=keysegs;
@@ -4169,7 +4171,7 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
/* Copy the unique definitions and change them to point at the new key
segments */
- memcpy((byte*) uniquedef,(byte*) share.uniqueinfo,
+ memcpy((uchar*) uniquedef,(uchar*) share.uniqueinfo,
(size_t) (sizeof(MI_UNIQUEDEF)*(share.state.header.uniques)));
for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques;
u_ptr != u_end ; u_ptr++)
@@ -4251,10 +4253,10 @@ int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
goto end;
error=0;
end:
- my_afree((gptr) uniquedef);
- my_afree((gptr) keyinfo);
- my_afree((gptr) recdef);
- my_afree((gptr) keysegs);
+ my_afree((uchar*) uniquedef);
+ my_afree((uchar*) keyinfo);
+ my_afree((uchar*) recdef);
+ my_afree((uchar*) keysegs);
DBUG_RETURN(error);
}
@@ -4267,7 +4269,7 @@ int write_data_suffix(SORT_INFO *sort_info, my_bool fix_datafile)
if (info->s->options & HA_OPTION_COMPRESS_RECORD && fix_datafile)
{
- char buff[MEMMAP_EXTRA_MARGIN];
+ uchar buff[MEMMAP_EXTRA_MARGIN];
bzero(buff,sizeof(buff));
if (my_b_write(&info->rec_cache,buff,sizeof(buff)))
{
@@ -4357,7 +4359,7 @@ err:
void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
my_bool repair_only)
{
- byte *record;
+ uchar *record;
DBUG_ENTER("update_auto_increment_key");
if (!info->s->base.auto_key ||
@@ -4376,7 +4378,7 @@ void update_auto_increment_key(MI_CHECK *param, MI_INFO *info,
We have to use an allocated buffer instead of info->rec_buff as
_mi_put_key_in_record() may use info->rec_buff
*/
- if (!(record= (byte*) my_malloc((uint) info->s->base.pack_reclength,
+ if (!(record= (uchar*) my_malloc((uint) info->s->base.pack_reclength,
MYF(0))))
{
mi_check_print_error(param,"Not enough memory for extra record");
@@ -4504,10 +4506,10 @@ void update_key_parts(MI_KEYDEF *keyinfo, ulong *rec_per_key_part,
}
-static ha_checksum mi_byte_checksum(const byte *buf, uint length)
+static ha_checksum mi_byte_checksum(const uchar *buf, uint length)
{
ha_checksum crc;
- const byte *end=buf+length;
+ const uchar *end=buf+length;
for (crc=0; buf != end; buf++)
crc=((crc << 1) + *((uchar*) buf)) +
test(crc & (((ha_checksum) 1) << (8*sizeof(ha_checksum)-1)));
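
The loop above is a rotate-and-add fold: test() yields 1 when the top bit of the old crc is set, so the bit shifted out by crc << 1 is carried back in, making the step a 1-bit left rotate plus the data byte. A standalone sketch, assuming ha_checksum is an unsigned integral type (ulong in MyISAM):

/* Sketch only; my_crc_t stands in for ha_checksum. */
typedef unsigned long my_crc_t;

static my_crc_t byte_checksum(const unsigned char *buf, unsigned length)
{
  const unsigned char *end= buf + length;
  my_crc_t crc= 0;
  const my_crc_t top= (my_crc_t) 1 << (8 * sizeof(my_crc_t) - 1);
  for (; buf != end; buf++)
    crc= ((crc << 1) + *buf) + ((crc & top) ? 1 : 0);  /* fold top bit back in */
  return crc;
}
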
diff --git a/storage/myisam/mi_checksum.c b/storage/myisam/mi_checksum.c
index 711e87c1547..4e87de373bd 100644
--- a/storage/myisam/mi_checksum.c
+++ b/storage/myisam/mi_checksum.c
@@ -17,7 +17,7 @@
#include "myisamdef.h"
-ha_checksum mi_checksum(MI_INFO *info, const byte *buf)
+ha_checksum mi_checksum(MI_INFO *info, const uchar *buf)
{
uint i;
ha_checksum crc=0;
@@ -25,7 +25,7 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf)
for (i=info->s->base.fields ; i-- ; buf+=(rec++)->length)
{
- const byte *pos;
+ const uchar *pos;
ulong length;
switch (rec->type) {
case FIELD_BLOB:
@@ -52,13 +52,13 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf)
pos=buf;
break;
}
- crc=my_checksum(crc, pos ? pos : "", length);
+ crc=my_checksum(crc, pos ? pos : (uchar*) "", length);
}
return crc;
}
-ha_checksum mi_static_checksum(MI_INFO *info, const byte *pos)
+ha_checksum mi_static_checksum(MI_INFO *info, const uchar *pos)
{
return my_checksum(0, pos, info->s->base.reclength);
}
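
mi_checksum() above folds each field's (pos, length) slice into the running crc; the (uchar*) "" fallback keeps my_checksum() away from a NULL pointer when a field carries no data. The per-field step, sketched with a stand-in fold function:

typedef unsigned long my_crc_t;

/* Stand-in for my_checksum(): fold a byte range into a seed (sketch only). */
static my_crc_t fold(my_crc_t seed, const unsigned char *p, unsigned long n)
{
  while (n--)
    seed= (seed << 1) + *p++;
  return seed;
}

/* NULL-safe per-field step, as in mi_checksum() above.  A NULL pos always
   arrives with length 0 there, so the empty string is never scanned. */
static my_crc_t checksum_field(my_crc_t crc, const unsigned char *pos,
                               unsigned long length)
{
  return fold(crc, pos ? pos : (const unsigned char *) "", length);
}
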
diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c
index 47b7ba855c0..07105aea88d 100644
--- a/storage/myisam/mi_close.c
+++ b/storage/myisam/mi_close.c
@@ -87,8 +87,8 @@ int mi_close(register MI_INFO *info)
#endif
if (share->decode_trees)
{
- my_free((gptr) share->decode_trees,MYF(0));
- my_free((gptr) share->decode_tables,MYF(0));
+ my_free((uchar*) share->decode_trees,MYF(0));
+ my_free((uchar*) share->decode_tables,MYF(0));
}
#ifdef THREAD
thr_lock_delete(&share->lock);
@@ -102,19 +102,19 @@ int mi_close(register MI_INFO *info)
}
}
#endif
- my_free((gptr) info->s,MYF(0));
+ my_free((uchar*) info->s,MYF(0));
}
pthread_mutex_unlock(&THR_LOCK_myisam);
if (info->ftparser_param)
{
- my_free((gptr)info->ftparser_param, MYF(0));
+ my_free((uchar*)info->ftparser_param, MYF(0));
info->ftparser_param= 0;
}
if (info->dfile >= 0 && my_close(info->dfile,MYF(0)))
error = my_errno;
myisam_log_command(MI_LOG_CLOSE,info,NULL,0,error);
- my_free((gptr) info,MYF(0));
+ my_free((uchar*) info,MYF(0));
if (error)
{
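
mi_close() above clears ftparser_param immediately after freeing it, so a later pass over the same MI_INFO cannot double-free. The idiom with plain free(), sketch only:

#include <stdlib.h>

/* Free-then-clear: later calls see NULL and become no-ops, since free(NULL)
   is defined to do nothing. */
static void free_and_clear(void **p)
{
  free(*p);
  *p= NULL;
}
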
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index 71d377c8b6b..0cac5f08b3b 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -76,7 +76,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
LINT_INIT(file);
errpos=0;
options=0;
- bzero((byte*) &share,sizeof(share));
+ bzero((uchar*) &share,sizeof(share));
if (flags & HA_DONT_TOUCH_DATA)
{
@@ -498,7 +498,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
goto err;
}
- bmove(share.state.header.file_version,(byte*) myisam_file_magic,4);
+ bmove(share.state.header.file_version,(uchar*) myisam_file_magic,4);
ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ?
HA_OPTION_COMPRESS_RECORD |
HA_OPTION_TEMP_COMPRESS_RECORD: 0);
@@ -615,7 +615,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
linkname_ptr=0;
/* Replace the current file */
- create_flag=MY_DELETE_OLD;
+ create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
/*
@@ -689,7 +689,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
fn_format(filename,name,"", MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
linkname_ptr=0;
- create_flag=MY_DELETE_OLD;
+ create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
if ((dfile=
my_create_with_symlink(linkname_ptr, filename, 0, create_mode,
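
The two create_flag hunks above make the same change: MY_DELETE_OLD is requested only when the caller did not pass HA_CREATE_KEEP_FILES, so the index and data files of an existing table can survive a re-create. The gate in isolation — the flag values below are illustrative stand-ins, not the real MySQL constants:

enum { KEEP_FILES= 1 << 0 };   /* stand-in for HA_CREATE_KEEP_FILES */
enum { DELETE_OLD= 1 << 1 };   /* stand-in for MY_DELETE_OLD */

static int create_flag_for(unsigned flags)
{
  return (flags & KEEP_FILES) ? 0 : DELETE_OLD;
}
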
diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c
index 409930ff7fb..6fe31f30c19 100644
--- a/storage/myisam/mi_delete.c
+++ b/storage/myisam/mi_delete.c
@@ -32,7 +32,7 @@ static int _mi_ck_real_delete(register MI_INFO *info,MI_KEYDEF *keyinfo,
uchar *key, uint key_length, my_off_t *root);
-int mi_delete(MI_INFO *info,const byte *record)
+int mi_delete(MI_INFO *info,const uchar *record)
{
uint i;
uchar *old_key;
@@ -78,7 +78,7 @@ int mi_delete(MI_INFO *info,const byte *record)
info->s->keyinfo[i].version++;
if (info->s->keyinfo[i].flag & HA_FULLTEXT )
{
- if (_mi_ft_del(info,i,(char*) old_key,record,info->lastpos))
+ if (_mi_ft_del(info,i, old_key,record,info->lastpos))
goto err;
}
else
@@ -100,7 +100,7 @@ int mi_delete(MI_INFO *info,const byte *record)
info->state->records--;
mi_sizestore(lastpos,info->lastpos);
- myisam_log_command(MI_LOG_DELETE,info,(byte*) lastpos,sizeof(lastpos),0);
+ myisam_log_command(MI_LOG_DELETE,info,(uchar*) lastpos,sizeof(lastpos),0);
VOID(_mi_writeinfo(info,WRITEINFO_UPDATE_KEYFILE));
allow_break(); /* Allow SIGHUP & SIGINT */
if (info->invalidator != 0)
@@ -114,7 +114,7 @@ int mi_delete(MI_INFO *info,const byte *record)
err:
save_errno=my_errno;
mi_sizestore(lastpos,info->lastpos);
- myisam_log_command(MI_LOG_DELETE,info,(byte*) lastpos, sizeof(lastpos),0);
+ myisam_log_command(MI_LOG_DELETE,info,(uchar*) lastpos, sizeof(lastpos),0);
if (save_errno != HA_ERR_RECORD_CHANGED)
{
mi_print_error(info->s, HA_ERR_CRASHED);
@@ -198,7 +198,7 @@ static int _mi_ck_real_delete(register MI_INFO *info, MI_KEYDEF *keyinfo,
}
}
err:
- my_afree((gptr) root_buff);
+ my_afree((uchar*) root_buff);
DBUG_PRINT("exit",("Return: %d",error));
DBUG_RETURN(error);
} /* _mi_ck_real_delete */
@@ -223,7 +223,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
my_off_t leaf_page,next_block;
uchar lastkey[MI_MAX_KEY_BUFF];
DBUG_ENTER("d_search");
- DBUG_DUMP("page",(byte*) anc_buff,mi_getint(anc_buff));
+ DBUG_DUMP("page",(uchar*) anc_buff,mi_getint(anc_buff));
search_key_length= (comp_flag & SEARCH_FIND) ? key_length : USE_WHOLE_KEY;
flag=(*keyinfo->bin_search)(info,keyinfo,anc_buff,key, search_key_length,
@@ -250,7 +250,7 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (info->ft1_to_ft2)
{
/* we're in ft1->ft2 conversion mode. Saving key data */
- insert_dynamic(info->ft1_to_ft2, (char*) (lastkey+off));
+ insert_dynamic(info->ft1_to_ft2, (lastkey+off));
}
else
{
@@ -381,14 +381,14 @@ static int d_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
ret_value|=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,anc_buff);
else
{
- DBUG_DUMP("page",(byte*) anc_buff,mi_getint(anc_buff));
+ DBUG_DUMP("page",(uchar*) anc_buff,mi_getint(anc_buff));
}
- my_afree((byte*) leaf_buff);
+ my_afree((uchar*) leaf_buff);
DBUG_PRINT("exit",("Return: %d",ret_value));
DBUG_RETURN(ret_value);
err:
- my_afree((byte*) leaf_buff);
+ my_afree((uchar*) leaf_buff);
DBUG_PRINT("exit",("Error: %d",my_errno));
DBUG_RETURN (-1);
} /* d_search */
@@ -411,7 +411,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
DBUG_ENTER("del");
DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page,
(ulong) keypos));
- DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff));
+ DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff));
endpos=leaf_buff+mi_getint(leaf_buff);
if (!(key_start=_mi_get_last_key(info,keyinfo,leaf_buff,keybuff,endpos,
@@ -428,7 +428,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
ret_value= -1;
else
{
- DBUG_DUMP("next_page",(byte*) next_buff,mi_getint(next_buff));
+ DBUG_DUMP("next_page",(uchar*) next_buff,mi_getint(next_buff));
if ((ret_value=del(info,keyinfo,key,anc_buff,next_page,next_buff,
keypos,next_block,ret_key)) >0)
{
@@ -455,7 +455,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
if (_mi_write_keypage(info,keyinfo,leaf_page,DFLT_INIT_HITS,leaf_buff))
goto err;
}
- my_afree((byte*) next_buff);
+ my_afree((uchar*) next_buff);
DBUG_RETURN(ret_value);
}
@@ -479,7 +479,7 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
prev_key, prev_key,
keybuff,&s_temp);
if (length > 0)
- bmove_upp((byte*) endpos+length,(byte*) endpos,(uint) (endpos-keypos));
+ bmove_upp((uchar*) endpos+length,(uchar*) endpos,(uint) (endpos-keypos));
else
bmove(keypos,keypos-length, (int) (endpos-keypos)+length);
(*keyinfo->store_key)(keyinfo,keypos,&s_temp);
@@ -517,8 +517,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
DBUG_ENTER("underflow");
DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx",(long) leaf_page,
(ulong) keypos));
- DBUG_DUMP("anc_buff",(byte*) anc_buff,mi_getint(anc_buff));
- DBUG_DUMP("leaf_buff",(byte*) leaf_buff,mi_getint(leaf_buff));
+ DBUG_DUMP("anc_buff",(uchar*) anc_buff,mi_getint(anc_buff));
+ DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff));
buff=info->buff;
info->buff_used=1;
@@ -554,10 +554,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,buff,0))
goto err;
buff_length=mi_getint(buff);
- DBUG_DUMP("next",(byte*) buff,buff_length);
+ DBUG_DUMP("next",(uchar*) buff,buff_length);
/* find keys to make a big key-page */
- bmove((byte*) next_keypos-key_reflength,(byte*) buff+2,
+ bmove((uchar*) next_keypos-key_reflength,(uchar*) buff+2,
key_reflength);
if (!_mi_get_last_key(info,keyinfo,anc_buff,anc_key,next_keypos,&length)
|| !_mi_get_last_key(info,keyinfo,leaf_buff,leaf_key,
@@ -572,8 +572,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
length=buff_length-p_length;
endpos=buff+length+leaf_length+t_length;
/* buff will always be larger than before! */
- bmove_upp((byte*) endpos, (byte*) buff+buff_length,length);
- memcpy((byte*) buff, (byte*) leaf_buff,(size_t) leaf_length);
+ bmove_upp((uchar*) endpos, (uchar*) buff+buff_length,length);
+ memcpy((uchar*) buff, (uchar*) leaf_buff,(size_t) leaf_length);
(*keyinfo->store_key)(keyinfo,buff+leaf_length,&s_temp);
buff_length=(uint) (endpos-buff);
mi_putint(buff,buff_length,nod_flag);
@@ -589,7 +589,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (buff_length <= keyinfo->block_length)
{ /* Keys in one page */
- memcpy((byte*) leaf_buff,(byte*) buff,(size_t) buff_length);
+ memcpy((uchar*) leaf_buff,(uchar*) buff,(size_t) buff_length);
if (_mi_dispose(info,keyinfo,next_page,DFLT_INIT_HITS))
goto err;
}
@@ -605,7 +605,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
&key_length, &after_key)))
goto err;
length=(uint) (half_pos-buff);
- memcpy((byte*) leaf_buff,(byte*) buff,(size_t) length);
+ memcpy((uchar*) leaf_buff,(uchar*) buff,(size_t) length);
mi_putint(leaf_buff,length,nod_flag);
/* Correct new keypointer to leaf_page */
@@ -619,7 +619,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
prev_key, prev_key,
leaf_key, &s_temp);
if (t_length >= 0)
- bmove_upp((byte*) endpos+t_length,(byte*) endpos,
+ bmove_upp((uchar*) endpos+t_length,(uchar*) endpos,
(uint) (endpos-keypos));
else
bmove(keypos,keypos-t_length,(uint) (endpos-keypos)+t_length);
@@ -628,7 +628,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
/* Store key first in new page */
if (nod_flag)
- bmove((byte*) buff+2,(byte*) half_pos-nod_flag,(size_t) nod_flag);
+ bmove((uchar*) buff+2,(uchar*) half_pos-nod_flag,(size_t) nod_flag);
if (!(*keyinfo->get_key)(keyinfo,nod_flag,&half_pos,leaf_key))
goto err;
t_length=(int) (*keyinfo->pack_key)(keyinfo, nod_flag, (uchar*) 0,
@@ -636,7 +636,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
leaf_key, &s_temp);
/* t_length will always be > 0 for a new page! */
length=(uint) ((buff+mi_getint(buff))-half_pos);
- bmove((byte*) buff+p_length+t_length,(byte*) half_pos,(size_t) length);
+ bmove((uchar*) buff+p_length+t_length,(uchar*) half_pos,(size_t) length);
(*keyinfo->store_key)(keyinfo,buff+p_length,&s_temp);
mi_putint(buff,length+t_length+p_length,nod_flag);
@@ -659,10 +659,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
goto err;
buff_length=mi_getint(buff);
endpos=buff+buff_length;
- DBUG_DUMP("prev",(byte*) buff,buff_length);
+ DBUG_DUMP("prev",(uchar*) buff,buff_length);
/* find keys to make a big key-page */
- bmove((byte*) next_keypos - key_reflength,(byte*) leaf_buff+2,
+ bmove((uchar*) next_keypos - key_reflength,(uchar*) leaf_buff+2,
key_reflength);
next_keypos=keypos;
if (!(*keyinfo->get_key)(keyinfo,key_reflength,&next_keypos,
@@ -679,10 +679,10 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
prev_key, prev_key,
anc_key, &s_temp);
if (t_length >= 0)
- bmove((byte*) endpos+t_length,(byte*) leaf_buff+p_length,
+ bmove((uchar*) endpos+t_length,(uchar*) leaf_buff+p_length,
(size_t) (leaf_length-p_length));
else /* We gained space */
- bmove((byte*) endpos,(byte*) leaf_buff+((int) p_length-t_length),
+ bmove((uchar*) endpos,(uchar*) leaf_buff+((int) p_length-t_length),
(size_t) (leaf_length-p_length+t_length));
(*keyinfo->store_key)(keyinfo,endpos,&s_temp);
@@ -715,8 +715,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
goto err;
_mi_kpointer(info,leaf_key+key_length,leaf_page);
/* Save key in anc_buff */
- DBUG_DUMP("anc_buff",(byte*) anc_buff,anc_length);
- DBUG_DUMP("key_to_anc",(byte*) leaf_key,key_length);
+ DBUG_DUMP("anc_buff",(uchar*) anc_buff,anc_length);
+ DBUG_DUMP("key_to_anc",(uchar*) leaf_key,key_length);
temp_pos=anc_buff+anc_length;
t_length=(*keyinfo->pack_key)(keyinfo,key_reflength,
@@ -725,7 +725,7 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
anc_pos, anc_pos,
leaf_key,&s_temp);
if (t_length > 0)
- bmove_upp((byte*) temp_pos+t_length,(byte*) temp_pos,
+ bmove_upp((uchar*) temp_pos+t_length,(uchar*) temp_pos,
(uint) (temp_pos-keypos));
else
bmove(keypos,keypos-t_length,(uint) (temp_pos-keypos)+t_length);
@@ -734,15 +734,15 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
/* Store first key on new page */
if (nod_flag)
- bmove((byte*) leaf_buff+2,(byte*) half_pos-nod_flag,(size_t) nod_flag);
+ bmove((uchar*) leaf_buff+2,(uchar*) half_pos-nod_flag,(size_t) nod_flag);
if (!(length=(*keyinfo->get_key)(keyinfo,nod_flag,&half_pos,leaf_key)))
goto err;
- DBUG_DUMP("key_to_leaf",(byte*) leaf_key,length);
+ DBUG_DUMP("key_to_leaf",(uchar*) leaf_key,length);
t_length=(*keyinfo->pack_key)(keyinfo,nod_flag, (uchar*) 0,
(uchar*) 0, (uchar*) 0, leaf_key, &s_temp);
length=(uint) ((buff+buff_length)-half_pos);
DBUG_PRINT("info",("t_length: %d length: %d",t_length,(int) length));
- bmove((byte*) leaf_buff+p_length+t_length,(byte*) half_pos,
+ bmove((uchar*) leaf_buff+p_length+t_length,(uchar*) half_pos,
(size_t) length);
(*keyinfo->store_key)(keyinfo,leaf_buff+p_length,&s_temp);
mi_putint(leaf_buff,length+t_length+p_length,nod_flag);
@@ -806,7 +806,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag,
if (next_length > prev_length)
{
/* We have to copy data from the current key to the next key */
- bmove_upp((char*) keypos,(char*) (lastkey+next_length),
+ bmove_upp(keypos, (lastkey+next_length),
(next_length-prev_length));
keypos-=(next_length-prev_length)+prev_pack_length;
store_key_length(keypos,prev_length);
@@ -853,7 +853,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag,
if (next_length >= prev_length)
{ /* Key after is based on deleted key */
uint pack_length,tmp;
- bmove_upp((char*) keypos,(char*) (lastkey+next_length),
+ bmove_upp(keypos, (lastkey+next_length),
tmp=(next_length-prev_length));
rest_length+=tmp;
pack_length= prev_length ? get_pack_length(rest_length): 0;
@@ -886,7 +886,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag,
}
}
end:
- bmove((byte*) start,(byte*) start+s_length,
+ bmove((uchar*) start,(uchar*) start+s_length,
(uint) (page_end-start-s_length));
DBUG_RETURN((uint) s_length);
} /* remove_key */
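
bmove() and bmove_upp() above are MySQL's directional overlapping copies (low-to-high and high-to-low respectively), chosen by the caller to match the overlap. The final compaction in remove_key() can be sketched portably with memmove(), which picks the safe direction itself:

#include <string.h>

/* Pull everything after the removed key down over it (sketch only). */
static unsigned compact_page(unsigned char *start, unsigned char *page_end,
                             unsigned s_length)
{
  memmove(start, start + s_length, (size_t) (page_end - start - s_length));
  return s_length;
}
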
diff --git a/storage/myisam/mi_delete_all.c b/storage/myisam/mi_delete_all.c
index a17514486d5..dea0385cbca 100644
--- a/storage/myisam/mi_delete_all.c
+++ b/storage/myisam/mi_delete_all.c
@@ -47,7 +47,7 @@ int mi_delete_all_rows(MI_INFO *info)
for (i=0 ; i < share->base.keys ; i++)
state->key_root[i]= HA_OFFSET_ERROR;
- myisam_log_command(MI_LOG_DELETE_ALL,info,(byte*) 0,0,0);
+ myisam_log_command(MI_LOG_DELETE_ALL,info,(uchar*) 0,0,0);
/*
If we are using delayed keys or if the user has made changes to the tables
since it was locked then there may be key blocks in the key cache
diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 5342619c79b..cdd70abe9ad 100644
--- a/storage/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
@@ -28,15 +28,15 @@
/* Enough for comparing if number is zero */
static char zero_string[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-static int write_dynamic_record(MI_INFO *info,const byte *record,
+static int write_dynamic_record(MI_INFO *info,const uchar *record,
ulong reclength);
static int _mi_find_writepos(MI_INFO *info,ulong reclength,my_off_t *filepos,
ulong *length);
-static int update_dynamic_record(MI_INFO *info,my_off_t filepos,byte *record,
+static int update_dynamic_record(MI_INFO *info,my_off_t filepos,uchar *record,
ulong reclength);
static int delete_dynamic_record(MI_INFO *info,my_off_t filepos,
uint second_read);
-static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
+static int _mi_cmp_buffer(File file, const uchar *buff, my_off_t filepos,
uint length);
#ifdef THREAD
@@ -79,19 +79,19 @@ my_bool mi_dynmap_file(MI_INFO *info, my_off_t size)
mapping. When swap space is not reserved one might get SIGSEGV
upon a write if no physical memory is available.
*/
- info->s->file_map= (byte*)
+ info->s->file_map= (uchar*)
my_mmap(0, (size_t)(size + MEMMAP_EXTRA_MARGIN),
info->s->mode==O_RDONLY ? PROT_READ :
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_NORESERVE,
info->dfile, 0L);
- if (info->s->file_map == (byte*) MAP_FAILED)
+ if (info->s->file_map == (uchar*) MAP_FAILED)
{
info->s->file_map= NULL;
DBUG_RETURN(1);
}
#if defined(HAVE_MADVISE)
- madvise(info->s->file_map, size, MADV_RANDOM);
+ madvise((char*) info->s->file_map, size, MADV_RANDOM);
#endif
info->s->mmaped_length= size;
DBUG_RETURN(0);
@@ -112,7 +112,7 @@ void mi_remap_file(MI_INFO *info, my_off_t size)
{
if (info->s->file_map)
{
- VOID(my_munmap(info->s->file_map,
+ VOID(my_munmap((char*) info->s->file_map,
(size_t) info->s->mmaped_length + MEMMAP_EXTRA_MARGIN));
mi_dynmap_file(info, size);
}
@@ -135,8 +135,8 @@ void mi_remap_file(MI_INFO *info, my_off_t size)
0 ok
*/
-uint mi_mmap_pread(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags)
+size_t mi_mmap_pread(MI_INFO *info, uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags)
{
DBUG_PRINT("info", ("mi_read with mmap %d\n", info->dfile));
if (info->s->concurrent_insert)
@@ -167,8 +167,8 @@ uint mi_mmap_pread(MI_INFO *info, byte *Buffer,
/* wrapper for my_pread in case if mmap isn't used */
-uint mi_nommap_pread(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags)
+size_t mi_nommap_pread(MI_INFO *info, uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags)
{
return my_pread(info->dfile, Buffer, Count, offset, MyFlags);
}
@@ -190,8 +190,8 @@ uint mi_nommap_pread(MI_INFO *info, byte *Buffer,
!=0 error. In this case return error from pwrite
*/
-uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags)
+size_t mi_mmap_pwrite(MI_INFO *info, const uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags)
{
DBUG_PRINT("info", ("mi_write with mmap %d\n", info->dfile));
if (info->s->concurrent_insert)
@@ -224,28 +224,28 @@ uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer,
/* wrapper for my_pwrite in case if mmap isn't used */
-uint mi_nommap_pwrite(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags)
+size_t mi_nommap_pwrite(MI_INFO *info, const uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags)
{
return my_pwrite(info->dfile, Buffer, Count, offset, MyFlags);
}
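
The wrapper hunks above widen Count and the return type from uint to size_t and make the write-side buffer const, bringing the signatures in line with my_pread()/my_pwrite(). A no-mmap read wrapper in that shape, sketched over POSIX pread() — MY_FILE_ERROR is assumed here to be (size_t) -1:

#include <sys/types.h>
#include <unistd.h>

static size_t nommap_pread(int fd, unsigned char *buffer, size_t count,
                           off_t offset)
{
  ssize_t n= pread(fd, buffer, count, offset);
  return n < 0 ? (size_t) -1 : (size_t) n;   /* (size_t) -1 signals error */
}
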
-int _mi_write_dynamic_record(MI_INFO *info, const byte *record)
+int _mi_write_dynamic_record(MI_INFO *info, const uchar *record)
{
ulong reclength=_mi_rec_pack(info,info->rec_buff,record);
return (write_dynamic_record(info,info->rec_buff,reclength));
}
-int _mi_update_dynamic_record(MI_INFO *info, my_off_t pos, const byte *record)
+int _mi_update_dynamic_record(MI_INFO *info, my_off_t pos, const uchar *record)
{
uint length=_mi_rec_pack(info,info->rec_buff,record);
return (update_dynamic_record(info,pos,info->rec_buff,length));
}
-int _mi_write_blob_record(MI_INFO *info, const byte *record)
+int _mi_write_blob_record(MI_INFO *info, const uchar *record)
{
- byte *rec_buff;
+ uchar *rec_buff;
int error;
ulong reclength,reclength2,extra;
@@ -260,7 +260,7 @@ int _mi_write_blob_record(MI_INFO *info, const byte *record)
return -1;
}
#endif
- if (!(rec_buff=(byte*) my_alloca(reclength)))
+ if (!(rec_buff=(uchar*) my_alloca(reclength)))
{
my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */
return(-1);
@@ -277,9 +277,9 @@ int _mi_write_blob_record(MI_INFO *info, const byte *record)
}
-int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const byte *record)
+int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const uchar *record)
{
- byte *rec_buff;
+ uchar *rec_buff;
int error;
ulong reclength,extra;
@@ -294,7 +294,7 @@ int _mi_update_blob_record(MI_INFO *info, my_off_t pos, const byte *record)
return -1;
}
#endif
- if (!(rec_buff=(byte*) my_alloca(reclength)))
+ if (!(rec_buff=(uchar*) my_alloca(reclength)))
{
my_errno= HA_ERR_OUT_OF_MEM; /* purecov: inspected */
return(-1);
@@ -317,7 +317,7 @@ int _mi_delete_dynamic_record(MI_INFO *info)
/* Write record to data-file */
-static int write_dynamic_record(MI_INFO *info, const byte *record,
+static int write_dynamic_record(MI_INFO *info, const uchar *record,
ulong reclength)
{
int flag;
@@ -333,7 +333,7 @@ static int write_dynamic_record(MI_INFO *info, const byte *record,
if (_mi_write_part_record(info,filepos,length,
(info->append_insert_at_end ?
HA_OFFSET_ERROR : info->s->state.dellink),
- (byte**) &record,&reclength,&flag))
+ (uchar**) &record,&reclength,&flag))
goto err;
} while (reclength);
@@ -424,7 +424,7 @@ static bool unlink_deleted_block(MI_INFO *info, MI_BLOCK_INFO *block_info)
& BLOCK_DELETED))
DBUG_RETURN(1); /* Something is wrong */
mi_sizestore(tmp.header+4,block_info->next_filepos);
- if (info->s->file_write(info,(char*) tmp.header+4,8,
+ if (info->s->file_write(info, tmp.header+4,8,
block_info->prev_filepos+4, MYF(MY_NABP)))
DBUG_RETURN(1);
/* Unlink block from next block */
@@ -434,7 +434,7 @@ static bool unlink_deleted_block(MI_INFO *info, MI_BLOCK_INFO *block_info)
& BLOCK_DELETED))
DBUG_RETURN(1); /* Something is wrong */
mi_sizestore(tmp.header+12,block_info->prev_filepos);
- if (info->s->file_write(info,(char*) tmp.header+12,8,
+ if (info->s->file_write(info, tmp.header+12,8,
block_info->next_filepos+12,
MYF(MY_NABP)))
DBUG_RETURN(1);
@@ -483,7 +483,7 @@ static int update_backward_delete_link(MI_INFO *info, my_off_t delete_block,
if (_mi_get_block_info(&block_info,info->dfile,delete_block)
& BLOCK_DELETED)
{
- char buff[8];
+ uchar buff[8];
mi_sizestore(buff,filepos);
if (info->s->file_write(info,buff, 8, delete_block+12, MYF(MY_NABP)))
DBUG_RETURN(1); /* Error on write */
@@ -543,7 +543,7 @@ static int delete_dynamic_record(MI_INFO *info, my_off_t filepos,
bfill(block_info.header+12,8,255);
else
mi_sizestore(block_info.header+12,block_info.next_filepos);
- if (info->s->file_write(info,(byte*) block_info.header,20,filepos,
+ if (info->s->file_write(info,(uchar*) block_info.header,20,filepos,
MYF(MY_NABP)))
DBUG_RETURN(1);
info->s->state.dellink = filepos;
@@ -566,12 +566,12 @@ int _mi_write_part_record(MI_INFO *info,
my_off_t filepos, /* points at empty block */
ulong length, /* length of block */
my_off_t next_filepos,/* Next empty block */
- byte **record, /* pointer to record ptr */
+ uchar **record, /* pointer to record ptr */
ulong *reclength, /* length of *record */
int *flag) /* *flag == 0 if header */
{
ulong head_length,res_length,extra_length,long_block,del_length;
- byte *pos,*record_end;
+ uchar *pos,*record_end;
my_off_t next_delete_block;
uchar temp[MI_SPLIT_LENGTH+MI_DYN_DELETE_BLOCK_HEADER];
DBUG_ENTER("_mi_write_part_record");
@@ -615,7 +615,7 @@ int _mi_write_part_record(MI_INFO *info,
temp[0]=13;
mi_int4store(temp+1,*reclength);
mi_int3store(temp+5,length-head_length);
- mi_sizestore((byte*) temp+8,next_filepos);
+ mi_sizestore((uchar*) temp+8,next_filepos);
}
else
{
@@ -625,13 +625,13 @@ int _mi_write_part_record(MI_INFO *info,
{
mi_int3store(temp+1,*reclength);
mi_int3store(temp+4,length-head_length);
- mi_sizestore((byte*) temp+7,next_filepos);
+ mi_sizestore((uchar*) temp+7,next_filepos);
}
else
{
mi_int2store(temp+1,*reclength);
mi_int2store(temp+3,length-head_length);
- mi_sizestore((byte*) temp+5,next_filepos);
+ mi_sizestore((uchar*) temp+5,next_filepos);
}
}
}
@@ -642,12 +642,12 @@ int _mi_write_part_record(MI_INFO *info,
if (long_block)
{
mi_int3store(temp+1,length-head_length);
- mi_sizestore((byte*) temp+4,next_filepos);
+ mi_sizestore((uchar*) temp+4,next_filepos);
}
else
{
mi_int2store(temp+1,length-head_length);
- mi_sizestore((byte*) temp+3,next_filepos);
+ mi_sizestore((uchar*) temp+3,next_filepos);
}
}
}
@@ -668,14 +668,14 @@ int _mi_write_part_record(MI_INFO *info,
}
length= *reclength+head_length; /* Write only what is needed */
}
- DBUG_DUMP("header",(byte*) temp,head_length);
+ DBUG_DUMP("header",(uchar*) temp,head_length);
/* Make a long block for one write */
record_end= *record+length-head_length;
del_length=(res_length ? MI_DYN_DELETE_BLOCK_HEADER : 0);
- bmove((byte*) (*record-head_length),(byte*) temp,head_length);
+ bmove((uchar*) (*record-head_length),(uchar*) temp,head_length);
memcpy(temp,record_end,(size_t) (extra_length+del_length));
- bzero((byte*) record_end,extra_length);
+ bzero((uchar*) record_end,extra_length);
if (res_length)
{
@@ -715,18 +715,18 @@ int _mi_write_part_record(MI_INFO *info,
if (info->update & HA_STATE_EXTEND_BLOCK)
{
info->update&= ~HA_STATE_EXTEND_BLOCK;
- if (my_block_write(&info->rec_cache,(byte*) *record-head_length,
+ if (my_block_write(&info->rec_cache,(uchar*) *record-head_length,
length+extra_length+del_length,filepos))
goto err;
}
- else if (my_b_write(&info->rec_cache,(byte*) *record-head_length,
+ else if (my_b_write(&info->rec_cache,(uchar*) *record-head_length,
length+extra_length+del_length))
goto err;
}
else
{
info->rec_cache.seek_not_done=1;
- if (info->s->file_write(info,(byte*) *record-head_length,length+extra_length+
+ if (info->s->file_write(info,(uchar*) *record-head_length,length+extra_length+
del_length,filepos,info->s->write_flag))
goto err;
}
@@ -752,7 +752,7 @@ err:
/* update record from datafile */
-static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
+static int update_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *record,
ulong reclength)
{
int flag;
@@ -836,7 +836,7 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, byte *record,
mi_int3store(del_block.header+1, rest_length);
mi_sizestore(del_block.header+4,info->s->state.dellink);
bfill(del_block.header+12,8,255);
- if (info->s->file_write(info,(byte*) del_block.header,20, next_pos,
+ if (info->s->file_write(info,(uchar*) del_block.header,20, next_pos,
MYF(MY_NABP)))
DBUG_RETURN(1);
info->s->state.dellink= next_pos;
@@ -875,10 +875,11 @@ err:
/* Pack a record. Return new reclength */
-uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
+uint _mi_rec_pack(MI_INFO *info, register uchar *to,
+ register const uchar *from)
{
uint length,new_length,flag,bit,i;
- char *pos,*end,*startpos,*packpos;
+ uchar *pos,*end,*startpos,*packpos;
enum en_fieldtype type;
reg3 MI_COLUMNDEF *rec;
MI_BLOB *blob;
@@ -901,7 +902,7 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
{
char *temp_pos;
size_t tmp_length=length-mi_portable_sizeof_char_ptr;
- memcpy((byte*) to,from,tmp_length);
+ memcpy((uchar*) to,from,tmp_length);
memcpy_fixed(&temp_pos,from+tmp_length,sizeof(char*));
memcpy(to+tmp_length,temp_pos,(size_t) blob->length);
to+=tmp_length+blob->length;
@@ -910,17 +911,17 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
}
else if (type == FIELD_SKIP_ZERO)
{
- if (memcmp((byte*) from,zero_string,length) == 0)
+ if (memcmp((uchar*) from,zero_string,length) == 0)
flag|=bit;
else
{
- memcpy((byte*) to,from,(size_t) length); to+=length;
+ memcpy((uchar*) to,from,(size_t) length); to+=length;
}
}
else if (type == FIELD_SKIP_ENDSPACE ||
type == FIELD_SKIP_PRESPACE)
{
- pos= (byte*) from; end= (byte*) from + length;
+ pos= (uchar*) from; end= (uchar*) from + length;
if (type == FIELD_SKIP_ENDSPACE)
{ /* Pack trailing spaces */
while (end > from && *(end-1) == ' ')
@@ -943,7 +944,7 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
}
else
*to++= (char) new_length;
- memcpy((byte*) to,pos,(size_t) new_length); to+=new_length;
+ memcpy((uchar*) to,pos,(size_t) new_length); to+=new_length;
flag|=bit;
}
else
@@ -1000,11 +1001,11 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
Returns 0 if record is ok.
*/
-my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff,
+my_bool _mi_rec_check(MI_INFO *info,const uchar *record, uchar *rec_buff,
ulong packed_length, my_bool with_checksum)
{
uint length,new_length,flag,bit,i;
- char *pos,*end,*packpos,*to;
+ uchar *pos,*end,*packpos,*to;
enum en_fieldtype type;
reg3 MI_COLUMNDEF *rec;
DBUG_ENTER("_mi_rec_check");
@@ -1029,7 +1030,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff,
}
else if (type == FIELD_SKIP_ZERO)
{
- if (memcmp((byte*) record,zero_string,length) == 0)
+ if (memcmp((uchar*) record,zero_string,length) == 0)
{
if (!(flag & bit))
goto err;
@@ -1040,7 +1041,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff,
else if (type == FIELD_SKIP_ENDSPACE ||
type == FIELD_SKIP_PRESPACE)
{
- pos= (byte*) record; end= (byte*) record + length;
+ pos= (uchar*) record; end= (uchar*) record + length;
if (type == FIELD_SKIP_ENDSPACE)
{ /* Pack trailing spaces */
while (end > record && *(end-1) == ' ')
@@ -1122,12 +1123,12 @@ err:
/* Returns -1 and my_errno = HA_ERR_RECORD_DELETED if reclength isn't */
/* right. Returns reclength (>0) if ok */
-ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
+ulong _mi_rec_unpack(register MI_INFO *info, register uchar *to, uchar *from,
ulong found_length)
{
uint flag,bit,length,rec_length,min_pack_length;
enum en_fieldtype type;
- byte *from_end,*to_end,*packpos;
+ uchar *from_end,*to_end,*packpos;
reg3 MI_COLUMNDEF *rec,*end_field;
DBUG_ENTER("_mi_rec_unpack");
@@ -1173,7 +1174,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
if (flag & bit)
{
if (type == FIELD_BLOB || type == FIELD_SKIP_ZERO)
- bzero((byte*) to,rec_length);
+ bzero((uchar*) to,rec_length);
else if (type == FIELD_SKIP_ENDSPACE ||
type == FIELD_SKIP_PRESPACE)
{
@@ -1195,13 +1196,13 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
goto err;
if (type == FIELD_SKIP_ENDSPACE)
{
- memcpy(to,(byte*) from,(size_t) length);
- bfill((byte*) to+length,rec_length-length,' ');
+ memcpy(to,(uchar*) from,(size_t) length);
+ bfill((uchar*) to+length,rec_length-length,' ');
}
else
{
- bfill((byte*) to,rec_length-length,' ');
- memcpy(to+rec_length-length,(byte*) from,(size_t) length);
+ bfill((uchar*) to,rec_length-length,' ');
+ memcpy(to+rec_length-length,(uchar*) from,(size_t) length);
}
from+=length;
}
@@ -1215,9 +1216,9 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
from_left - size_length < blob_length ||
from_left - size_length - blob_length < min_pack_length)
goto err;
- memcpy((byte*) to,(byte*) from,(size_t) size_length);
+ memcpy((uchar*) to,(uchar*) from,(size_t) size_length);
from+=size_length;
- memcpy_fixed((byte*) to+size_length,(byte*) &from,sizeof(char*));
+ memcpy_fixed((uchar*) to+size_length,(uchar*) &from,sizeof(char*));
from+=blob_length;
}
else
@@ -1226,7 +1227,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
min_pack_length--;
if (min_pack_length + rec_length > (uint) (from_end - from))
goto err;
- memcpy(to,(byte*) from,(size_t) rec_length); from+=rec_length;
+ memcpy(to,(uchar*) from,(size_t) rec_length); from+=rec_length;
}
if ((bit= bit << 1) >= 256)
{
@@ -1238,7 +1239,7 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
if (min_pack_length > (uint) (from_end - from))
goto err;
min_pack_length-=rec_length;
- memcpy(to, (byte*) from, (size_t) rec_length);
+ memcpy(to, (uchar*) from, (size_t) rec_length);
from+=rec_length;
}
}
@@ -1251,14 +1252,14 @@ err:
my_errno= HA_ERR_WRONG_IN_RECORD;
DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx",
(long) to, (long) to_end, (long) from, (long) from_end));
- DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length);
+ DBUG_DUMP("from",(uchar*) info->rec_buff,info->s->base.min_pack_length);
DBUG_RETURN(MY_FILE_ERROR);
} /* _mi_rec_unpack */
/* Calc length of blob. Update info in blobs->length */
-ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record)
+ulong _my_calc_total_blob_length(MI_INFO *info, const uchar *record)
{
ulong length;
MI_BLOB *blob,*end;
@@ -1274,7 +1275,7 @@ ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record)
}
-ulong _mi_calc_blob_length(uint length, const byte *pos)
+ulong _mi_calc_blob_length(uint length, const uchar *pos)
{
switch (length) {
case 1:
@@ -1292,7 +1293,7 @@ ulong _mi_calc_blob_length(uint length, const byte *pos)
}
-void _my_store_blob_length(byte *pos,uint pack_length,uint length)
+void _my_store_blob_length(uchar *pos,uint pack_length,uint length)
{
switch (pack_length) {
case 1:
@@ -1345,11 +1346,11 @@ void _my_store_blob_length(byte *pos,uint pack_length,uint length)
-1 Error
*/
-int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
+int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *buf)
{
int block_of_record;
uint b_type,left_length;
- byte *to;
+ uchar *to;
MI_BLOCK_INFO block_info;
File file;
DBUG_ENTER("mi_read_dynamic_record");
@@ -1405,7 +1406,7 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
prefetch_len= block_info.data_len;
if (prefetch_len)
{
- memcpy((byte*) to, block_info.header + offset, prefetch_len);
+ memcpy((uchar*) to, block_info.header + offset, prefetch_len);
block_info.data_len-= prefetch_len;
left_length-= prefetch_len;
to+= prefetch_len;
@@ -1423,7 +1424,7 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
there is no equivalent without seeking. We are at the right
position already. :(
*/
- if (info->s->file_read(info, (byte*) to, block_info.data_len,
+ if (info->s->file_read(info, (uchar*) to, block_info.data_len,
filepos, MYF(MY_NABP)))
goto panic;
left_length-=block_info.data_len;
@@ -1450,9 +1451,9 @@ err:
/* compare unique constraint between stored rows */
int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def,
- const byte *record, my_off_t pos)
+ const uchar *record, my_off_t pos)
{
- byte *rec_buff,*old_record;
+ uchar *rec_buff,*old_record;
int error;
DBUG_ENTER("_mi_cmp_dynamic_unique");
@@ -1478,11 +1479,11 @@ int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def,
/* Compare of record one disk with packed record in memory */
-int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record)
+int _mi_cmp_dynamic_record(register MI_INFO *info, register const uchar *record)
{
uint flag,reclength,b_type;
my_off_t filepos;
- byte *buffer;
+ uchar *buffer;
MI_BLOCK_INFO block_info;
DBUG_ENTER("_mi_cmp_dynamic_record");
@@ -1504,7 +1505,7 @@ int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record)
{ /* If check isn't disabled */
if (info->s->base.blobs)
{
- if (!(buffer=(byte*) my_alloca(info->s->base.pack_reclength+
+ if (!(buffer=(uchar*) my_alloca(info->s->base.pack_reclength+
_my_calc_total_blob_length(info,record))))
DBUG_RETURN(-1);
}
@@ -1552,18 +1553,18 @@ int _mi_cmp_dynamic_record(register MI_INFO *info, register const byte *record)
my_errno=0;
err:
if (buffer != info->rec_buff)
- my_afree((gptr) buffer);
+ my_afree((uchar*) buffer);
DBUG_RETURN(my_errno);
}
/* Compare file to buffer */
-static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
+static int _mi_cmp_buffer(File file, const uchar *buff, my_off_t filepos,
uint length)
{
uint next_length;
- char temp_buff[IO_SIZE*2];
+ uchar temp_buff[IO_SIZE*2];
DBUG_ENTER("_mi_cmp_buffer");
next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1));
@@ -1571,7 +1572,7 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
while (length > IO_SIZE*2)
{
if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) ||
- memcmp((byte*) buff,temp_buff,next_length))
+ memcmp(buff, temp_buff, next_length))
goto err;
filepos+=next_length;
buff+=next_length;
@@ -1580,7 +1581,7 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
}
if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP)))
goto err;
- DBUG_RETURN(memcmp((byte*) buff,temp_buff,length));
+ DBUG_RETURN(memcmp(buff,temp_buff,length));
err:
DBUG_RETURN(1);
}
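
_mi_cmp_buffer() above compares a file range against memory in IO_SIZE*2 chunks, sizing the first chunk so later reads fall on IO_SIZE boundaries. A simplified chunked compare without the alignment trick, sketch only and assumed equivalent in result:

#include <string.h>
#include <unistd.h>

static int cmp_file_to_buffer(int fd, const unsigned char *buff,
                              off_t filepos, size_t length)
{
  unsigned char tmp[4096];
  while (length > 0)
  {
    size_t chunk= length < sizeof(tmp) ? length : sizeof(tmp);
    if (pread(fd, tmp, chunk, filepos) != (ssize_t) chunk ||
        memcmp(buff, tmp, chunk))
      return 1;                       /* differ (or read error) */
    filepos+= chunk;
    buff+= chunk;
    length-= chunk;
  }
  return 0;                           /* equal */
}
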
@@ -1620,13 +1621,13 @@ err:
!= 0 Error
*/
-int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
+int _mi_read_rnd_dynamic_record(MI_INFO *info, uchar *buf,
register my_off_t filepos,
my_bool skip_deleted_blocks)
{
int block_of_record, info_read, save_errno;
uint left_len,b_type;
- byte *to;
+ uchar *to;
MI_BLOCK_INFO block_info;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("_mi_read_rnd_dynamic_record");
@@ -1672,7 +1673,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
}
if (info->opt_flag & READ_CACHE_USED)
{
- if (_mi_read_cache(&info->rec_cache,(byte*) block_info.header,filepos,
+ if (_mi_read_cache(&info->rec_cache,(uchar*) block_info.header,filepos,
sizeof(block_info.header),
(!block_of_record && skip_deleted_blocks ?
READING_NEXT : 0) | READING_HEADER))
@@ -1735,7 +1736,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
tmp_length= block_info.data_len;
if (tmp_length)
{
- memcpy((byte*) to, block_info.header+offset,tmp_length);
+ memcpy((uchar*) to, block_info.header+offset,tmp_length);
block_info.data_len-=tmp_length;
left_len-=tmp_length;
to+=tmp_length;
@@ -1747,7 +1748,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
{
if (info->opt_flag & READ_CACHE_USED)
{
- if (_mi_read_cache(&info->rec_cache,(byte*) to,filepos,
+ if (_mi_read_cache(&info->rec_cache,(uchar*) to,filepos,
block_info.data_len,
(!block_of_record && skip_deleted_blocks) ?
READING_NEXT : 0))
@@ -1761,7 +1762,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
flush_io_cache(&info->rec_cache))
goto err;
/* VOID(my_seek(info->dfile,filepos,MY_SEEK_SET,MYF(0))); */
- if (my_read(info->dfile,(byte*) to,block_info.data_len,MYF(MY_NABP)))
+ if (my_read(info->dfile,(uchar*) to,block_info.data_len,MYF(MY_NABP)))
{
if (my_errno == -1)
my_errno= HA_ERR_WRONG_IN_RECORD; /* Unexpected end of file */
@@ -1814,11 +1815,11 @@ uint _mi_get_block_info(MI_BLOCK_INFO *info, File file, my_off_t filepos)
my_pread() may leave the file pointer untouched.
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
- if (my_read(file,(char*) header,sizeof(info->header),MYF(0)) !=
+ if (my_read(file, header, sizeof(info->header),MYF(0)) !=
sizeof(info->header))
goto err;
}
- DBUG_DUMP("header",(byte*) header,MI_BLOCK_INFO_HEADER_LENGTH);
+ DBUG_DUMP("header",header,MI_BLOCK_INFO_HEADER_LENGTH);
if (info->second_read)
{
if (info->header[0] <= 6 || info->header[0] == 13)
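
_mi_get_block_info() above treats a short read of the block header as failure: my_read() must return exactly sizeof(info->header) bytes or the code jumps to err. The same guard over plain pread(), with an illustrative header length:

#include <sys/types.h>
#include <unistd.h>

#define HDR_LEN 20   /* illustrative; MyISAM uses MI_BLOCK_INFO_HEADER_LENGTH */

static int read_block_header(int fd, off_t filepos, unsigned char *header)
{
  return pread(fd, header, HDR_LEN, filepos) == HDR_LEN ? 0 : -1;
}
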
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index 729174b6f88..1b4c79d13de 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -78,7 +78,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
if (_mi_memmap_file(info))
{
/* We don't need MADV_SEQUENTIAL for small files */
- madvise(share->file_map,share->state.state.data_file_length,
+ madvise((char*) share->file_map, share->state.state.data_file_length,
share->state.state.data_file_length <= RECORD_CACHE_SIZE*16 ?
MADV_RANDOM : MADV_SEQUENTIAL);
pthread_mutex_unlock(&share->intern_lock);
@@ -158,7 +158,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
}
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
if (info->opt_flag & MEMMAP_USED)
- madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
+ madvise((char*) share->file_map, share->state.state.data_file_length,
+ MADV_RANDOM);
#endif
break;
case HA_EXTRA_FLUSH_CACHE:
@@ -180,8 +181,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_KEYREAD: /* Read only keys to record */
case HA_EXTRA_REMEMBER_POS:
info->opt_flag |= REMEMBER_OLD_POS;
- bmove((byte*) info->lastkey+share->base.max_key_length*2,
- (byte*) info->lastkey,info->lastkey_length);
+ bmove((uchar*) info->lastkey+share->base.max_key_length*2,
+ (uchar*) info->lastkey,info->lastkey_length);
info->save_update= info->update;
info->save_lastinx= info->lastinx;
info->save_lastpos= info->lastpos;
@@ -197,8 +198,8 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_RESTORE_POS:
if (info->opt_flag & REMEMBER_OLD_POS)
{
- bmove((byte*) info->lastkey,
- (byte*) info->lastkey+share->base.max_key_length*2,
+ bmove((uchar*) info->lastkey,
+ (uchar*) info->lastkey+share->base.max_key_length*2,
info->save_lastkey_length);
info->update= info->save_update | HA_STATE_WRITTEN;
info->lastinx= info->save_lastinx;
@@ -258,7 +259,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_PREPARE_FOR_DELETE:
pthread_mutex_lock(&THR_LOCK_myisam);
share->last_version= 0L; /* Impossible version */
-#ifdef __WIN__
+#ifdef __WIN__REMOVE_OBSOLETE_WORKAROUND
/* Close the isam and data files as Win32 can't drop an open table */
pthread_mutex_lock(&share->intern_lock);
if (flush_key_blocks(share->key_cache, share->kfile,
@@ -385,7 +386,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
{
char tmp[1];
tmp[0]=function;
- myisam_log_command(MI_LOG_EXTRA,info,(byte*) tmp,1,error);
+ myisam_log_command(MI_LOG_EXTRA,info,(uchar*) tmp,1,error);
}
DBUG_RETURN(error);
} /* mi_extra */
@@ -435,7 +436,8 @@ int mi_reset(MI_INFO *info)
mi_alloc_rec_buff(info, -1, &info->rec_buff);
#if defined(HAVE_MMAP) && defined(HAVE_MADVISE)
if (info->opt_flag & MEMMAP_USED)
- madvise(share->file_map,share->state.state.data_file_length,MADV_RANDOM);
+ madvise((char*) share->file_map, share->state.state.data_file_length,
+ MADV_RANDOM);
#endif
info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS);
info->quick_mode=0;
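
The (char*) casts added to the madvise() calls above exist because some platforms declare the first parameter as caddr_t (char *) rather than void *; the cast compiles cleanly on both. A usage sketch mirroring the sequential-vs-random heuristic in mi_extra():

#include <stddef.h>
#include <sys/mman.h>

static void advise_map(void *map, size_t length, int sequential)
{
  (void) madvise((char *) map, length,
                 sequential ? MADV_SEQUENTIAL : MADV_RANDOM);
}
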
diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c
index 2f4915dec39..3f445ebf44d 100644
--- a/storage/myisam/mi_key.c
+++ b/storage/myisam/mi_key.c
@@ -31,7 +31,7 @@
set_if_smaller(char_length,length); \
} while(0)
-static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record);
+static int _mi_put_key_in_record(MI_INFO *info,uint keynr,uchar *record);
/*
Make an internal key from a record
@@ -49,9 +49,9 @@ static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record);
*/
uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
- const byte *record, my_off_t filepos)
+ const uchar *record, my_off_t filepos)
{
- byte *pos;
+ uchar *pos;
uchar *start;
reg1 HA_KEYSEG *keyseg;
my_bool is_ft= info->s->keyinfo[keynr].flag & HA_FULLTEXT;
@@ -90,7 +90,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
char_length= ((!is_ft && cs && cs->mbmaxlen > 1) ? length/cs->mbmaxlen :
length);
- pos= (byte*) record+keyseg->start;
+ pos= (uchar*) record+keyseg->start;
if (type == HA_KEYTYPE_BIT)
{
if (keyseg->bit_length)
@@ -100,7 +100,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
*key++= bits;
length--;
}
- memcpy((byte*) key, pos, length);
+ memcpy((uchar*) key, pos, length);
key+= length;
continue;
}
@@ -108,18 +108,18 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
{
if (type != HA_KEYTYPE_NUM)
{
- length= cs->cset->lengthsp(cs, pos, length);
+ length= cs->cset->lengthsp(cs, (char*) pos, length);
}
else
{
- byte *end= pos + length;
+ uchar *end= pos + length;
while (pos < end && pos[0] == ' ')
pos++;
length=(uint) (end-pos);
}
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ memcpy((uchar*) key,(uchar*) pos,(size_t) char_length);
key+=char_length;
continue;
}
@@ -132,18 +132,18 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
set_if_smaller(length,tmp_length);
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ memcpy((uchar*) key,(uchar*) pos,(size_t) char_length);
key+= char_length;
continue;
}
else if (keyseg->flag & HA_BLOB_PART)
{
uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos);
- memcpy_fixed((byte*) &pos,pos+keyseg->bit_start,sizeof(char*));
+ memcpy_fixed((uchar*) &pos,pos+keyseg->bit_start,sizeof(char*));
set_if_smaller(length,tmp_length);
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key,(byte*) pos,(size_t) char_length);
+ memcpy((uchar*) key,(uchar*) pos,(size_t) char_length);
key+= char_length;
continue;
}
@@ -182,14 +182,14 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
continue;
}
FIX_LENGTH(cs, pos, length, char_length);
- memcpy((byte*) key, pos, char_length);
+ memcpy((uchar*) key, pos, char_length);
if (length > char_length)
cs->cset->fill(cs, (char*) key+char_length, length-char_length, ' ');
key+= length;
}
_mi_dpointer(info,key,filepos);
DBUG_PRINT("exit",("keynr: %d",keynr));
- DBUG_DUMP("key",(byte*) start,(uint) (key-start)+keyseg->length);
+ DBUG_DUMP("key",(uchar*) start,(uint) (key-start)+keyseg->length);
DBUG_EXECUTE("key",
_mi_print_key(DBUG_FILE,info->s->keyinfo[keynr].seg,start,
(uint) (key-start)););
@@ -254,20 +254,20 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
if (keyseg->flag & HA_SPACE_PACK)
{
uchar *end=pos+length;
- if (type != HA_KEYTYPE_NUM)
- {
- while (end > pos && end[-1] == ' ')
- end--;
- }
- else
+ if (type == HA_KEYTYPE_NUM)
{
while (pos < end && pos[0] == ' ')
pos++;
}
+ else if (type != HA_KEYTYPE_BINARY)
+ {
+ while (end > pos && end[-1] == ' ')
+ end--;
+ }
length=(uint) (end-pos);
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
- memcpy((byte*) key,pos,(size_t) char_length);
+ memcpy((uchar*) key,pos,(size_t) char_length);
key+= char_length;
continue;
}
@@ -280,7 +280,7 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
FIX_LENGTH(cs, pos, length, char_length);
store_key_length_inc(key,char_length);
old+=2; /* Skip length */
- memcpy((byte*) key, pos,(size_t) char_length);
+ memcpy((uchar*) key, pos,(size_t) char_length);
key+= char_length;
continue;
}
@@ -292,7 +292,7 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
continue;
}
FIX_LENGTH(cs, pos, length, char_length);
- memcpy((byte*) key, pos, char_length);
+ memcpy((uchar*) key, pos, char_length);
if (length > char_length)
cs->cset->fill(cs, (char*) key+char_length, length-char_length, ' ');
key+= length;
@@ -325,16 +325,16 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
*/
static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
- byte *record)
+ uchar *record)
{
- reg2 byte *key;
- byte *pos,*key_end;
+ reg2 uchar *key;
+ uchar *pos,*key_end;
reg1 HA_KEYSEG *keyseg;
- byte *blob_ptr;
+ uchar *blob_ptr;
DBUG_ENTER("_mi_put_key_in_record");
- blob_ptr= (byte*) info->lastkey2; /* Place to put blob parts */
- key=(byte*) info->lastkey; /* KEy that was read */
+ blob_ptr= (uchar*) info->lastkey2; /* Place to put blob parts */
+ key=(uchar*) info->lastkey; /* Key that was read */
key_end=key+info->lastkey_length;
for (keyseg=info->s->keyinfo[keynr].seg ; keyseg->type ;keyseg++)
{
@@ -363,7 +363,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
clr_rec_bits(record + keyseg->bit_pos, keyseg->bit_start,
keyseg->bit_length);
}
- memcpy(record + keyseg->start, (byte*) key, length);
+ memcpy(record + keyseg->start, (uchar*) key, length);
key+= length;
continue;
}
@@ -380,7 +380,8 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
{
memcpy(pos,key,(size_t) length);
keyseg->charset->cset->fill(keyseg->charset,
- pos + length, keyseg->length - length,
+ (char*) pos + length,
+ keyseg->length - length,
' ');
}
else
@@ -406,7 +407,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
else
int2store(record+keyseg->start, length);
/* And key data */
- memcpy(record+keyseg->start + keyseg->bit_start, (byte*) key, length);
+ memcpy(record+keyseg->start + keyseg->bit_start, (uchar*) key, length);
key+= length;
}
else if (keyseg->flag & HA_BLOB_PART)
@@ -431,8 +432,8 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
}
else if (keyseg->flag & HA_SWAP_KEY)
{
- byte *to= record+keyseg->start+keyseg->length;
- byte *end= key+keyseg->length;
+ uchar *to= record+keyseg->start+keyseg->length;
+ uchar *end= key+keyseg->length;
#ifdef CHECK_KEYS
if (end > key_end)
goto err;
@@ -449,7 +450,7 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr,
if (key+keyseg->length > key_end)
goto err;
#endif
- memcpy(record+keyseg->start,(byte*) key,
+ memcpy(record+keyseg->start,(uchar*) key,
(size_t) keyseg->length);
key+= keyseg->length;
}
@@ -463,7 +464,7 @@ err:
/* Here when key reads are used */
-int _mi_read_key_record(MI_INFO *info, my_off_t filepos, byte *buf)
+int _mi_read_key_record(MI_INFO *info, my_off_t filepos, uchar *buf)
{
fast_mi_writeinfo(info);
if (filepos != HA_OFFSET_ERROR)
@@ -498,7 +499,7 @@ int _mi_read_key_record(MI_INFO *info, my_off_t filepos, byte *buf)
less than zero.
*/
-ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record)
+ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record)
{
ulonglong value= 0; /* Store unsigned values here */
longlong s_value= 0; /* Store signed values here */
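The byte -> uchar sweep in the key code above is not purely cosmetic: the old byte typedef was effectively a plain char, which is signed on most platforms, so any ordering or arithmetic done on key bytes >= 0x80 sign-extends and compares wrongly. A minimal standalone illustration of the difference (our own example, not part of the patch):

#include <stdio.h>

int main(void)
{
  char          s= (char) 0xE9;            /* e.g. latin1 e-acute */
  unsigned char u= (unsigned char) 0xE9;

  /* On signed-char platforms s is -23, while u is always 233. */
  printf("as char: %d   as uchar: %u\n", (int) s, (unsigned) u);

  /* An ordering test on the raw byte flips under signed char: */
  printf("char says 0xE9 %s 'A', uchar says 0xE9 %s 'A'\n",
         s < 'A' ? "<" : ">", u < 'A' ? "<" : ">");
  return 0;
}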
diff --git a/storage/myisam/mi_keycache.c b/storage/myisam/mi_keycache.c
index 6694893e9c3..5cf3fede1ae 100644
--- a/storage/myisam/mi_keycache.c
+++ b/storage/myisam/mi_keycache.c
@@ -104,7 +104,8 @@ int mi_assign_to_key_cache(MI_INFO *info,
share->key_cache= key_cache;
/* store the key cache in the global hash structure for future opens */
- if (multi_key_cache_set(share->unique_file_name, share->unique_name_length,
+ if (multi_key_cache_set((uchar*) share->unique_file_name,
+ share->unique_name_length,
share->key_cache))
error= my_errno;
pthread_mutex_unlock(&share->intern_lock);
diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c
index e822ea9e6da..ec359d13a14 100644
--- a/storage/myisam/mi_locking.c
+++ b/storage/myisam/mi_locking.c
@@ -254,7 +254,7 @@ int mi_lock_database(MI_INFO *info, int lock_type)
pthread_mutex_unlock(&share->intern_lock);
#if defined(FULL_LOG) || defined(_lint)
lock_type|=(int) (flag << 8); /* Set bit to set if real lock */
- myisam_log_command(MI_LOG_LOCK,info,(byte*) &lock_type,sizeof(lock_type),
+ myisam_log_command(MI_LOG_LOCK,info,(uchar*) &lock_type,sizeof(lock_type),
error);
#endif
DBUG_RETURN(error);
@@ -520,7 +520,7 @@ int _mi_test_if_changed(register MI_INFO *info)
int _mi_mark_file_changed(MI_INFO *info)
{
- char buff[3];
+ uchar buff[3];
register MYISAM_SHARE *share=info->s;
DBUG_ENTER("_mi_mark_file_changed");
@@ -553,7 +553,7 @@ int _mi_mark_file_changed(MI_INFO *info)
int _mi_decrement_open_count(MI_INFO *info)
{
- char buff[2];
+ uchar buff[2];
register MYISAM_SHARE *share=info->s;
int lock_error=0,write_error=0;
if (share->global_changed)
diff --git a/storage/myisam/mi_log.c b/storage/myisam/mi_log.c
index f720f752a06..8b9ca038fec 100644
--- a/storage/myisam/mi_log.c
+++ b/storage/myisam/mi_log.c
@@ -74,9 +74,9 @@ int mi_log(int activate_log)
/* All logs start with command(1) dfile(2) process(4) result(2) */
void _myisam_log(enum myisam_log_commands command, MI_INFO *info,
- const byte *buffert, uint length)
+ const uchar *buffert, uint length)
{
- char buff[11];
+ uchar buff[11];
int error,old_errno;
ulong pid=(ulong) GETPID();
old_errno=my_errno;
@@ -98,9 +98,9 @@ void _myisam_log(enum myisam_log_commands command, MI_INFO *info,
void _myisam_log_command(enum myisam_log_commands command, MI_INFO *info,
- const byte *buffert, uint length, int result)
+ const uchar *buffert, uint length, int result)
{
- char buff[9];
+ uchar buff[9];
int error,old_errno;
ulong pid=(ulong) GETPID();
@@ -122,9 +122,9 @@ void _myisam_log_command(enum myisam_log_commands command, MI_INFO *info,
void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info,
- const byte *record, my_off_t filepos, int result)
+ const uchar *record, my_off_t filepos, int result)
{
- char buff[21],*pos;
+ uchar buff[21],*pos;
int error,old_errno;
uint length;
ulong pid=(ulong) GETPID();
@@ -134,7 +134,7 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info,
length=info->s->base.reclength;
else
length=info->s->base.reclength+ _my_calc_total_blob_length(info,record);
- buff[0]=(char) command;
+ buff[0]=(uchar) command;
mi_int2store(buff+1,info->dfile);
mi_int4store(buff+3,pid);
mi_int2store(buff+7,result);
@@ -142,8 +142,8 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info,
mi_int4store(buff+17,length);
pthread_mutex_lock(&THR_LOCK_myisam);
error=my_lock(myisam_log_file,F_WRLCK,0L,F_TO_EOF,MYF(MY_SEEK_NOT_DONE));
- VOID(my_write(myisam_log_file,buff,sizeof(buff),MYF(0)));
- VOID(my_write(myisam_log_file,(byte*) record,info->s->base.reclength,MYF(0)));
+ VOID(my_write(myisam_log_file, buff,sizeof(buff),MYF(0)));
+ VOID(my_write(myisam_log_file, record,info->s->base.reclength,MYF(0)));
if (info->s->base.blobs)
{
MI_BLOB *blob,*end;
@@ -152,7 +152,8 @@ void _myisam_log_record(enum myisam_log_commands command, MI_INFO *info,
blob != end ;
blob++)
{
- memcpy_fixed(&pos,record+blob->offset+blob->pack_length,sizeof(char*));
+ memcpy_fixed((uchar*) &pos, record+blob->offset+blob->pack_length,
+ sizeof(char*));
VOID(my_write(myisam_log_file,pos,blob->length,MYF(0)));
}
}
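For orientation, the log header assembled by the functions above follows the layout named in the comment at the top of this file -- command(1) dfile(2) process(4) result(2), nine bytes in all -- with multi-byte fields stored high-byte-first in the style of the mi_intNstore macros. A standalone sketch with hand-rolled stores (these helpers mirror, but do not reuse, the real macros):

#include <stdio.h>

static void store2(unsigned char *p, unsigned v)
{ p[0]= (unsigned char) (v >> 8); p[1]= (unsigned char) v; }

static void store4(unsigned char *p, unsigned long v)
{ store2(p, (unsigned) (v >> 16)); store2(p + 2, (unsigned) v); }

int main(void)
{
  unsigned char buff[9];
  unsigned i;
  buff[0]= 1;                /* command */
  store2(buff + 1, 3);       /* data file descriptor */
  store4(buff + 3, 4711UL);  /* process id */
  store2(buff + 7, 0);       /* result */
  for (i= 0; i < sizeof(buff); i++)
    printf("%02x ", buff[i]);
  putchar('\n');
  return 0;
}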
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index f4e089b6313..b848c822f75 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -79,7 +79,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
key_parts,unique_key_parts,fulltext_keys,uniques;
char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
data_name[FN_REFLEN];
- char *disk_cache, *disk_pos, *end_pos;
+ uchar *disk_cache, *disk_pos, *end_pos;
MI_INFO info,*m_info,*old_info;
MYISAM_SHARE share_buff,*share;
ulong rec_per_key_part[MI_MAX_POSSIBLE_KEY*MI_MAX_KEY_SEG];
@@ -92,7 +92,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
lock_error=1;
errpos=0;
head_length=sizeof(share_buff.state.header);
- bzero((byte*) &info,sizeof(info));
+ bzero((uchar*) &info,sizeof(info));
my_realpath(name_buff, fn_format(org_name,name,"",MI_NAME_IEXT,
MY_UNPACK_FILENAME),MYF(0));
@@ -100,11 +100,12 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
if (!(old_info=test_if_reopen(name_buff)))
{
share= &share_buff;
- bzero((gptr) &share_buff,sizeof(share_buff));
+ bzero((uchar*) &share_buff,sizeof(share_buff));
share_buff.state.rec_per_key_part=rec_per_key_part;
share_buff.state.key_root=key_root;
share_buff.state.key_del=key_del;
- share_buff.key_cache= multi_key_cache_search(name_buff, strlen(name_buff));
+ share_buff.key_cache= multi_key_cache_search((uchar*) name_buff,
+ strlen(name_buff));
DBUG_EXECUTE_IF("myisam_pretend_crashed_table_on_open",
if (strstr(name, "/t1"))
@@ -121,14 +122,14 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
}
share->mode=open_mode;
errpos=1;
- if (my_read(kfile,(char*) share->state.header.file_version,head_length,
+ if (my_read(kfile, share->state.header.file_version, head_length,
MYF(MY_NABP)))
{
my_errno= HA_ERR_NOT_A_TABLE;
goto err;
}
- if (memcmp((byte*) share->state.header.file_version,
- (byte*) myisam_file_magic, 4))
+ if (memcmp((uchar*) share->state.header.file_version,
+ (uchar*) myisam_file_magic, 4))
{
DBUG_PRINT("error",("Wrong header in %s",name_buff));
DBUG_DUMP("error_dump",(char*) share->state.header.file_version,
@@ -165,7 +166,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
info_length=mi_uint2korr(share->state.header.header_length);
base_pos=mi_uint2korr(share->state.header.base_pos);
- if (!(disk_cache=(char*) my_alloca(info_length+128)))
+ if (!(disk_cache= (uchar*) my_alloca(info_length+128)))
{
my_errno=ENOMEM;
goto err;
@@ -202,15 +203,14 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
}
share->state_diff_length=len-MI_STATE_INFO_SIZE;
- mi_state_info_read((uchar*) disk_cache, &share->state);
+ mi_state_info_read(disk_cache, &share->state);
len= mi_uint2korr(share->state.header.base_info_length);
if (len != MI_BASE_INFO_SIZE)
{
DBUG_PRINT("warning",("saved_base_info_length: %d base_info_length: %d",
len,MI_BASE_INFO_SIZE));
}
- disk_pos= (char*)
- my_n_base_info_read((uchar*) disk_cache + base_pos, &share->base);
+ disk_pos= my_n_base_info_read(disk_cache + base_pos, &share->base);
share->state.state_length=base_pos;
if (!(open_flags & HA_OPEN_FOR_REPAIR) &&
@@ -236,7 +236,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
key_parts+=fulltext_keys*FT_SEGS;
if (share->base.max_key_length > MI_MAX_KEY_BUFF || keys > MI_MAX_KEY ||
- key_parts >= MI_MAX_KEY * MI_MAX_KEY_SEG)
+ key_parts > MI_MAX_KEY * MI_MAX_KEY_SEG)
{
DBUG_PRINT("error",("Wrong key info: Max_key_length: %d keys: %d key_parts: %d", share->base.max_key_length, keys, key_parts));
my_errno=HA_ERR_UNSUPPORTED;
@@ -504,7 +504,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
}
else if (share->options & HA_OPTION_PACK_RECORD)
share->data_file_type = DYNAMIC_RECORD;
- my_afree((gptr) disk_cache);
+ my_afree(disk_cache);
mi_setup_functions(share);
share->is_log_table= FALSE;
#ifdef THREAD
@@ -642,7 +642,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
if (myisam_log_file >= 0)
{
intern_filename(name_buff,share->index_file_name);
- _myisam_log(MI_LOG_OPEN,m_info,name_buff,(uint) strlen(name_buff));
+ _myisam_log(MI_LOG_OPEN, m_info, (uchar*) name_buff, strlen(name_buff));
}
DBUG_RETURN(m_info);
@@ -654,7 +654,7 @@ err:
mi_report_error(save_errno, name);
switch (errpos) {
case 6:
- my_free((gptr) m_info,MYF(0));
+ my_free((uchar*) m_info,MYF(0));
/* fall through */
case 5:
VOID(my_close(info.dfile,MYF(0)));
@@ -662,14 +662,14 @@ err:
break; /* Don't remove open table */
/* fall through */
case 4:
- my_free((gptr) share,MYF(0));
+ my_free((uchar*) share,MYF(0));
/* fall through */
case 3:
if (! lock_error)
VOID(my_lock(kfile, F_UNLCK, 0L, F_TO_EOF, MYF(MY_SEEK_NOT_DONE)));
/* fall through */
case 2:
- my_afree((gptr) disk_cache);
+ my_afree(disk_cache);
/* fall through */
case 1:
VOID(my_close(kfile,MYF(0)));
@@ -684,7 +684,7 @@ err:
} /* mi_open */
-byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf)
+uchar *mi_alloc_rec_buff(MI_INFO *info, ulong length, uchar **buf)
{
uint extra;
uint32 old_length;
@@ -692,7 +692,7 @@ byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf)
if (! *buf || length > (old_length=mi_get_rec_buff_len(info, *buf)))
{
- byte *newptr = *buf;
+ uchar *newptr = *buf;
/* to simplify initial init of info->rec_buf in mi_open and mi_extra */
if (length == (ulong) -1)
@@ -709,7 +709,7 @@ byte *mi_alloc_rec_buff(MI_INFO *info, ulong length, byte **buf)
MI_REC_BUFF_OFFSET : 0);
if (extra && newptr)
newptr-= MI_REC_BUFF_OFFSET;
- if (!(newptr=(byte*) my_realloc((gptr)newptr, length+extra+8,
+ if (!(newptr=(uchar*) my_realloc((uchar*)newptr, length+extra+8,
MYF(MY_ALLOW_ZERO_PTR))))
return newptr;
*((uint32 *) newptr)= (uint32) length;
@@ -899,10 +899,10 @@ uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite)
}
if (pWrite & 1)
- DBUG_RETURN(my_pwrite(file,(char*) buff, (uint) (ptr-buff), 0L,
- MYF(MY_NABP | MY_THREADSAFE)));
- DBUG_RETURN(my_write(file, (char*) buff, (uint) (ptr-buff),
- MYF(MY_NABP)));
+ DBUG_RETURN(my_pwrite(file, buff, (size_t) (ptr-buff), 0L,
+ MYF(MY_NABP | MY_THREADSAFE)) != 0);
+ DBUG_RETURN(my_write(file, buff, (size_t) (ptr-buff),
+ MYF(MY_NABP)) != 0);
}
@@ -961,18 +961,18 @@ uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state)
uint mi_state_info_read_dsk(File file, MI_STATE_INFO *state, my_bool pRead)
{
- char buff[MI_STATE_INFO_SIZE + MI_STATE_EXTRA_SIZE];
+ uchar buff[MI_STATE_INFO_SIZE + MI_STATE_EXTRA_SIZE];
if (!myisam_single_user)
{
if (pRead)
{
if (my_pread(file, buff, state->state_length,0L, MYF(MY_NABP)))
- return (MY_FILE_ERROR);
+ return 1;
}
else if (my_read(file, buff, state->state_length,MYF(MY_NABP)))
- return (MY_FILE_ERROR);
- mi_state_info_read((uchar*) buff, state);
+ return 1;
+ mi_state_info_read(buff, state);
}
return 0;
}
@@ -1013,7 +1013,7 @@ uint mi_base_info_write(File file, MI_BASE_INFO *base)
mi_int2store(ptr,base->raid_chunks); ptr +=2;
mi_int4store(ptr,base->raid_chunksize); ptr +=4;
bzero(ptr,6); ptr +=6; /* extra */
- return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP));
+ return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0;
}
@@ -1073,10 +1073,10 @@ uint mi_keydef_write(File file, MI_KEYDEF *keydef)
mi_int2store(ptr,keydef->keylength); ptr +=2;
mi_int2store(ptr,keydef->minlength); ptr +=2;
mi_int2store(ptr,keydef->maxlength); ptr +=2;
- return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP));
+ return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0;
}
-char *mi_keydef_read(char *ptr, MI_KEYDEF *keydef)
+uchar *mi_keydef_read(uchar *ptr, MI_KEYDEF *keydef)
{
keydef->keysegs = (uint) *ptr++;
keydef->key_alg = *ptr++; /* Rtree or Btree */
@@ -1117,11 +1117,11 @@ int mi_keyseg_write(File file, const HA_KEYSEG *keyseg)
mi_int4store(ptr, pos);
ptr+=4;
- return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP));
+ return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0;
}
-char *mi_keyseg_read(char *ptr, HA_KEYSEG *keyseg)
+uchar *mi_keyseg_read(uchar *ptr, HA_KEYSEG *keyseg)
{
keyseg->type = *ptr++;
keyseg->language = *ptr++;
@@ -1157,10 +1157,10 @@ uint mi_uniquedef_write(File file, MI_UNIQUEDEF *def)
*ptr++= (uchar) def->key;
*ptr++ = (uchar) def->null_are_equal;
- return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP));
+ return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0;
}
-char *mi_uniquedef_read(char *ptr, MI_UNIQUEDEF *def)
+uchar *mi_uniquedef_read(uchar *ptr, MI_UNIQUEDEF *def)
{
def->keysegs = mi_uint2korr(ptr);
def->key = ptr[2];
@@ -1181,10 +1181,10 @@ uint mi_recinfo_write(File file, MI_COLUMNDEF *recinfo)
mi_int2store(ptr,recinfo->length); ptr +=2;
*ptr++ = recinfo->null_bit;
mi_int2store(ptr,recinfo->null_pos); ptr+= 2;
- return my_write(file,(char*) buff, (uint) (ptr-buff), MYF(MY_NABP));
+ return my_write(file, buff, (size_t) (ptr-buff), MYF(MY_NABP)) != 0;
}
-char *mi_recinfo_read(char *ptr, MI_COLUMNDEF *recinfo)
+uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo)
{
recinfo->type= mi_sint2korr(ptr); ptr +=2;
recinfo->length=mi_uint2korr(ptr); ptr +=2;
@@ -1202,7 +1202,8 @@ The argument file_to_dup is here for the future if there would on some OS
exist a dup()-like call that would give us two different file descriptors.
*************************************************************************/
-int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, File file_to_dup __attribute__((unused)))
+int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share,
+ File file_to_dup __attribute__((unused)))
{
#ifdef USE_RAID
if (share->base.raid_type)
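The validation sequence at the top of mi_open() reduces to: read a fixed-size header, then refuse the file unless its first four bytes match the MyISAM magic (the myisam_file_magic array visible in the mi_static.c hunk later in this diff). A minimal standalone version of that check; the function name and return codes here are ours:

#include <stdio.h>
#include <string.h>

static const unsigned char magic[4]= { 254, 254, '\007', '\001' };

static int check_header(FILE *f)
{
  unsigned char head[4];
  if (fread(head, 1, sizeof(head), f) != sizeof(head))
    return -1;                          /* not even a full header */
  return memcmp(head, magic, sizeof(magic)) ? -2 : 0;
}

int main(int argc, char **argv)
{
  FILE *f;
  if (argc < 2 || !(f= fopen(argv[1], "rb")))
    return 1;
  printf("header check: %d\n", check_header(f));
  fclose(f);
  return 0;
}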
diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c
index a5a9aaededd..305b7e5532c 100644
--- a/storage/myisam/mi_packrec.c
+++ b/storage/myisam/mi_packrec.c
@@ -48,7 +48,7 @@
#define OFFSET_TABLE_SIZE 512
static uint read_huff_table(MI_BIT_BUFF *bit_buff,MI_DECODE_TREE *decode_tree,
- uint16 **decode_table,byte **intervall_buff,
+ uint16 **decode_table,uchar **intervall_buff,
uint16 *tmp_buff);
static void make_quick_table(uint16 *to_table,uint16 *decode_table,
uint *next_free,uint value,uint bits,
@@ -107,7 +107,7 @@ static void fill_buffer(MI_BIT_BUFF *bit_buff);
static uint max_bit(uint value);
#ifdef HAVE_MMAP
static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
- MI_BLOCK_INFO *info, byte **rec_buff_p,
+ MI_BLOCK_INFO *info, uchar **rec_buff_p,
uchar *header);
#endif
@@ -136,7 +136,8 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
uint i,trees,huff_tree_bits,rec_reflength,length;
uint16 *decode_table,*tmp_buff;
ulong elements,intervall_length;
- char *disk_cache,*intervall_buff;
+ uchar *disk_cache;
+ uchar *intervall_buff;
uchar header[HEAD_LENGTH];
MYISAM_SHARE *share=info->s;
MI_BIT_BUFF bit_buff;
@@ -149,14 +150,14 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
file=info->dfile;
my_errno=0;
- if (my_read(file,(byte*) header,sizeof(header),MYF(MY_NABP)))
+ if (my_read(file,(uchar*) header,sizeof(header),MYF(MY_NABP)))
{
if (!my_errno)
my_errno=HA_ERR_END_OF_FILE;
goto err0;
}
/* Only the first three bytes of magic number are independent of version. */
- if (memcmp((byte*) header, (byte*) myisam_pack_file_magic, 3))
+ if (memcmp((uchar*) header, (uchar*) myisam_pack_file_magic, 3))
{
my_errno=HA_ERR_WRONG_IN_RECORD;
goto err0;
@@ -195,10 +196,10 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
*/
if (!(share->decode_trees=(MI_DECODE_TREE*)
my_malloc((uint) (trees*sizeof(MI_DECODE_TREE)+
- intervall_length*sizeof(byte)),
+ intervall_length*sizeof(uchar)),
MYF(MY_WME))))
goto err0;
- intervall_buff=(byte*) (share->decode_trees+trees);
+ intervall_buff=(uchar*) (share->decode_trees+trees);
/*
Memory segment #2:
@@ -215,7 +216,7 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
MYF(MY_WME | MY_ZEROFILL))))
goto err1;
tmp_buff=share->decode_tables+length;
- disk_cache=(byte*) (tmp_buff+OFFSET_TABLE_SIZE);
+ disk_cache= (uchar*) (tmp_buff+OFFSET_TABLE_SIZE);
if (my_read(file,disk_cache,
(uint) (share->pack.header_length-sizeof(header)),
@@ -223,7 +224,7 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
goto err2;
huff_tree_bits=max_bit(trees ? trees-1 : 0);
- init_bit_buffer(&bit_buff, (uchar*) disk_cache,
+ init_bit_buffer(&bit_buff, disk_cache,
(uint) (share->pack.header_length-sizeof(header)));
/* Read new info for each field */
for (i=0 ; i < share->base.fields ; i++)
@@ -250,8 +251,8 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
goto err3;
/* Reallocate the decoding tables to the used size. */
decode_table=(uint16*)
- my_realloc((gptr) share->decode_tables,
- (uint) ((byte*) decode_table - (byte*) share->decode_tables),
+ my_realloc((uchar*) share->decode_tables,
+ (uint) ((uchar*) decode_table - (uchar*) share->decode_tables),
MYF(MY_HOLD_ON_ERROR));
/* Fix the table addresses in the tree heads. */
{
@@ -291,9 +292,9 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
err3:
my_errno=HA_ERR_WRONG_IN_RECORD;
err2:
- my_free((gptr) share->decode_tables,MYF(0));
+ my_free((uchar*) share->decode_tables,MYF(0));
err1:
- my_free((gptr) share->decode_trees,MYF(0));
+ my_free((uchar*) share->decode_trees,MYF(0));
err0:
DBUG_RETURN(1);
}
@@ -318,7 +319,7 @@ err0:
*/
static uint read_huff_table(MI_BIT_BUFF *bit_buff, MI_DECODE_TREE *decode_tree,
- uint16 **decode_table, byte **intervall_buff,
+ uint16 **decode_table, uchar **intervall_buff,
uint16 *tmp_buff)
{
uint min_chr,elements,char_bits,offset_bits,size,intervall_length,table_bits,
@@ -697,7 +698,7 @@ static uint find_longest_bitstream(uint16 *table, uint16 *end)
HA_ERR_WRONG_IN_RECORD or -1 on error
*/
-int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, byte *buf)
+int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, uchar *buf)
{
MI_BLOCK_INFO block_info;
File file;
@@ -710,7 +711,7 @@ int _mi_read_pack_record(MI_INFO *info, my_off_t filepos, byte *buf)
if (_mi_pack_get_block_info(info, &info->bit_buff, &block_info,
&info->rec_buff, file, filepos))
goto err;
- if (my_read(file,(byte*) info->rec_buff + block_info.offset ,
+ if (my_read(file,(uchar*) info->rec_buff + block_info.offset ,
block_info.rec_len - block_info.offset, MYF(MY_NABP)))
goto panic;
info->update|= HA_STATE_AKTIV;
@@ -725,9 +726,9 @@ err:
int _mi_pack_rec_unpack(register MI_INFO *info, MI_BIT_BUFF *bit_buff,
- register byte *to, byte *from, ulong reclength)
+ register uchar *to, uchar *from, ulong reclength)
{
- byte *end_field;
+ uchar *end_field;
reg3 MI_COLUMNDEF *end;
MI_COLUMNDEF *current_field;
MYISAM_SHARE *share=info->s;
@@ -834,7 +835,7 @@ static void uf_space_normal(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to,
uchar *end)
{
if (get_bit(bit_buff))
- bfill((byte*) to,(end-to),' ');
+ bfill((uchar*) to,(end-to),' ');
else
decode_bytes(rec,bit_buff,to,end);
}
@@ -844,7 +845,7 @@ static void uf_space_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
{
uint spaces;
if (get_bit(bit_buff))
- bfill((byte*) to,(end-to),' ');
+ bfill((uchar*) to,(end-to),' ');
else
{
if (get_bit(bit_buff))
@@ -856,7 +857,7 @@ static void uf_space_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
}
if (to+spaces != end)
decode_bytes(rec,bit_buff,to,end-spaces);
- bfill((byte*) end-spaces,spaces,' ');
+ bfill((uchar*) end-spaces,spaces,' ');
}
else
decode_bytes(rec,bit_buff,to,end);
@@ -876,7 +877,7 @@ static void uf_endspace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
}
if (to+spaces != end)
decode_bytes(rec,bit_buff,to,end-spaces);
- bfill((byte*) end-spaces,spaces,' ');
+ bfill((uchar*) end-spaces,spaces,' ');
}
else
decode_bytes(rec,bit_buff,to,end);
@@ -887,7 +888,7 @@ static void uf_space_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t
{
uint spaces;
if (get_bit(bit_buff))
- bfill((byte*) to,(end-to),' ');
+ bfill((uchar*) to,(end-to),' ');
else
{
if ((spaces=get_bits(bit_buff,rec->space_length_bits))+to > end)
@@ -897,7 +898,7 @@ static void uf_space_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t
}
if (to+spaces != end)
decode_bytes(rec,bit_buff,to,end-spaces);
- bfill((byte*) end-spaces,spaces,' ');
+ bfill((uchar*) end-spaces,spaces,' ');
}
}
@@ -912,7 +913,7 @@ static void uf_endspace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to,
}
if (to+spaces != end)
decode_bytes(rec,bit_buff,to,end-spaces);
- bfill((byte*) end-spaces,spaces,' ');
+ bfill((uchar*) end-spaces,spaces,' ');
}
static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
@@ -920,7 +921,7 @@ static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
{
uint spaces;
if (get_bit(bit_buff))
- bfill((byte*) to,(end-to),' ');
+ bfill((uchar*) to,(end-to),' ');
else
{
if (get_bit(bit_buff))
@@ -930,7 +931,7 @@ static void uf_space_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
bit_buff->error=1;
return;
}
- bfill((byte*) to,spaces,' ');
+ bfill((uchar*) to,spaces,' ');
if (to+spaces != end)
decode_bytes(rec,bit_buff,to+spaces,end);
}
@@ -951,7 +952,7 @@ static void uf_prespace_selected(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
bit_buff->error=1;
return;
}
- bfill((byte*) to,spaces,' ');
+ bfill((uchar*) to,spaces,' ');
if (to+spaces != end)
decode_bytes(rec,bit_buff,to+spaces,end);
}
@@ -965,7 +966,7 @@ static void uf_space_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t
{
uint spaces;
if (get_bit(bit_buff))
- bfill((byte*) to,(end-to),' ');
+ bfill((uchar*) to,(end-to),' ');
else
{
if ((spaces=get_bits(bit_buff,rec->space_length_bits))+to > end)
@@ -973,7 +974,7 @@ static void uf_space_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *t
bit_buff->error=1;
return;
}
- bfill((byte*) to,spaces,' ');
+ bfill((uchar*) to,spaces,' ');
if (to+spaces != end)
decode_bytes(rec,bit_buff,to+spaces,end);
}
@@ -988,7 +989,7 @@ static void uf_prespace(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to,
bit_buff->error=1;
return;
}
- bfill((byte*) to,spaces,' ');
+ bfill((uchar*) to,spaces,' ');
if (to+spaces != end)
decode_bytes(rec,bit_buff,to+spaces,end);
}
@@ -1031,7 +1032,7 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
uchar *to, uchar *end)
{
if (get_bit(bit_buff))
- bzero((byte*) to,(end-to));
+ bzero((uchar*) to,(end-to));
else
{
ulong length=get_bits(bit_buff,rec->space_length_bits);
@@ -1039,11 +1040,11 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff,
if (bit_buff->blob_pos+length > bit_buff->blob_end)
{
bit_buff->error=1;
- bzero((byte*) to,(end-to));
+ bzero((uchar*) to,(end-to));
return;
}
decode_bytes(rec,bit_buff,bit_buff->blob_pos,bit_buff->blob_pos+length);
- _my_store_blob_length((byte*) to,pack_length,length);
+ _my_store_blob_length((uchar*) to,pack_length,length);
memcpy_fixed((char*) to+pack_length,(char*) &bit_buff->blob_pos,
sizeof(char*));
bit_buff->blob_pos+=length;
@@ -1286,7 +1287,7 @@ static uint decode_pos(MI_BIT_BUFF *bit_buff, MI_DECODE_TREE *decode_tree)
}
-int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf,
+int _mi_read_rnd_pack_record(MI_INFO *info, uchar *buf,
register my_off_t filepos,
my_bool skip_deleted_blocks)
{
@@ -1303,7 +1304,7 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf,
if (info->opt_flag & READ_CACHE_USED)
{
- if (_mi_read_cache(&info->rec_cache, (byte*) block_info.header,
+ if (_mi_read_cache(&info->rec_cache, (uchar*) block_info.header,
filepos, share->pack.ref_length,
skip_deleted_blocks ? READING_NEXT : 0))
goto err;
@@ -1325,14 +1326,14 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf,
if (info->opt_flag & READ_CACHE_USED)
{
- if (_mi_read_cache(&info->rec_cache, (byte*) info->rec_buff,
+ if (_mi_read_cache(&info->rec_cache, (uchar*) info->rec_buff,
block_info.filepos, block_info.rec_len,
skip_deleted_blocks ? READING_NEXT : 0))
goto err;
}
else
{
- if (my_read(info->dfile,(byte*) info->rec_buff + block_info.offset,
+ if (my_read(info->dfile,(uchar*) info->rec_buff + block_info.offset,
block_info.rec_len-block_info.offset,
MYF(MY_NABP)))
goto err;
@@ -1352,7 +1353,7 @@ int _mi_read_rnd_pack_record(MI_INFO *info, byte *buf,
/* Read and process header from a huff-record-file */
uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
- MI_BLOCK_INFO *info, byte **rec_buff_p,
+ MI_BLOCK_INFO *info, uchar **rec_buff_p,
File file, my_off_t filepos)
{
uchar *header=info->header;
@@ -1367,9 +1368,9 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
position is ok
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
- if (my_read(file,(char*) header,ref_length,MYF(MY_NABP)))
+ if (my_read(file, header,ref_length,MYF(MY_NABP)))
return BLOCK_FATAL_ERROR;
- DBUG_DUMP("header",(byte*) header,ref_length);
+ DBUG_DUMP("header",(uchar*) header,ref_length);
}
head_length= read_pack_length((uint) myisam->s->pack.version, header,
&info->rec_len);
@@ -1478,8 +1479,8 @@ static uint max_bit(register uint value)
#ifdef HAVE_MMAP
-static int _mi_read_mempack_record(MI_INFO *info,my_off_t filepos,byte *buf);
-static int _mi_read_rnd_mempack_record(MI_INFO*, byte *,my_off_t, my_bool);
+static int _mi_read_mempack_record(MI_INFO *info,my_off_t filepos,uchar *buf);
+static int _mi_read_rnd_mempack_record(MI_INFO*, uchar *,my_off_t, my_bool);
my_bool _mi_memmap_file(MI_INFO *info)
{
@@ -1506,13 +1507,13 @@ my_bool _mi_memmap_file(MI_INFO *info)
void _mi_unmap_file(MI_INFO *info)
{
- VOID(my_munmap(info->s->file_map,
+ VOID(my_munmap((char*) info->s->file_map,
(size_t) info->s->mmaped_length + MEMMAP_EXTRA_MARGIN));
}
static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
- MI_BLOCK_INFO *info, byte **rec_buff_p,
+ MI_BLOCK_INFO *info, uchar **rec_buff_p,
uchar *header)
{
header+= read_pack_length((uint) myisam->s->pack.version, header,
@@ -1532,17 +1533,17 @@ static uchar *_mi_mempack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
}
-static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, byte *buf)
+static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, uchar *buf)
{
MI_BLOCK_INFO block_info;
MYISAM_SHARE *share=info->s;
- byte *pos;
+ uchar *pos;
DBUG_ENTER("mi_read_mempack_record");
if (filepos == HA_OFFSET_ERROR)
DBUG_RETURN(-1); /* _search() didn't find record */
- if (!(pos= (byte*) _mi_mempack_get_block_info(info, &info->bit_buff,
+ if (!(pos= (uchar*) _mi_mempack_get_block_info(info, &info->bit_buff,
&block_info, &info->rec_buff,
(uchar*) share->file_map+
filepos)))
@@ -1553,14 +1554,14 @@ static int _mi_read_mempack_record(MI_INFO *info, my_off_t filepos, byte *buf)
/*ARGSUSED*/
-static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf,
+static int _mi_read_rnd_mempack_record(MI_INFO *info, uchar *buf,
register my_off_t filepos,
my_bool skip_deleted_blocks
__attribute__((unused)))
{
MI_BLOCK_INFO block_info;
MYISAM_SHARE *share=info->s;
- byte *pos,*start;
+ uchar *pos,*start;
DBUG_ENTER("_mi_read_rnd_mempack_record");
if (filepos >= share->state.state.data_file_length)
@@ -1568,7 +1569,7 @@ static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf,
my_errno=HA_ERR_END_OF_FILE;
goto err;
}
- if (!(pos= (byte*) _mi_mempack_get_block_info(info, &info->bit_buff,
+ if (!(pos= (uchar*) _mi_mempack_get_block_info(info, &info->bit_buff,
&block_info, &info->rec_buff,
(uchar*)
(start=share->file_map+
@@ -1596,7 +1597,7 @@ static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf,
/* Save length of row */
-uint save_pack_length(uint version, byte *block_buff, ulong length)
+uint save_pack_length(uint version, uchar *block_buff, ulong length)
{
if (length < 254)
{
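The uf_* decoders above all consume an MI_BIT_BUFF: typically one flag bit selects "field is all spaces", and a short bit-count then says how many spaces to fill around the Huffman-decoded bytes. A toy MSB-first bit reader in the same spirit (simplified by assumption -- byte-at-a-time, whereas the real buffer refills more than one byte at once):

#include <stdio.h>

typedef struct
{
  const unsigned char *pos, *end;
  unsigned cur, bits;
} BIT_BUFF;

/* read n bits, most significant bit first */
static unsigned get_bits(BIT_BUFF *b, unsigned n)
{
  unsigned v= 0;
  while (n--)
  {
    if (b->bits == 0)
    {
      b->cur= (b->pos < b->end) ? *b->pos++ : 0;
      b->bits= 8;
    }
    b->bits--;
    v= (v << 1) | ((b->cur >> b->bits) & 1);
  }
  return v;
}

int main(void)
{
  const unsigned char data[]= { 0xB4 };   /* bits: 1011 0100 */
  BIT_BUFF b= { data, data + 1, 0, 0 };
  unsigned flag, spaces;
  flag=   get_bits(&b, 1);                /* 1 */
  spaces= get_bits(&b, 3);                /* 011 = 3 */
  printf("flag=%u spaces=%u\n", flag, spaces);
  return 0;
}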
diff --git a/storage/myisam/mi_page.c b/storage/myisam/mi_page.c
index da9e19275c9..23a2526f756 100644
--- a/storage/myisam/mi_page.c
+++ b/storage/myisam/mi_page.c
@@ -29,7 +29,7 @@ uchar *_mi_fetch_keypage(register MI_INFO *info, MI_KEYDEF *keyinfo,
DBUG_PRINT("enter",("page: %ld", (long) page));
tmp=(uchar*) key_cache_read(info->s->key_cache,
- info->s->kfile, page, level, (byte*) buff,
+ info->s->kfile, page, level, (uchar*) buff,
(uint) keyinfo->block_length,
(uint) keyinfo->block_length,
return_buffer);
@@ -80,7 +80,7 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo,
DBUG_RETURN((-1));
}
DBUG_PRINT("page",("write page at: %lu",(long) page));
- DBUG_DUMP("buff",(byte*) buff,mi_getint(buff));
+ DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff));
#endif
if ((length=keyinfo->block_length) > IO_SIZE*2 &&
@@ -89,12 +89,12 @@ int _mi_write_keypage(register MI_INFO *info, register MI_KEYDEF *keyinfo,
#ifdef HAVE_purify
{
length=mi_getint(buff);
- bzero((byte*) buff+length,keyinfo->block_length-length);
+ bzero((uchar*) buff+length,keyinfo->block_length-length);
length=keyinfo->block_length;
}
#endif
DBUG_RETURN((key_cache_write(info->s->key_cache,
- info->s->kfile,page, level, (byte*) buff,length,
+ info->s->kfile,page, level, (uchar*) buff,length,
(uint) keyinfo->block_length,
(int) ((info->lock_type != F_UNLCK) ||
info->s->delay_key_write))));
@@ -107,7 +107,7 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos,
int level)
{
my_off_t old_link;
- char buff[8];
+ uchar buff[8];
DBUG_ENTER("_mi_dispose");
DBUG_PRINT("enter",("pos: %ld", (long) pos));
@@ -128,7 +128,7 @@ int _mi_dispose(register MI_INFO *info, MI_KEYDEF *keyinfo, my_off_t pos,
my_off_t _mi_new(register MI_INFO *info, MI_KEYDEF *keyinfo, int level)
{
my_off_t pos;
- char buff[8];
+ uchar buff[8];
DBUG_ENTER("_mi_new");
if ((pos= info->s->state.key_del[keyinfo->block_size_index]) ==
diff --git a/storage/myisam/mi_preload.c b/storage/myisam/mi_preload.c
index 78729f18424..60ab55106cb 100644
--- a/storage/myisam/mi_preload.c
+++ b/storage/myisam/mi_preload.c
@@ -55,12 +55,17 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
block_length= keyinfo[0].block_length;
- /* Check whether all indexes use the same block size */
- for (i= 1 ; i < keys ; i++)
+ if (ignore_leaves)
{
- if (keyinfo[i].block_length != block_length)
- DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE);
+ /* Check whether all indexes use the same block size */
+ for (i= 1 ; i < keys ; i++)
+ {
+ if (keyinfo[i].block_length != block_length)
+ DBUG_RETURN(my_errno= HA_ERR_NON_UNIQUE_BLOCK_SIZE);
+ }
}
+ else
+ block_length= share->key_cache->key_cache_block_size;
length= info->preload_buff_size/block_length * block_length;
set_if_bigger(length, block_length);
@@ -76,7 +81,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
/* Read the next block of index file into the preload buffer */
if ((my_off_t) length > (key_file_length-pos))
length= (ulong) (key_file_length-pos);
- if (my_pread(share->kfile, (byte*) buff, length, pos, MYF(MY_FAE|MY_FNABP)))
+ if (my_pread(share->kfile, (uchar*) buff, length, pos, MYF(MY_FAE|MY_FNABP)))
goto err;
if (ignore_leaves)
@@ -88,7 +93,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
if (key_cache_insert(share->key_cache,
share->kfile, pos, DFLT_INIT_HITS,
- (byte*) buff, block_length))
+ (uchar*) buff, block_length))
goto err;
}
pos+= block_length;
@@ -100,7 +105,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
if (key_cache_insert(share->key_cache,
share->kfile, pos, DFLT_INIT_HITS,
- (byte*) buff, length))
+ (uchar*) buff, length))
goto err;
pos+= length;
}
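The buffer sizing in mi_preload() above -- length= info->preload_buff_size/block_length * block_length; set_if_bigger(length, block_length); -- just rounds the preload buffer down to a whole number of index blocks, never below one block. The same arithmetic, standalone (names ours):

#include <stdio.h>

static unsigned long preload_chunk(unsigned long buff_size,
                                   unsigned long block_length)
{
  unsigned long length= buff_size / block_length * block_length;
  if (length < block_length)     /* set_if_bigger(length, block_length) */
    length= block_length;
  return length;
}

int main(void)
{
  /* 32K buffer, 1K blocks -> 32 blocks; a 100-byte buffer still gets 1 */
  printf("%lu %lu\n", preload_chunk(32768, 1024), preload_chunk(100, 1024));
  return 0;
}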
diff --git a/storage/myisam/mi_range.c b/storage/myisam/mi_range.c
index c7ab6731a2c..932a4abd1b3 100644
--- a/storage/myisam/mi_range.c
+++ b/storage/myisam/mi_range.c
@@ -21,7 +21,7 @@
#include "myisamdef.h"
#include "rt_index.h"
-static ha_rows _mi_record_pos(MI_INFO *, const byte *, key_part_map,
+static ha_rows _mi_record_pos(MI_INFO *, const uchar *, key_part_map,
enum ha_rkey_function);
static double _mi_search_pos(MI_INFO *,MI_KEYDEF *,uchar *, uint,uint,my_off_t);
static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *,uchar *, uchar *,uint *);
@@ -116,7 +116,7 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx,
/* Find relative position (in records) for key in index-tree */
-static ha_rows _mi_record_pos(MI_INFO *info, const byte *key,
+static ha_rows _mi_record_pos(MI_INFO *info, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function search_flag)
{
diff --git a/storage/myisam/mi_rfirst.c b/storage/myisam/mi_rfirst.c
index d23bda46b1a..5a8b27b3e85 100644
--- a/storage/myisam/mi_rfirst.c
+++ b/storage/myisam/mi_rfirst.c
@@ -17,7 +17,7 @@
/* Read first row through a specific key */
-int mi_rfirst(MI_INFO *info, byte *buf, int inx)
+int mi_rfirst(MI_INFO *info, uchar *buf, int inx)
{
DBUG_ENTER("mi_rfirst");
info->lastpos= HA_OFFSET_ERROR;
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index a69e276a6d8..f1d35810d36 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -21,7 +21,7 @@
/* Read a record using key */
/* Ordinary search_flag is 0 ; Give error if no record with key */
-int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key,
+int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key,
key_part_map keypart_map, enum ha_rkey_function search_flag)
{
uchar *key_buff;
@@ -45,6 +45,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key,
info->once_flags&= ~USE_PACKED_KEYS; /* Reset flag */
/*
key is already packed! This happens when we are using a MERGE TABLE.
+ In this case 'key_part_map' is the length of the key!
*/
key_buff=info->lastkey+info->s->base.max_key_length;
pack_key_length= keypart_map;
@@ -94,42 +95,63 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key,
myisam_read_vec[search_flag], info->s->state.key_root[inx]))
{
/*
- If we searching for a partial key (or using >, >=, < or <=) and
- the data is outside of the data file, we need to continue searching
- for the first key inside the data file
+ Found a key, but it might not be usable. We cannot use rows that
+ are inserted by other threads after we got our table lock
+ ("concurrent inserts"). The record may not even be present yet.
+ Keys are inserted into the index(es) before the record is
+ inserted into the data file. When we got our table lock, we
+ saved the current data_file_length. Concurrent inserts always go
+ to the end of the file. So we can test if the found key
+ references a new record.
*/
- if (info->lastpos >= info->state->data_file_length &&
- (search_flag != HA_READ_KEY_EXACT ||
- last_used_keyseg != keyinfo->seg + keyinfo->keysegs))
+ if (info->lastpos >= info->state->data_file_length)
{
- do
+ /* The key references a concurrently inserted record. */
+ if (search_flag == HA_READ_KEY_EXACT &&
+ last_used_keyseg == keyinfo->seg + keyinfo->keysegs)
+ {
+ /* Simply ignore the key if it matches exactly. (Bug #29838) */
+ my_errno= HA_ERR_KEY_NOT_FOUND;
+ info->lastpos= HA_OFFSET_ERROR;
+ }
+ else
{
- uint not_used[2];
- /*
- Skip rows that are inserted by other threads since we got a lock
- Note that this can only happen if we are not searching after an
- full length exact key, because the keys are sorted
- according to position
- */
- if (_mi_search_next(info, keyinfo, info->lastkey,
- info->lastkey_length,
- myisam_readnext_vec[search_flag],
- info->s->state.key_root[inx]))
- break;
/*
- Check that the found key does still match the search.
- _mi_search_next() delivers the next key regardless of its
- value.
+ If searching for a partial key (or using >, >=, < or <=) and
+ the data is outside of the data file, we need to continue
+ searching for the first key inside the data file.
*/
- if (search_flag == HA_READ_KEY_EXACT &&
- ha_key_cmp(keyinfo->seg, key_buff, info->lastkey, use_key_length,
- SEARCH_FIND, not_used))
+ do
{
- my_errno= HA_ERR_KEY_NOT_FOUND;
- info->lastpos= HA_OFFSET_ERROR;
- break;
- }
- } while (info->lastpos >= info->state->data_file_length);
+ uint not_used[2];
+ /*
+ Skip rows that are inserted by other threads since we got
+ a lock. Note that this can only happen if we are not
+ searching after a full length exact key, because the keys
+ are sorted according to position.
+ */
+ if (_mi_search_next(info, keyinfo, info->lastkey,
+ info->lastkey_length,
+ myisam_readnext_vec[search_flag],
+ info->s->state.key_root[inx]))
+ break; /* purecov: inspected */
+ /*
+ Check that the found key does still match the search.
+ _mi_search_next() delivers the next key regardless of its
+ value.
+ */
+ if (search_flag == HA_READ_KEY_EXACT &&
+ ha_key_cmp(keyinfo->seg, key_buff, info->lastkey,
+ use_key_length, SEARCH_FIND, not_used))
+ {
+ /* purecov: begin inspected */
+ my_errno= HA_ERR_KEY_NOT_FOUND;
+ info->lastpos= HA_OFFSET_ERROR;
+ break;
+ /* purecov: end */
+ }
+ } while (info->lastpos >= info->state->data_file_length);
+ }
}
}
}
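Reduced to its skeleton, the rewritten mi_rkey() logic above is: a found key whose row position lies at or beyond the data_file_length saved at lock time belongs to a concurrent insert; an exact full-length match is then simply reported as not found (the Bug #29838 case), while a partial or range search steps forward until it reaches a row inside the visible part of the file. Sketch only -- the types and the search callback below are stand-ins, not the MyISAM API:

#include <stdio.h>

typedef unsigned long my_off;

struct scan { const my_off *keys; unsigned n, i; };

/* stand-in for _mi_search / _mi_search_next: yields row positions */
static int next_pos(struct scan *s, my_off *pos)
{
  if (s->i >= s->n)
    return 1;
  *pos= s->keys[s->i++];
  return 0;
}

static long rkey(struct scan *s, my_off visible_end, int exact_full_key)
{
  my_off pos;
  if (next_pos(s, &pos))
    return -1;                      /* HA_ERR_KEY_NOT_FOUND */
  if (pos < visible_end)
    return (long) pos;              /* ordinary hit */
  if (exact_full_key)
    return -1;                      /* ignore concurrently inserted row */
  while (pos >= visible_end)        /* skip rows inserted after our lock */
    if (next_pos(s, &pos))
      return -1;
  return (long) pos;
}

int main(void)
{
  const my_off keys[]= { 9000, 9500, 400 };  /* first two past saved EOF */
  struct scan s= { keys, 3, 0 };
  printf("range search -> %ld\n", rkey(&s, 8192, 0));   /* 400 */
  s.i= 0;
  printf("exact search -> %ld\n", rkey(&s, 8192, 1));   /* -1  */
  return 0;
}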
diff --git a/storage/myisam/mi_rlast.c b/storage/myisam/mi_rlast.c
index 7805755ab70..07be619617f 100644
--- a/storage/myisam/mi_rlast.c
+++ b/storage/myisam/mi_rlast.c
@@ -17,7 +17,7 @@
/* Read last row with the same key as the previous read. */
-int mi_rlast(MI_INFO *info, byte *buf, int inx)
+int mi_rlast(MI_INFO *info, uchar *buf, int inx)
{
DBUG_ENTER("mi_rlast");
info->lastpos= HA_OFFSET_ERROR;
diff --git a/storage/myisam/mi_rnext.c b/storage/myisam/mi_rnext.c
index f6a0a47413e..7ce66d41e0f 100644
--- a/storage/myisam/mi_rnext.c
+++ b/storage/myisam/mi_rnext.c
@@ -24,7 +24,7 @@
based on the position of the last used key!
*/
-int mi_rnext(MI_INFO *info, byte *buf, int inx)
+int mi_rnext(MI_INFO *info, uchar *buf, int inx)
{
int error,changed;
uint flag;
diff --git a/storage/myisam/mi_rnext_same.c b/storage/myisam/mi_rnext_same.c
index 3a7004bf47c..1892fe3e1e0 100644
--- a/storage/myisam/mi_rnext_same.c
+++ b/storage/myisam/mi_rnext_same.c
@@ -24,7 +24,7 @@
based on the position of the last used key!
*/
-int mi_rnext_same(MI_INFO *info, byte *buf)
+int mi_rnext_same(MI_INFO *info, uchar *buf)
{
int error;
uint inx,not_used[2];
diff --git a/storage/myisam/mi_rprev.c b/storage/myisam/mi_rprev.c
index 09802627185..d1407012590 100644
--- a/storage/myisam/mi_rprev.c
+++ b/storage/myisam/mi_rprev.c
@@ -22,7 +22,7 @@
based on the position of the last used key!
*/
-int mi_rprev(MI_INFO *info, byte *buf, int inx)
+int mi_rprev(MI_INFO *info, uchar *buf, int inx)
{
int error,changed;
register uint flag;
diff --git a/storage/myisam/mi_rrnd.c b/storage/myisam/mi_rrnd.c
index d31e6c24a37..211e5fa51cc 100644
--- a/storage/myisam/mi_rrnd.c
+++ b/storage/myisam/mi_rrnd.c
@@ -29,7 +29,7 @@
HA_ERR_END_OF_FILE = EOF.
*/
-int mi_rrnd(MI_INFO *info, byte *buf, register my_off_t filepos)
+int mi_rrnd(MI_INFO *info, uchar *buf, register my_off_t filepos)
{
my_bool skip_deleted_blocks;
DBUG_ENTER("mi_rrnd");
diff --git a/storage/myisam/mi_rsame.c b/storage/myisam/mi_rsame.c
index 4831ebb3d7c..8093498483f 100644
--- a/storage/myisam/mi_rsame.c
+++ b/storage/myisam/mi_rsame.c
@@ -25,7 +25,7 @@
*/
-int mi_rsame(MI_INFO *info, byte *record, int inx)
+int mi_rsame(MI_INFO *info, uchar *record, int inx)
{
DBUG_ENTER("mi_rsame");
diff --git a/storage/myisam/mi_rsamepos.c b/storage/myisam/mi_rsamepos.c
index 717b9ab52d5..6a1e462b686 100644
--- a/storage/myisam/mi_rsamepos.c
+++ b/storage/myisam/mi_rsamepos.c
@@ -27,7 +27,7 @@
** HA_ERR_END_OF_FILE = End of file
*/
-int mi_rsame_with_pos(MI_INFO *info, byte *record, int inx, my_off_t filepos)
+int mi_rsame_with_pos(MI_INFO *info, uchar *record, int inx, my_off_t filepos)
{
DBUG_ENTER("mi_rsame_with_pos");
DBUG_PRINT("enter",("index: %d filepos: %ld", inx, (long) filepos));
diff --git a/storage/myisam/mi_scan.c b/storage/myisam/mi_scan.c
index 87debb67b37..a225b399660 100644
--- a/storage/myisam/mi_scan.c
+++ b/storage/myisam/mi_scan.c
@@ -36,7 +36,7 @@ int mi_scan_init(register MI_INFO *info)
HA_ERR_END_OF_FILE = EOF.
*/
-int mi_scan(MI_INFO *info, byte *buf)
+int mi_scan(MI_INFO *info, uchar *buf)
{
DBUG_ENTER("mi_scan");
/* Init all but update-flag */
diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c
index d313619e007..2195ac178dd 100644
--- a/storage/myisam/mi_search.c
+++ b/storage/myisam/mi_search.c
@@ -78,7 +78,7 @@ int _mi_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (!(buff=_mi_fetch_keypage(info,keyinfo,pos,DFLT_INIT_HITS,info->buff,
test(!(nextflag & SEARCH_SAVE_BUFF)))))
goto err;
- DBUG_DUMP("page",(byte*) buff,mi_getint(buff));
+ DBUG_DUMP("page",(uchar*) buff,mi_getint(buff));
flag=(*keyinfo->bin_search)(info,keyinfo,buff,key,key_len,nextflag,
&keypos,lastkey, &last_key);
@@ -753,7 +753,7 @@ void _mi_dpointer(MI_INFO *info, uchar *buff, my_off_t pos)
uint _mi_get_static_key(register MI_KEYDEF *keyinfo, uint nod_flag,
register uchar **page, register uchar *key)
{
- memcpy((byte*) key,(byte*) *page,
+ memcpy((uchar*) key,(uchar*) *page,
(size_t) (keyinfo->keylength+nod_flag));
*page+=keyinfo->keylength+nod_flag;
return(keyinfo->keylength);
@@ -836,7 +836,7 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (tot_length >= 255 && *start != 255)
{
/* length prefix changed from a length of one to a length of 3 */
- bmove_upp((char*) key+length+3,(char*) key+length+1,length);
+ bmove_upp(key+length+3, key+length+1, length);
*key=255;
mi_int2store(key+1,tot_length);
key+=3+length;
@@ -897,12 +897,12 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
else
length=keyseg->length;
}
- memcpy((byte*) key,(byte*) page,(size_t) length);
+ memcpy((uchar*) key,(uchar*) page,(size_t) length);
key+=length;
page+=length;
}
length=keyseg->length+nod_flag;
- bmove((byte*) key,(byte*) page,length);
+ bmove((uchar*) key,(uchar*) page,length);
*page_pos= page+length;
return ((uint) (key-start_key)+keyseg->length);
} /* _mi_get_pack_key */
@@ -1003,7 +1003,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
}
DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u",
(long) key, (long) from, length));
- memmove((byte*) key, (byte*) from, (size_t) length);
+ memmove((uchar*) key, (uchar*) from, (size_t) length);
key+=length;
from+=length;
}
@@ -1035,7 +1035,7 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
DBUG_RETURN(0); /* Error */
}
/* Copy data pointer and, if appropriate, key block pointer. */
- memcpy((byte*) key,(byte*) from,(size_t) length);
+ memcpy((uchar*) key,(uchar*) from,(size_t) length);
*page_pos= from+length;
}
DBUG_RETURN((uint) (key-start_key)+keyseg->length);
@@ -1054,7 +1054,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
nod_flag=mi_test_if_nod(page);
if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY)))
{
- bmove((byte*) key,(byte*) keypos,keyinfo->keylength+nod_flag);
+ bmove((uchar*) key,(uchar*) keypos,keyinfo->keylength+nod_flag);
DBUG_RETURN(keypos+keyinfo->keylength+nod_flag);
}
else
@@ -1092,7 +1092,7 @@ static my_bool _mi_get_prev_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY)))
{
*return_key_length=keyinfo->keylength;
- bmove((byte*) key,(byte*) keypos- *return_key_length-nod_flag,
+ bmove((uchar*) key,(uchar*) keypos- *return_key_length-nod_flag,
*return_key_length);
DBUG_RETURN(0);
}
@@ -1134,7 +1134,7 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
lastpos=endpos-keyinfo->keylength-nod_flag;
*return_key_length=keyinfo->keylength;
if (lastpos > page)
- bmove((byte*) lastkey,(byte*) lastpos,keyinfo->keylength+nod_flag);
+ bmove((uchar*) lastkey,(uchar*) lastpos,keyinfo->keylength+nod_flag);
}
else
{
@@ -1225,7 +1225,7 @@ uint _mi_keylength_part(MI_KEYDEF *keyinfo, register uchar *key,
uchar *_mi_move_key(MI_KEYDEF *keyinfo, uchar *to, uchar *from)
{
reg1 uint length;
- memcpy((byte*) to, (byte*) from,
+ memcpy((uchar*) to, (uchar*) from,
(size_t) (length=_mi_keylength(keyinfo,from)));
return to+length;
}
@@ -1827,7 +1827,7 @@ void _mi_store_static_key(MI_KEYDEF *keyinfo __attribute__((unused)),
register uchar *key_pos,
register MI_KEY_PARAM *s_temp)
{
- memcpy((byte*) key_pos,(byte*) s_temp->key,(size_t) s_temp->totlength);
+ memcpy((uchar*) key_pos,(uchar*) s_temp->key,(size_t) s_temp->totlength);
}
@@ -1860,7 +1860,7 @@ void _mi_store_var_pack_key(MI_KEYDEF *keyinfo __attribute__((unused)),
/* Not packed against previous key */
store_pack_length(s_temp->pack_marker == 128,key_pos,s_temp->key_length);
}
- bmove((byte*) key_pos,(byte*) s_temp->key,
+ bmove((uchar*) key_pos,(uchar*) s_temp->key,
(length=s_temp->totlength-(uint) (key_pos-start)));
if (!s_temp->next_key_pos) /* No following key */
diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c
index 21a25f66b7c..c92d577b621 100644
--- a/storage/myisam/mi_static.c
+++ b/storage/myisam/mi_static.c
@@ -27,7 +27,7 @@ uchar NEAR myisam_file_magic[]=
{ (uchar) 254, (uchar) 254,'\007', '\001', };
uchar NEAR myisam_pack_file_magic[]=
{ (uchar) 254, (uchar) 254,'\010', '\002', };
-my_string myisam_log_filename=(char*) "myisam.log";
+char * myisam_log_filename=(char*) "myisam.log";
File myisam_log_file= -1;
uint myisam_quick_table_bits=9;
ulong myisam_block_size= MI_KEY_BLOCK_LENGTH; /* Best by test */
diff --git a/storage/myisam/mi_statrec.c b/storage/myisam/mi_statrec.c
index 3f92ec31d4c..e3771560c01 100644
--- a/storage/myisam/mi_statrec.c
+++ b/storage/myisam/mi_statrec.c
@@ -18,7 +18,7 @@
#include "myisamdef.h"
-int _mi_write_static_record(MI_INFO *info, const byte *record)
+int _mi_write_static_record(MI_INFO *info, const uchar *record)
{
uchar temp[8]; /* max pointer length */
if (info->s->state.dellink != HA_OFFSET_ERROR &&
@@ -26,14 +26,14 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
{
my_off_t filepos=info->s->state.dellink;
info->rec_cache.seek_not_done=1; /* We have done a seek */
- if (info->s->file_read(info,(char*) &temp[0],info->s->base.rec_reflength,
+ if (info->s->file_read(info, &temp[0],info->s->base.rec_reflength,
info->s->state.dellink+1,
MYF(MY_NABP)))
goto err;
info->s->state.dellink= _mi_rec_pos(info->s,temp);
info->state->del--;
info->state->empty-=info->s->base.pack_reclength;
- if (info->s->file_write(info, (char*) record, info->s->base.reclength,
+ if (info->s->file_write(info, record, info->s->base.reclength,
filepos,
MYF(MY_NABP)))
goto err;
@@ -48,29 +48,29 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
}
if (info->opt_flag & WRITE_CACHE_USED)
{ /* Cache in use */
- if (my_b_write(&info->rec_cache, (byte*) record,
+ if (my_b_write(&info->rec_cache, record,
info->s->base.reclength))
goto err;
if (info->s->base.pack_reclength != info->s->base.reclength)
{
uint length=info->s->base.pack_reclength - info->s->base.reclength;
- bzero((char*) temp,length);
- if (my_b_write(&info->rec_cache, (byte*) temp,length))
+ bzero(temp,length);
+ if (my_b_write(&info->rec_cache, temp,length))
goto err;
}
}
else
{
info->rec_cache.seek_not_done=1; /* We have done a seek */
- if (info->s->file_write(info,(char*) record,info->s->base.reclength,
+ if (info->s->file_write(info, record, info->s->base.reclength,
info->state->data_file_length,
info->s->write_flag))
goto err;
if (info->s->base.pack_reclength != info->s->base.reclength)
{
uint length=info->s->base.pack_reclength - info->s->base.reclength;
- bzero((char*) temp,length);
- if (info->s->file_write(info, (byte*) temp,length,
+ bzero(temp,length);
+ if (info->s->file_write(info, temp,length,
info->state->data_file_length+
info->s->base.reclength,
info->s->write_flag))
@@ -85,13 +85,13 @@ int _mi_write_static_record(MI_INFO *info, const byte *record)
return 1;
}
-int _mi_update_static_record(MI_INFO *info, my_off_t pos, const byte *record)
+int _mi_update_static_record(MI_INFO *info, my_off_t pos, const uchar *record)
{
info->rec_cache.seek_not_done=1; /* We have done a seek */
return (info->s->file_write(info,
- (char*) record,info->s->base.reclength,
- pos,
- MYF(MY_NABP)) != 0);
+ record, info->s->base.reclength,
+ pos,
+ MYF(MY_NABP)) != 0);
}
@@ -105,12 +105,12 @@ int _mi_delete_static_record(MI_INFO *info)
_mi_dpointer(info,temp+1,info->s->state.dellink);
info->s->state.dellink = info->lastpos;
info->rec_cache.seek_not_done=1;
- return (info->s->file_write(info,(byte*) temp, 1+info->s->rec_reflength,
+ return (info->s->file_write(info,(uchar*) temp, 1+info->s->rec_reflength,
info->lastpos, MYF(MY_NABP)) != 0);
}
-int _mi_cmp_static_record(register MI_INFO *info, register const byte *old)
+int _mi_cmp_static_record(register MI_INFO *info, register const uchar *old)
{
DBUG_ENTER("_mi_cmp_static_record");
@@ -129,11 +129,11 @@ int _mi_cmp_static_record(register MI_INFO *info, register const byte *old)
if ((info->opt_flag & READ_CHECK_USED))
{ /* If check isn't disabled */
info->rec_cache.seek_not_done=1; /* We have done a seek */
- if (info->s->file_read(info, (char*) info->rec_buff, info->s->base.reclength,
+ if (info->s->file_read(info, info->rec_buff, info->s->base.reclength,
info->lastpos,
MYF(MY_NABP)))
DBUG_RETURN(-1);
- if (memcmp((byte*) info->rec_buff, (byte*) old,
+ if (memcmp(info->rec_buff, old,
(uint) info->s->base.reclength))
{
DBUG_DUMP("read",old,info->s->base.reclength);
@@ -147,12 +147,12 @@ int _mi_cmp_static_record(register MI_INFO *info, register const byte *old)
int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def,
- const byte *record, my_off_t pos)
+ const uchar *record, my_off_t pos)
{
DBUG_ENTER("_mi_cmp_static_unique");
info->rec_cache.seek_not_done=1; /* We have done a seek */
- if (info->s->file_read(info, (char*) info->rec_buff, info->s->base.reclength,
+ if (info->s->file_read(info, info->rec_buff, info->s->base.reclength,
pos, MYF(MY_NABP)))
DBUG_RETURN(-1);
DBUG_RETURN(mi_unique_comp(def, record, info->rec_buff,
@@ -166,7 +166,7 @@ int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def,
/* MY_FILE_ERROR on read-error or locking-error */
int _mi_read_static_record(register MI_INFO *info, register my_off_t pos,
- register byte *record)
+ register uchar *record)
{
int error;
@@ -178,7 +178,7 @@ int _mi_read_static_record(register MI_INFO *info, register my_off_t pos,
return(-1);
info->rec_cache.seek_not_done=1; /* We have done a seek */
- error=info->s->file_read(info,(char*) record,info->s->base.reclength,
+ error=info->s->file_read(info, record, info->s->base.reclength,
pos,MYF(MY_NABP)) != 0;
fast_mi_writeinfo(info);
if (! error)
@@ -199,7 +199,7 @@ int _mi_read_static_record(register MI_INFO *info, register my_off_t pos,
-int _mi_read_rnd_static_record(MI_INFO *info, byte *buf,
+int _mi_read_rnd_static_record(MI_INFO *info, uchar *buf,
register my_off_t filepos,
my_bool skip_deleted_blocks)
{
@@ -274,11 +274,11 @@ int _mi_read_rnd_static_record(MI_INFO *info, byte *buf,
}
/* Read record with cacheing */
- error=my_b_read(&info->rec_cache,(byte*) buf,share->base.reclength);
+ error=my_b_read(&info->rec_cache,(uchar*) buf,share->base.reclength);
if (info->s->base.pack_reclength != info->s->base.reclength && !error)
{
char tmp[8]; /* Skip fill bytes */
- error=my_b_read(&info->rec_cache,(byte*) tmp,
+ error=my_b_read(&info->rec_cache,(uchar*) tmp,
info->s->base.pack_reclength - info->s->base.reclength);
}
if (locked)
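The static-record write path earlier in this file reuses deleted rows through an intrusive free list: each deleted record stores the offset of the next deleted one, with state.dellink as the list head. The same idea in memory, with array indices standing in for file offsets (all names ours):

#include <stdio.h>

#define NREC 4
#define NIL  (-1)

static int slots[NREC];    /* payload, or next-free index when deleted */
static int dellink= NIL;   /* head of the deleted-record chain */

static void del_rec(int pos)
{ slots[pos]= dellink; dellink= pos; }

static int write_rec(int value)
{
  int pos= dellink;
  if (pos == NIL)
    return NIL;            /* would append at data_file_length instead */
  dellink= slots[pos];     /* unlink, like state.dellink= _mi_rec_pos() */
  slots[pos]= value;
  return pos;
}

int main(void)
{
  int a, b, c;
  del_rec(1);
  del_rec(3);              /* free list is now 3 -> 1 */
  a= write_rec(10);        /* reuses slot 3 */
  b= write_rec(20);        /* reuses slot 1 */
  c= write_rec(30);        /* list empty: NIL */
  printf("%d %d %d\n", a, b, c);
  return 0;
}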
diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c
index ebb9cdcb2f7..a68bcbed56c 100644
--- a/storage/myisam/mi_test1.c
+++ b/storage/myisam/mi_test1.c
@@ -40,9 +40,9 @@ static HA_KEYSEG uniqueseg[10];
static int run_test(const char *filename);
static void get_options(int argc, char *argv[]);
-static void create_key(char *key,uint rownr);
-static void create_record(char *record,uint rownr);
-static void update_record(char *record);
+static void create_key(uchar *key,uint rownr);
+static void create_record(uchar *record,uint rownr);
+static void update_record(uchar *record);
int main(int argc,char *argv[])
{
@@ -62,7 +62,7 @@ static int run_test(const char *filename)
int i,j,error,deleted,rec_length,uniques=0;
ha_rows found,row_count;
my_off_t pos;
- char record[MAX_REC_LENGTH],key[MAX_REC_LENGTH],read_record[MAX_REC_LENGTH];
+ uchar record[MAX_REC_LENGTH],key[MAX_REC_LENGTH],read_record[MAX_REC_LENGTH];
MI_UNIQUEDEF uniquedef;
MI_CREATE_INFO create_info;
@@ -109,7 +109,7 @@ static int run_test(const char *filename)
}
keyinfo[0].flag = (uint8) (pack_keys | unique_key);
- bzero((byte*) flags,sizeof(flags));
+ bzero((uchar*) flags,sizeof(flags));
if (opt_unique)
{
uint start;
@@ -258,7 +258,8 @@ static int run_test(const char *filename)
continue;
create_key(key,j);
my_errno=0;
- if ((error = mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)))
+ if ((error = mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
if (verbose || (flags[j] >= 1 ||
(error && my_errno != HA_ERR_KEY_NOT_FOUND)))
@@ -285,7 +286,7 @@ static int run_test(const char *filename)
{
create_key(key,i);
my_errno=0;
- error=mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT);
+ error=mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT);
if (verbose ||
(error == 0 && flags[i] == 0 && unique_key) ||
(error && (flags[i] != 0 || my_errno != HA_ERR_KEY_NOT_FOUND)))
@@ -326,20 +327,20 @@ err:
}
-static void create_key_part(char *key,uint rownr)
+static void create_key_part(uchar *key,uint rownr)
{
if (!unique_key)
rownr&=7; /* Some identical keys */
if (keyinfo[0].seg[0].type == HA_KEYTYPE_NUM)
{
- sprintf(key,"%*d",keyinfo[0].seg[0].length,rownr);
+ sprintf((char*) key,"%*d",keyinfo[0].seg[0].length,rownr);
}
else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT1 ||
keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT2)
{ /* Alpha record */
/* Create a key that may be easily packed */
bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B');
- sprintf(key+keyinfo[0].seg[0].length-2,"%-2d",rownr);
+ sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr);
if ((rownr & 7) == 0)
{
/* Change the key to force an unpack of the next key */
@@ -349,12 +350,12 @@ static void create_key_part(char *key,uint rownr)
else
{ /* Alpha record */
if (keyinfo[0].seg[0].flag & HA_SPACE_PACK)
- sprintf(key,"%-*d",keyinfo[0].seg[0].length,rownr);
+ sprintf((char*) key,"%-*d",keyinfo[0].seg[0].length,rownr);
else
{
/* Create a key that may be easily packed */
bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B');
- sprintf(key+keyinfo[0].seg[0].length-2,"%-2d",rownr);
+ sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr);
if ((rownr & 7) == 0)
{
      /* Change the key to force an unpack of the next key */
@@ -365,7 +366,7 @@ static void create_key_part(char *key,uint rownr)
}
-static void create_key(char *key,uint rownr)
+static void create_key(uchar *key,uint rownr)
{
if (keyinfo[0].seg[0].null_bit)
{
@@ -381,7 +382,7 @@ static void create_key(char *key,uint rownr)
{
uint tmp;
create_key_part(key+2,rownr);
- tmp=strlen(key+2);
+ tmp=strlen((char*) key+2);
int2store(key,tmp);
}
else
@@ -389,13 +390,13 @@ static void create_key(char *key,uint rownr)
}
-static char blob_key[MAX_REC_LENGTH];
-static char blob_record[MAX_REC_LENGTH+20*20];
+static uchar blob_key[MAX_REC_LENGTH];
+static uchar blob_record[MAX_REC_LENGTH+20*20];
-static void create_record(char *record,uint rownr)
+static void create_record(uchar *record,uint rownr)
{
- char *pos;
+ uchar *pos;
bzero((char*) record,MAX_REC_LENGTH);
record[0]=1; /* delete marker */
if (rownr == 0 && keyinfo[0].seg[0].null_bit)
@@ -405,9 +406,9 @@ static void create_record(char *record,uint rownr)
if (recinfo[1].type == FIELD_BLOB)
{
uint tmp;
- char *ptr;
+ uchar *ptr;
create_key_part(blob_key,rownr);
- tmp=strlen(blob_key);
+ tmp=strlen((char*) blob_key);
int4store(pos,tmp);
ptr=blob_key;
memcpy_fixed(pos+4,&ptr,sizeof(char*));
@@ -417,7 +418,7 @@ static void create_record(char *record,uint rownr)
{
uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
create_key_part(pos+pack_length,rownr);
- tmp= strlen(pos+pack_length);
+ tmp= strlen((char*) pos+pack_length);
if (pack_length == 1)
*(uchar*) pos= (uchar) tmp;
else
@@ -432,10 +433,10 @@ static void create_record(char *record,uint rownr)
if (recinfo[2].type == FIELD_BLOB)
{
uint tmp;
- char *ptr;;
- sprintf(blob_record,"... row: %d", rownr);
- strappend(blob_record,max(MAX_REC_LENGTH-rownr,10),' ');
- tmp=strlen(blob_record);
+      uchar *ptr;
+ sprintf((char*) blob_record,"... row: %d", rownr);
+ strappend((char*) blob_record,max(MAX_REC_LENGTH-rownr,10),' ');
+ tmp=strlen((char*) blob_record);
int4store(pos,tmp);
ptr=blob_record;
memcpy_fixed(pos+4,&ptr,sizeof(char*));
@@ -443,28 +444,28 @@ static void create_record(char *record,uint rownr)
else if (recinfo[2].type == FIELD_VARCHAR)
{
uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
- sprintf(pos+pack_length, "... row: %d", rownr);
- tmp= strlen(pos+pack_length);
+ sprintf((char*) pos+pack_length, "... row: %d", rownr);
+ tmp= strlen((char*) pos+pack_length);
if (pack_length == 1)
- *(uchar*) pos= (uchar) tmp;
+ *pos= (uchar) tmp;
else
int2store(pos,tmp);
}
else
{
- sprintf(pos,"... row: %d", rownr);
- strappend(pos,recinfo[2].length,' ');
+ sprintf((char*) pos,"... row: %d", rownr);
+ strappend((char*) pos,recinfo[2].length,' ');
}
}
/* change row to test re-packing of rows and reallocation of keys */
-static void update_record(char *record)
+static void update_record(uchar *record)
{
- char *pos=record+1;
+ uchar *pos=record+1;
if (recinfo[1].type == FIELD_BLOB)
{
- char *column,*ptr;
+ uchar *column,*ptr;
int length;
length=uint4korr(pos); /* Long blob */
memcpy_fixed(&column,pos+4,sizeof(char*));
@@ -473,7 +474,8 @@ static void update_record(char *record)
memcpy_fixed(pos+4,&ptr,sizeof(char*)); /* Store pointer to new key */
if (keyinfo[0].seg[0].type != HA_KEYTYPE_NUM)
default_charset_info->cset->casedn(default_charset_info,
- blob_key, length, blob_key, length);
+ (char*) blob_key, length,
+ (char*) blob_key, length);
pos+=recinfo[1].length;
}
else if (recinfo[1].type == FIELD_VARCHAR)
@@ -481,22 +483,22 @@ static void update_record(char *record)
uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
uint length= pack_length == 1 ? (uint) *(uchar*) pos : uint2korr(pos);
default_charset_info->cset->casedn(default_charset_info,
- pos + pack_length, length,
- pos + pack_length, length);
+ (char*) pos + pack_length, length,
+ (char*) pos + pack_length, length);
pos+=recinfo[1].length;
}
else
{
if (keyinfo[0].seg[0].type != HA_KEYTYPE_NUM)
default_charset_info->cset->casedn(default_charset_info,
- pos, keyinfo[0].seg[0].length,
- pos, keyinfo[0].seg[0].length);
+ (char*) pos, keyinfo[0].seg[0].length,
+ (char*) pos, keyinfo[0].seg[0].length);
pos+=recinfo[1].length;
}
if (recinfo[2].type == FIELD_BLOB)
{
- char *column;
+ uchar *column;
int length;
length=uint4korr(pos);
memcpy_fixed(&column,pos+4,sizeof(char*));
@@ -504,7 +506,7 @@ static void update_record(char *record)
bfill(blob_record+length,20,'.'); /* Make it larger */
length+=20;
int4store(pos,length);
- column=blob_record;
+ column= blob_record;
memcpy_fixed(pos+4,&column,sizeof(char*));
}
else if (recinfo[2].type == FIELD_VARCHAR)
@@ -534,21 +536,21 @@ static struct my_option my_long_options[] =
{"debug", '#', "Undocumented",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
- {"delete_rows", 'd', "Undocumented", (gptr*) &remove_count,
- (gptr*) &remove_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
+ {"delete_rows", 'd', "Undocumented", (uchar**) &remove_count,
+ (uchar**) &remove_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
{"help", '?', "Display help and exit",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"insert_rows", 'i', "Undocumented", (gptr*) &insert_count,
- (gptr*) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
+ {"insert_rows", 'i', "Undocumented", (uchar**) &insert_count,
+ (uchar**) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
{"key_alpha", 'a', "Use a key of type HA_KEYTYPE_TEXT",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"key_binary_pack", 'B', "Undocumented",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"key_blob", 'b', "Undocumented",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"key_cache", 'K', "Undocumented", (gptr*) &key_cacheing,
- (gptr*) &key_cacheing, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"key_length", 'k', "Undocumented", (gptr*) &key_length, (gptr*) &key_length,
+ {"key_cache", 'K', "Undocumented", (uchar**) &key_cacheing,
+ (uchar**) &key_cacheing, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"key_length", 'k', "Undocumented", (uchar**) &key_length, (uchar**) &key_length,
0, GET_UINT, REQUIRED_ARG, 6, 0, 0, 0, 0, 0},
{"key_multiple", 'm', "Undocumented",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -559,21 +561,21 @@ static struct my_option my_long_options[] =
{"key_varchar", 'w', "Test VARCHAR keys",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"null_fields", 'N', "Define fields with NULL",
- (gptr*) &null_fields, (gptr*) &null_fields, 0, GET_BOOL, NO_ARG,
+ (uchar**) &null_fields, (uchar**) &null_fields, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"row_fixed_size", 'S', "Undocumented",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"row_pointer_size", 'R', "Undocumented", (gptr*) &rec_pointer_size,
- (gptr*) &rec_pointer_size, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"row_pointer_size", 'R', "Undocumented", (uchar**) &rec_pointer_size,
+ (uchar**) &rec_pointer_size, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"silent", 's', "Undocumented",
- (gptr*) &silent, (gptr*) &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"skip_update", 'U', "Undocumented", (gptr*) &skip_update,
- (gptr*) &skip_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"unique", 'C', "Undocumented", (gptr*) &opt_unique, (gptr*) &opt_unique, 0,
+ (uchar**) &silent, (uchar**) &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"skip_update", 'U', "Undocumented", (uchar**) &skip_update,
+ (uchar**) &skip_update, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"unique", 'C', "Undocumented", (uchar**) &opt_unique, (uchar**) &opt_unique, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"update_rows", 'u', "Undocumented", (gptr*) &update_count,
- (gptr*) &update_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
- {"verbose", 'v', "Be more verbose", (gptr*) &verbose, (gptr*) &verbose, 0,
+ {"update_rows", 'u', "Undocumented", (uchar**) &update_count,
+ (uchar**) &update_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0},
+ {"verbose", 'v', "Be more verbose", (uchar**) &verbose, (uchar**) &verbose, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"version", 'V', "Print version number and exit",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
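
The option tables here swap their value pointers from gptr* to uchar**; gptr was apparently an older generic-pointer typedef, and each entry stores the address of the variable the option sets. A simplified sketch of that pattern follows; the field names are illustrative, not struct my_option.

    /* Toy option table keyed by generic byte pointers. */
    #include <stdio.h>

    struct opt {
        const char     *name;
        unsigned char **value;    /* address of the variable to set */
    };

    static unsigned insert_count, update_count;

    static struct opt opts[] = {
        { "insert_rows", (unsigned char **) &insert_count },
        { "update_rows", (unsigned char **) &update_count },
        { 0, 0 }
    };

    int main(void)
    {
        struct opt *o;
        for (o = opts; o->name; o++)
            printf("%s -> %p\n", o->name, (void *) o->value);
        return 0;
    }
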
diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c
index 878bba31ea8..902801b5e6e 100644
--- a/storage/myisam/mi_test2.c
+++ b/storage/myisam/mi_test2.c
@@ -36,8 +36,8 @@
static void get_options(int argc, char *argv[]);
static uint rnd(uint max_value);
-static void fix_length(byte *record,uint length);
-static void put_blob_in_record(char *blob_pos,char **blob_buffer);
+static void fix_length(uchar *record,uint length);
+static void put_blob_in_record(uchar *blob_pos,char **blob_buffer);
static void copy_key(struct st_myisam_info *info,uint inx,
uchar *record,uchar *key);
@@ -53,8 +53,8 @@ static uint key_cache_block_size= KEY_CACHE_BLOCK_SIZE;
static uint keys=MYISAM_KEYS,recant=1000;
static uint use_blob=0;
static uint16 key1[1001],key3[5000];
-static char record[300],record2[300],key[100],key2[100],
- read_record[300],read_record2[300],read_record3[300];
+static uchar record[300],record2[300],key[100],key2[100];
+static uchar read_record[300],read_record2[300],read_record3[300];
static HA_KEYSEG glob_keyseg[MYISAM_KEYS][MAX_PARTS];
/* Test program */
@@ -231,7 +231,7 @@ int main(int argc, char *argv[])
for (i=0 ; i < recant ; i++)
{
n1=rnd(1000); n2=rnd(100); n3=rnd(5000);
- sprintf(record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count);
+ sprintf((char*) record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count);
int4store(record+STANDARD_LENGTH-4,(long) i);
fix_length(record,(uint) STANDARD_LENGTH+rnd(60));
put_blob_in_record(record+blob_pos,&blob_buffer);
@@ -262,8 +262,8 @@ int main(int argc, char *argv[])
for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
if (!j)
for (j=999 ; j>0 && key1[j] == 0 ; j--) ;
- sprintf(key,"%6d",j);
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))
+ sprintf((char*) key,"%6d",j);
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
{
printf("Test in loop: Can't find key: \"%s\"\n",key);
goto err;
@@ -290,8 +290,8 @@ int main(int argc, char *argv[])
for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
if (j != 0)
{
- sprintf(key,"%6d",j);
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))
+ sprintf((char*) key,"%6d",j);
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
{
printf("can't find key1: \"%s\"\n",key);
goto err;
@@ -304,8 +304,8 @@ int main(int argc, char *argv[])
goto err;
}
opt_delete++;
- key1[atoi(read_record+keyinfo[0].seg[0].start)]--;
- key3[atoi(read_record+keyinfo[2].seg[0].start)]=0;
+ key1[atoi((char*) read_record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) read_record+keyinfo[2].seg[0].start)]=0;
}
else
    puts("Warning: Skipping delete test because no duplicate keys");
@@ -317,17 +317,17 @@ int main(int argc, char *argv[])
for (i=0 ; i<recant/10 ; i++)
{
n1=rnd(1000); n2=rnd(100); n3=rnd(5000);
- sprintf(record2,"%6d:%4d:%8d:XXX: %4d ",n1,n2,n3,update);
+ sprintf((char*) record2,"%6d:%4d:%8d:XXX: %4d ",n1,n2,n3,update);
int4store(record2+STANDARD_LENGTH-4,(long) i);
fix_length(record2,(uint) STANDARD_LENGTH+rnd(60));
for (j=rnd(1000)+1 ; j>0 && key1[j] == 0 ; j--) ;
if (j != 0)
{
- sprintf(key,"%6d",j);
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))
+ sprintf((char*) key,"%6d",j);
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
{
- printf("can't find key1: \"%s\"\n",key);
+ printf("can't find key1: \"%s\"\n",(char*) key);
goto err;
}
if (use_blob)
@@ -350,8 +350,8 @@ int main(int argc, char *argv[])
}
else
{
- key1[atoi(read_record+keyinfo[0].seg[0].start)]--;
- key3[atoi(read_record+keyinfo[2].seg[0].start)]=0;
+ key1[atoi((char*) read_record+keyinfo[0].seg[0].start)]--;
+ key3[atoi((char*) read_record+keyinfo[2].seg[0].start)]=0;
key1[n1]++; key3[n3]=1;
update++;
}
@@ -367,7 +367,7 @@ int main(int argc, char *argv[])
dupp_keys=key1[i]; j=i;
}
}
- sprintf(key,"%6d",j);
+ sprintf((char*) key,"%6d",j);
start=keyinfo[0].seg[0].start;
length=keyinfo[0].seg[0].length;
if (dupp_keys)
@@ -377,7 +377,7 @@ int main(int argc, char *argv[])
DBUG_PRINT("progpos",("first - next -> last - prev -> first"));
if (verbose) printf(" Using key: \"%s\" Keys: %d\n",key,dupp_keys);
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
goto err;
if (mi_rsame(file,read_record2,-1))
goto err;
@@ -422,7 +422,7 @@ int main(int argc, char *argv[])
}
/* Check of mi_rnext_same */
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT))
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
goto err;
ant=1;
while (!mi_rnext_same(file,read_record3) && ant < dupp_keys+10)
@@ -455,8 +455,8 @@ int main(int argc, char *argv[])
bcmp(read_record2,read_record3,reclength))
{
printf("Can't find last record\n");
- DBUG_DUMP("record2",(byte*) read_record2,reclength);
- DBUG_DUMP("record3",(byte*) read_record3,reclength);
+ DBUG_DUMP("record2",(uchar*) read_record2,reclength);
+ DBUG_DUMP("record3",(uchar*) read_record3,reclength);
goto end;
}
ant=1;
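
An aside on the comparison used above: bcmp is a legacy BSD routine that only reports equal versus not equal, which is exactly how these tests use it; memcmp is the portable equivalent. For instance:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char a[4] = {1, 2, 3, 4}, b[4] = {1, 2, 3, 4};
        printf("equal: %s\n", memcmp(a, b, sizeof a) == 0 ? "yes" : "no");
        return 0;
    }
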
@@ -496,7 +496,7 @@ int main(int argc, char *argv[])
goto err;
if (bcmp(read_record2,read_record3,reclength))
printf("Can't find last record\n");
-
+#ifdef NOT_ANYMORE
if (!silent)
puts("- Test read key-part");
strmov(key2,key);
@@ -514,12 +514,14 @@ int main(int argc, char *argv[])
goto end;
}
}
+#endif
if (dupp_keys > 2)
{
if (!silent)
printf("- Read key (first) - next - delete - next -> last\n");
DBUG_PRINT("progpos",("first - next - delete - next -> last"));
- if (mi_rkey(file,read_record,0,key,0,HA_READ_KEY_EXACT)) goto err;
+ if (mi_rkey(file,read_record,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
+ goto err;
if (mi_rnext(file,read_record3,0)) goto err;
if (mi_delete(file,read_record3)) goto err;
opt_delete++;
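
The obsolete key-part read test above is fenced off behind a never-defined macro instead of being deleted, which keeps it readable as reference. The pattern in isolation:

    #include <stdio.h>

    int main(void)
    {
    #ifdef NOT_ANYMORE            /* never defined: block is compiled out */
        puts("old key-part read test");
    #endif
        puts("current tests only");
        return 0;
    }
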
@@ -555,7 +557,8 @@ int main(int argc, char *argv[])
if (!silent)
printf("- Read first - delete - next -> last\n");
DBUG_PRINT("progpos",("first - delete - next -> last"));
- if (mi_rkey(file,read_record3,0,key,0,HA_READ_KEY_EXACT)) goto err;
+ if (mi_rkey(file,read_record3,0,key,HA_WHOLE_KEY,HA_READ_KEY_EXACT))
+ goto err;
if (mi_delete(file,read_record3)) goto err;
opt_delete++;
ant=1;
@@ -618,10 +621,10 @@ int main(int argc, char *argv[])
copy_key(file,(uint) i,(uchar*) read_record,(uchar*) key);
copy_key(file,(uint) i,(uchar*) read_record2,(uchar*) key2);
min_key.key= key;
- min_key.length= USE_WHOLE_KEY;
+ min_key.keypart_map= HA_WHOLE_KEY;
min_key.flag= HA_READ_KEY_EXACT;
max_key.key= key2;
- max_key.length= USE_WHOLE_KEY;
+ max_key.keypart_map= HA_WHOLE_KEY;
max_key.flag= HA_READ_AFTER_KEY;
range_records= mi_records_in_range(file,(int) i, &min_key, &max_key);
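
Here the range endpoints stop carrying a byte count (min_key.length = USE_WHOLE_KEY) and instead carry a key-part map (min_key.keypart_map = HA_WHOLE_KEY), matching the mi_rkey change earlier in this file. A compact sketch of the two shapes; struct and constant names are stand-ins, not the server's key_range.

    #include <stdio.h>

    struct range_old { const unsigned char *key; unsigned length; };
    struct range_new { const unsigned char *key; unsigned long keypart_map; };

    #define ALL_PARTS ((unsigned long) ~0UL)   /* stand-in for HA_WHOLE_KEY */

    int main(void)
    {
        unsigned char key[16] = {0};
        struct range_old o = { key, (unsigned) sizeof key };  /* byte count */
        struct range_new n = { key, ALL_PARTS };   /* one bit per key part */
        printf("old: %u bytes, new: %#lx map\n", o.length, n.keypart_map);
        return 0;
    }
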
@@ -649,8 +652,8 @@ int main(int argc, char *argv[])
key_range min_key, max_key;
if (j > k)
swap_variables(int, j, k);
- sprintf(key,"%6d",j);
- sprintf(key2,"%6d",k);
+ sprintf((char*) key,"%6d",j);
+ sprintf((char*) key2,"%6d",k);
min_key.key= key;
min_key.length= USE_WHOLE_KEY;
@@ -1001,18 +1004,18 @@ static uint rnd(uint max_value)
/* Create a variable length record */
-static void fix_length(byte *rec, uint length)
+static void fix_length(uchar *rec, uint length)
{
bmove(rec+STANDARD_LENGTH,
"0123456789012345678901234567890123456789012345678901234567890",
length-STANDARD_LENGTH);
- strfill(rec+length,STANDARD_LENGTH+60-length,' ');
+ strfill((char*) rec+length,STANDARD_LENGTH+60-length,' ');
} /* fix_length */
/* Put maybe a blob in record */
-static void put_blob_in_record(char *blob_pos, char **blob_buffer)
+static void put_blob_in_record(uchar *blob_pos, char **blob_buffer)
{
ulong i,length;
if (use_blob)
diff --git a/storage/myisam/mi_test3.c b/storage/myisam/mi_test3.c
index 3987c20ab69..5bdc33b8518 100644
--- a/storage/myisam/mi_test3.c
+++ b/storage/myisam/mi_test3.c
@@ -48,9 +48,9 @@ int test_read(MI_INFO *,int),test_write(MI_INFO *,int,int),
test_update(MI_INFO *,int,int),test_rrnd(MI_INFO *,int);
struct record {
- char id[8];
- char nr[4];
- char text[10];
+ uchar id[8];
+ uchar nr[4];
+ uchar text[10];
} record;
@@ -243,8 +243,8 @@ int test_read(MI_INFO *file,int id)
for (i=0 ; i < 100 ; i++)
{
find=rnd(100000);
- if (!mi_rkey(file,record.id,1,(byte*) &find,
- sizeof(find),HA_READ_KEY_EXACT))
+ if (!mi_rkey(file,record.id,1,(uchar*) &find, HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
found++;
else
{
@@ -362,8 +362,8 @@ int test_write(MI_INFO *file,int id,int lock_type)
mi_extra(file,HA_EXTRA_WRITE_CACHE,0);
}
- sprintf(record.id,"%7d",getpid());
- strnmov(record.text,"Testing...", sizeof(record.text));
+ sprintf((char*) record.id,"%7d",getpid());
+ strnmov((char*) record.text,"Testing...", sizeof(record.text));
tries=(uint) rnd(100)+10;
for (i=count=0 ; i < tries ; i++)
@@ -419,15 +419,15 @@ int test_update(MI_INFO *file,int id,int lock_type)
}
}
bzero((char*) &new_record,sizeof(new_record));
- strmov(new_record.text,"Updated");
+ strmov((char*) new_record.text,"Updated");
found=next=prev=update=0;
for (i=0 ; i < 100 ; i++)
{
tmp=rnd(100000);
int4store(find,tmp);
- if (!mi_rkey(file,record.id,1,(byte*) find,
- sizeof(find),HA_READ_KEY_EXACT))
+ if (!mi_rkey(file,record.id,1,(uchar*) find, HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
found++;
else
{
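
Once buffers like record.id are uchar, every libc string routine they pass through needs an explicit (char *) cast, because the str* and *printf families are declared on char; that is the cast pattern repeated across these test files. Standalone:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char key[32];
        sprintf((char *) key, "%6d", 42);         /* format into raw buffer */
        printf("len=%zu key=\"%s\"\n", strlen((char *) key), (char *) key);
        return 0;
    }
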
diff --git a/storage/myisam/mi_unique.c b/storage/myisam/mi_unique.c
index 635f6c18247..e490fb683e4 100644
--- a/storage/myisam/mi_unique.c
+++ b/storage/myisam/mi_unique.c
@@ -18,7 +18,7 @@
#include "myisamdef.h"
#include <m_ctype.h>
-my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record,
+my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record,
ha_checksum unique_hash, my_off_t disk_pos)
{
my_off_t lastpos=info->lastpos;
@@ -73,9 +73,9 @@ my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record,
Add support for bit fields
*/
-ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record)
+ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *record)
{
- const byte *pos, *end;
+ const uchar *pos, *end;
ha_checksum crc= 0;
ulong seed1=0, seed2= 4;
HA_KEYSEG *keyseg;
@@ -111,7 +111,7 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record)
else if (keyseg->flag & HA_BLOB_PART)
{
uint tmp_length=_mi_calc_blob_length(keyseg->bit_start,pos);
- memcpy_fixed((byte*) &pos,pos+keyseg->bit_start,sizeof(char*));
+ memcpy_fixed((uchar*) &pos,pos+keyseg->bit_start,sizeof(char*));
if (!length || length > tmp_length)
length=tmp_length; /* The whole blob */
}
@@ -145,10 +145,10 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record)
# Rows are different
*/
-int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b,
+int mi_unique_comp(MI_UNIQUEDEF *def, const uchar *a, const uchar *b,
my_bool null_are_equal)
{
- const byte *pos_a, *pos_b, *end;
+ const uchar *pos_a, *pos_b, *end;
HA_KEYSEG *keyseg;
for (keyseg=def->seg ; keyseg < def->end ; keyseg++)
@@ -206,8 +206,8 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b,
set_if_smaller(a_length, keyseg->length);
set_if_smaller(b_length, keyseg->length);
}
- memcpy_fixed((byte*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*));
- memcpy_fixed((byte*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*));
+ memcpy_fixed((uchar*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*));
+ memcpy_fixed((uchar*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*));
}
if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 ||
type == HA_KEYTYPE_VARTEXT2)
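
mi_unique_hash and mi_unique_comp walk the HA_KEYSEG descriptors of a unique definition and hash or compare one segment at a time, so the retyping to const uchar* keeps the arithmetic on unsigned bytes. A toy segment-wise hash in the same spirit; the real code also handles NULLs, VARCHAR lengths and blob indirection.

    #include <stdio.h>

    struct seg { unsigned start, length; };   /* toy stand-in for HA_KEYSEG */

    static unsigned long hash_record(const unsigned char *rec,
                                     const struct seg *segs, unsigned n)
    {
        unsigned long crc = 0;
        unsigned i, j;
        for (i = 0; i < n; i++)
            for (j = 0; j < segs[i].length; j++)
                crc = crc * 31 + rec[segs[i].start + j];   /* toy mixing */
        return crc;
    }

    int main(void)
    {
        unsigned char rec[8] = "abcdefg";
        struct seg segs[] = { {0, 3}, {4, 3} };
        printf("hash: %lu\n", hash_record(rec, segs, 2));
        return 0;
    }
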
diff --git a/storage/myisam/mi_update.c b/storage/myisam/mi_update.c
index bea457d2e9a..956334b7806 100644
--- a/storage/myisam/mi_update.c
+++ b/storage/myisam/mi_update.c
@@ -18,7 +18,7 @@
#include "fulltext.h"
#include "rt_index.h"
-int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec)
+int mi_update(register MI_INFO *info, const uchar *oldrec, uchar *newrec)
{
int flag,key_changed,save_errno;
reg3 my_off_t pos;
@@ -102,7 +102,7 @@ int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec)
key_changed|=HA_STATE_WRITTEN;
}
changed|=((ulonglong) 1 << i);
- if (_mi_ft_update(info,i,(char*) old_key,oldrec,newrec,pos))
+ if (_mi_ft_update(info,i, old_key,oldrec,newrec,pos))
goto err;
}
}
@@ -115,7 +115,7 @@ int mi_update(register MI_INFO *info, const byte *oldrec, byte *newrec)
info->update&= ~HA_STATE_RNEXT_SAME;
if (new_length != old_length ||
- memcmp((byte*) old_key,(byte*) new_key,new_length))
+ memcmp((uchar*) old_key,(uchar*) new_key,new_length))
{
if ((int) i == info->lastinx)
key_changed|=HA_STATE_WRITTEN; /* Mark that keyfile changed */
@@ -207,8 +207,8 @@ err:
{
if (share->keyinfo[i].flag & HA_FULLTEXT)
{
- if ((flag++ && _mi_ft_del(info,i,(char*) new_key,newrec,pos)) ||
- _mi_ft_add(info,i,(char*) old_key,oldrec,pos))
+ if ((flag++ && _mi_ft_del(info,i, new_key,newrec,pos)) ||
+ _mi_ft_add(info,i, old_key,oldrec,pos))
break;
}
else
diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c
index 57c054f2de8..70ba7a4588a 100644
--- a/storage/myisam/mi_write.c
+++ b/storage/myisam/mi_write.c
@@ -40,7 +40,7 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr,uchar *key,
/* Write new record to database */
-int mi_write(MI_INFO *info, byte *record)
+int mi_write(MI_INFO *info, uchar *record)
{
MYISAM_SHARE *share=info->s;
uint i;
@@ -112,7 +112,7 @@ int mi_write(MI_INFO *info, byte *record)
}
if (share->keyinfo[i].flag & HA_FULLTEXT )
{
- if (_mi_ft_add(info,i,(char*) buff,record,filepos))
+ if (_mi_ft_add(info,i, buff, record, filepos))
{
if (local_lock_tree)
rw_unlock(&share->key_root_lock[i]);
@@ -200,7 +200,7 @@ err:
rw_wrlock(&share->key_root_lock[i]);
if (share->keyinfo[i].flag & HA_FULLTEXT)
{
- if (_mi_ft_del(info,i,(char*) buff,record,filepos))
+ if (_mi_ft_del(info,i, buff,record,filepos))
{
if (local_lock_tree)
rw_unlock(&share->key_root_lock[i]);
@@ -286,7 +286,7 @@ int _mi_ck_write_btree(register MI_INFO *info, uint keynr, uchar *key,
if (!error)
error= _mi_ft_convert_to_ft2(info, keynr, key);
delete_dynamic(info->ft1_to_ft2);
- my_free((gptr)info->ft1_to_ft2, MYF(0));
+ my_free((uchar*)info->ft1_to_ft2, MYF(0));
info->ft1_to_ft2=0;
}
DBUG_RETURN(error);
@@ -403,14 +403,14 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
ft_intXstore(keypos, subkeys);
if (!error)
error=_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff);
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(error);
}
}
else /* not HA_FULLTEXT, normal HA_NOSAME key */
{
info->dupp_key_pos= dupp_key_pos;
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
my_errno=HA_ERR_FOUND_DUPP_KEY;
DBUG_RETURN(-1);
}
@@ -429,10 +429,10 @@ static int w_search(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (_mi_write_keypage(info,keyinfo,page,DFLT_INIT_HITS,temp_buff))
goto err;
}
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(error);
err:
- my_afree((byte*) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_PRINT("exit",("Error: %d",my_errno));
DBUG_RETURN (-1);
} /* w_search */
@@ -488,7 +488,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
if (key_pos != anc_buff+2+nod_flag && (keyinfo->flag &
(HA_BINARY_PACK_KEY | HA_PACK_KEY)))
{
- DBUG_DUMP("prev_key",(byte*) key_buff,_mi_keylength(keyinfo,key_buff));
+ DBUG_DUMP("prev_key",(uchar*) key_buff,_mi_keylength(keyinfo,key_buff));
}
if (keyinfo->flag & HA_PACK_KEY)
{
@@ -506,7 +506,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
my_errno=HA_ERR_CRASHED;
DBUG_RETURN(-1);
}
- bmove_upp((byte*) endpos+t_length,(byte*) endpos,(uint) (endpos-key_pos));
+ bmove_upp((uchar*) endpos+t_length,(uchar*) endpos,(uint) (endpos-key_pos));
}
else
{
@@ -562,7 +562,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
we cannot easily dispatch an empty page here */
b+=blen+ft2len+2;
for (a=anc_buff+a_length ; b < a ; b+=ft2len+2)
- insert_dynamic(info->ft1_to_ft2, (char*) b);
+ insert_dynamic(info->ft1_to_ft2, b);
/* fixing the page's length - it contains only one key now */
mi_putint(anc_buff,2+blen+ft2len+2,0);
@@ -595,7 +595,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo,
MI_KEY_PARAM s_temp;
DBUG_ENTER("mi_split_page");
LINT_INIT(after_key);
- DBUG_DUMP("buff",(byte*) buff,mi_getint(buff));
+ DBUG_DUMP("buff",(uchar*) buff,mi_getint(buff));
if (info->s->keyinfo+info->lastinx == keyinfo)
info->page_changed=1; /* Info->buff is used */
@@ -619,7 +619,7 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo,
{
DBUG_PRINT("test",("Splitting nod"));
pos=key_pos-nod_flag;
- memcpy((byte*) info->buff+2,(byte*) pos,(size_t) nod_flag);
+ memcpy((uchar*) info->buff+2,(uchar*) pos,(size_t) nod_flag);
}
/* Move middle item to key and pointer to new page */
@@ -635,14 +635,14 @@ int _mi_split_page(register MI_INFO *info, register MI_KEYDEF *keyinfo,
(uchar*) 0, (uchar*) 0,
key_buff, &s_temp);
length=(uint) ((buff+a_length)-key_pos);
- memcpy((byte*) info->buff+key_ref_length+t_length,(byte*) key_pos,
+ memcpy((uchar*) info->buff+key_ref_length+t_length,(uchar*) key_pos,
(size_t) length);
(*keyinfo->store_key)(keyinfo,info->buff+key_ref_length,&s_temp);
mi_putint(info->buff,length+t_length+key_ref_length,nod_flag);
if (_mi_write_keypage(info,keyinfo,new_pos,DFLT_INIT_HITS,info->buff))
DBUG_RETURN(-1);
- DBUG_DUMP("key",(byte*) key,_mi_keylength(keyinfo,key));
+ DBUG_DUMP("key",(uchar*) key,_mi_keylength(keyinfo,key));
DBUG_RETURN(2); /* Middle key up */
} /* _mi_split_page */
@@ -764,7 +764,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
length,keys;
uchar *pos,*buff,*extra_buff;
my_off_t next_page,new_pos;
- byte tmp_part_key[MI_MAX_KEY_BUFF];
+ uchar tmp_part_key[MI_MAX_KEY_BUFF];
DBUG_ENTER("_mi_balance_page");
k_length=keyinfo->keylength;
@@ -796,7 +796,7 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
if (!_mi_fetch_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff,0))
goto err;
- DBUG_DUMP("next",(byte*) info->buff,mi_getint(info->buff));
+ DBUG_DUMP("next",(uchar*) info->buff,mi_getint(info->buff));
/* Test if there is room to share keys */
@@ -815,23 +815,23 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
if (left_length < new_left_length)
{ /* Move keys buff -> leaf */
pos=curr_buff+left_length;
- memcpy((byte*) pos,(byte*) father_key_pos, (size_t) k_length);
- memcpy((byte*) pos+k_length, (byte*) buff+2,
+ memcpy((uchar*) pos,(uchar*) father_key_pos, (size_t) k_length);
+ memcpy((uchar*) pos+k_length, (uchar*) buff+2,
(size_t) (length=new_left_length - left_length - k_length));
pos=buff+2+length;
- memcpy((byte*) father_key_pos,(byte*) pos,(size_t) k_length);
- bmove((byte*) buff+2,(byte*) pos+k_length,new_right_length);
+ memcpy((uchar*) father_key_pos,(uchar*) pos,(size_t) k_length);
+ bmove((uchar*) buff+2,(uchar*) pos+k_length,new_right_length);
}
else
{ /* Move keys -> buff */
- bmove_upp((byte*) buff+new_right_length,(byte*) buff+right_length,
+ bmove_upp((uchar*) buff+new_right_length,(uchar*) buff+right_length,
right_length-2);
length=new_right_length-right_length-k_length;
- memcpy((byte*) buff+2+length,father_key_pos,(size_t) k_length);
+ memcpy((uchar*) buff+2+length,father_key_pos,(size_t) k_length);
pos=curr_buff+new_left_length;
- memcpy((byte*) father_key_pos,(byte*) pos,(size_t) k_length);
- memcpy((byte*) buff+2,(byte*) pos+k_length,(size_t) length);
+ memcpy((uchar*) father_key_pos,(uchar*) pos,(size_t) k_length);
+ memcpy((uchar*) buff+2,(uchar*) pos+k_length,(size_t) length);
}
if (_mi_write_keypage(info,keyinfo,next_page,DFLT_INIT_HITS,info->buff) ||
@@ -858,22 +858,22 @@ static int _mi_balance_page(register MI_INFO *info, MI_KEYDEF *keyinfo,
/* move first largest keys to new page */
pos=buff+right_length-extra_length;
- memcpy((byte*) extra_buff+2,pos,(size_t) extra_length);
+ memcpy((uchar*) extra_buff+2,pos,(size_t) extra_length);
/* Save new parting key */
memcpy(tmp_part_key, pos-k_length,k_length);
/* Make place for new keys */
- bmove_upp((byte*) buff+new_right_length,(byte*) pos-k_length,
+ bmove_upp((uchar*) buff+new_right_length,(uchar*) pos-k_length,
right_length-extra_length-k_length-2);
/* Copy keys from left page */
pos= curr_buff+new_left_length;
- memcpy((byte*) buff+2,(byte*) pos+k_length,
+ memcpy((uchar*) buff+2,(uchar*) pos+k_length,
(size_t) (length=left_length-new_left_length-k_length));
/* Copy old parting key */
- memcpy((byte*) buff+2+length,father_key_pos,(size_t) k_length);
+ memcpy((uchar*) buff+2+length,father_key_pos,(size_t) k_length);
/* Move new parting keys up to caller */
- memcpy((byte*) (right ? key : father_key_pos),pos,(size_t) k_length);
- memcpy((byte*) (right ? father_key_pos : key),tmp_part_key, k_length);
+ memcpy((uchar*) (right ? key : father_key_pos),pos,(size_t) k_length);
+ memcpy((uchar*) (right ? father_key_pos : key),tmp_part_key, k_length);
if ((new_pos=_mi_new(info,keyinfo,DFLT_INIT_HITS)) == HA_OFFSET_ERROR)
goto err;
@@ -987,7 +987,7 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
DBUG_RETURN(0);
if (rows && rows*total_keylength < cache_size)
- cache_size=rows;
+ cache_size= (ulong)rows;
else
cache_size/=total_keylength*16;
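
The last hunk makes a narrowing conversion explicit: assuming ha_rows is wider than ulong, assigning rows to cache_size would otherwise warn. The cast is only reached when rows*total_keylength is already below cache_size, so it cannot truncate. Sketched with assumed type widths:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long rows = 1000;           /* like ha_rows */
        unsigned long cache_size = 1024 * 1024;   /* like ulong */
        unsigned total_keylength = 64;

        if (rows && rows * total_keylength < cache_size)
            cache_size = (unsigned long) rows;    /* bounded, so safe */
        else
            cache_size /= total_keylength * 16;

        printf("cache_size: %lu\n", cache_size);
        return 0;
    }
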
diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c
index 4bc1833cca6..63d954242a0 100644
--- a/storage/myisam/myisam_ftdump.c
+++ b/storage/myisam/myisam_ftdump.c
@@ -46,7 +46,7 @@ static struct my_option my_long_options[] =
{"stats", 's', "Report global stats.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"verbose", 'v', "Be verbose.",
- (gptr*) &verbose, (gptr*) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ (uchar**) &verbose, (uchar**) &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index 066e6cdb81b..567e1057e5d 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -68,9 +68,9 @@ static void get_options(int *argc,char * * *argv);
static void print_version(void);
static void usage(void);
static int myisamchk(MI_CHECK *param, char *filename);
-static void descript(MI_CHECK *param, register MI_INFO *info, my_string name);
+static void descript(MI_CHECK *param, register MI_INFO *info, char * name);
static int mi_sort_records(MI_CHECK *param, register MI_INFO *info,
- my_string name, uint sort_key,
+ char * name, uint sort_key,
my_bool write_info, my_bool update_index);
static int sort_record_index(MI_SORT_PARAM *sort_param, MI_INFO *info,
MI_KEYDEF *keyinfo,
@@ -167,7 +167,7 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"character-sets-dir", OPT_CHARSETS_DIR,
"Directory where character sets are.",
- (gptr*) &charsets_dir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ (uchar**) &charsets_dir, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"check", 'c',
"Check table for errors.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -187,8 +187,8 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"data-file-length", 'D',
"Max length of data file (when recreating data-file when it's full).",
- (gptr*) &check_param.max_data_file_length,
- (gptr*) &check_param.max_data_file_length,
+ (uchar**) &check_param.max_data_file_length,
+ (uchar**) &check_param.max_data_file_length,
0, GET_LL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"extend-check", 'e',
"If used when checking a table, ensure that the table is 100 percent consistent, which will take a long time. If used when repairing a table, try to recover every possible row from the data file. Normally this will also find a lot of garbage rows; Don't use this option with repair if you are not totally desperate.",
@@ -210,13 +210,13 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"keys-used", 'k',
"Tell MyISAM to update only some specific keys. # is a bit mask of which keys to use. This can be used to get faster inserts.",
- (gptr*) &check_param.keys_in_use,
- (gptr*) &check_param.keys_in_use,
+ (uchar**) &check_param.keys_in_use,
+ (uchar**) &check_param.keys_in_use,
0, GET_ULL, REQUIRED_ARG, -1, 0, 0, 0, 0, 0},
{"max-record-length", OPT_MAX_RECORD_LENGTH,
"Skip rows bigger than this if myisamchk can't allocate memory to hold it",
- (gptr*) &check_param.max_record_length,
- (gptr*) &check_param.max_record_length,
+ (uchar**) &check_param.max_record_length,
+ (uchar**) &check_param.max_record_length,
0, GET_ULL, REQUIRED_ARG, LONGLONG_MAX, 0, LONGLONG_MAX, 0, 0, 0},
{"medium-check", 'm',
"Faster than extend-check, but only finds 99.99% of all errors. Should be good enough for most cases.",
@@ -245,12 +245,12 @@ static struct my_option my_long_options[] =
#endif
{"set-auto-increment", 'A',
"Force auto_increment to start at this or higher value. If no value is given, then sets the next auto_increment value to the highest used value for the auto key + 1.",
- (gptr*) &check_param.auto_increment_value,
- (gptr*) &check_param.auto_increment_value,
+ (uchar**) &check_param.auto_increment_value,
+ (uchar**) &check_param.auto_increment_value,
0, GET_ULL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"set-collation", OPT_SET_COLLATION,
"Change the collation used by the index",
- (gptr*) &set_collation_name, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ (uchar**) &set_collation_name, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"set-variable", 'O',
"Change the value of a variable. Please note that this option is deprecated; you can set variables directly with --variable-name=value.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -262,12 +262,12 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"sort-records", 'R',
"Sort records according to an index. This makes your data much more localized and may speed up things. (It may be VERY slow to do a sort the first time!)",
- (gptr*) &check_param.opt_sort_key,
- (gptr*) &check_param.opt_sort_key,
+ (uchar**) &check_param.opt_sort_key,
+ (uchar**) &check_param.opt_sort_key,
0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tmpdir", 't',
"Path for temporary files.",
- (gptr*) &opt_tmpdir,
+ (uchar**) &opt_tmpdir,
0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"update-state", 'U',
"Mark tables as crashed if any errors were found.",
@@ -285,54 +285,54 @@ static struct my_option my_long_options[] =
"Wait if table is locked.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{ "key_buffer_size", OPT_KEY_BUFFER_SIZE, "",
- (gptr*) &check_param.use_buffers, (gptr*) &check_param.use_buffers, 0,
+ (uchar**) &check_param.use_buffers, (uchar**) &check_param.use_buffers, 0,
GET_ULONG, REQUIRED_ARG, (long) USE_BUFFER_INIT, (long) MALLOC_OVERHEAD,
(long) ~0L, (long) MALLOC_OVERHEAD, (long) IO_SIZE, 0},
{ "key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "",
- (gptr*) &opt_key_cache_block_size,
- (gptr*) &opt_key_cache_block_size, 0,
+ (uchar**) &opt_key_cache_block_size,
+ (uchar**) &opt_key_cache_block_size, 0,
GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH,
MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0},
{ "myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "",
- (gptr*) &opt_myisam_block_size, (gptr*) &opt_myisam_block_size, 0,
+ (uchar**) &opt_myisam_block_size, (uchar**) &opt_myisam_block_size, 0,
GET_LONG, REQUIRED_ARG, MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH,
MI_MAX_KEY_BLOCK_LENGTH, 0, MI_MIN_KEY_BLOCK_LENGTH, 0},
{ "read_buffer_size", OPT_READ_BUFFER_SIZE, "",
- (gptr*) &check_param.read_buffer_length,
- (gptr*) &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
+ (uchar**) &check_param.read_buffer_length,
+ (uchar**) &check_param.read_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
(long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
(long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
{ "write_buffer_size", OPT_WRITE_BUFFER_SIZE, "",
- (gptr*) &check_param.write_buffer_length,
- (gptr*) &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
+ (uchar**) &check_param.write_buffer_length,
+ (uchar**) &check_param.write_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
(long) READ_BUFFER_INIT, (long) MALLOC_OVERHEAD,
(long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
{ "sort_buffer_size", OPT_SORT_BUFFER_SIZE, "",
- (gptr*) &check_param.sort_buffer_length,
- (gptr*) &check_param.sort_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
+ (uchar**) &check_param.sort_buffer_length,
+ (uchar**) &check_param.sort_buffer_length, 0, GET_ULONG, REQUIRED_ARG,
(long) SORT_BUFFER_INIT, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD),
(long) ~0L, (long) MALLOC_OVERHEAD, (long) 1L, 0},
{ "sort_key_blocks", OPT_SORT_KEY_BLOCKS, "",
- (gptr*) &check_param.sort_key_blocks,
- (gptr*) &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG,
+ (uchar**) &check_param.sort_key_blocks,
+ (uchar**) &check_param.sort_key_blocks, 0, GET_ULONG, REQUIRED_ARG,
BUFFERS_WHEN_SORTING, 4L, 100L, 0L, 1L, 0},
- { "decode_bits", OPT_DECODE_BITS, "", (gptr*) &decode_bits,
- (gptr*) &decode_bits, 0, GET_UINT, REQUIRED_ARG, 9L, 4L, 17L, 0L, 1L, 0},
- { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, "", (gptr*) &ft_min_word_len,
- (gptr*) &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN,
+ { "decode_bits", OPT_DECODE_BITS, "", (uchar**) &decode_bits,
+ (uchar**) &decode_bits, 0, GET_UINT, REQUIRED_ARG, 9L, 4L, 17L, 0L, 1L, 0},
+ { "ft_min_word_len", OPT_FT_MIN_WORD_LEN, "", (uchar**) &ft_min_word_len,
+ (uchar**) &ft_min_word_len, 0, GET_ULONG, REQUIRED_ARG, 4, 1, HA_FT_MAXCHARLEN,
0, 1, 0},
- { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", (gptr*) &ft_max_word_len,
- (gptr*) &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10,
+ { "ft_max_word_len", OPT_FT_MAX_WORD_LEN, "", (uchar**) &ft_max_word_len,
+ (uchar**) &ft_max_word_len, 0, GET_ULONG, REQUIRED_ARG, HA_FT_MAXCHARLEN, 10,
HA_FT_MAXCHARLEN, 0, 1, 0},
{ "ft_stopword_file", OPT_FT_STOPWORD_FILE,
"Use stopwords from this file instead of built-in list.",
- (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR,
+ (uchar**) &ft_stopword_file, (uchar**) &ft_stopword_file, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"stats_method", OPT_STATS_METHOD,
- "Specifies how index statistics collection code should threat NULLs. "
+ "Specifies how index statistics collection code should treat NULLs. "
"Possible values of name are \"nulls_unequal\" (default behavior for 4.1/5.0), "
"\"nulls_equal\" (emulate 4.0 behavior), and \"nulls_ignored\".",
- (gptr*) &myisam_stats_method_str, (gptr*) &myisam_stats_method_str, 0,
+ (uchar**) &myisam_stats_method_str, (uchar**) &myisam_stats_method_str, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -444,7 +444,7 @@ static void usage(void)
MySQL faster. You can check the calculated distribution\n\
by using '--description --verbose table_name'.\n\
--stats_method=name Specifies how index statistics collection code should\n\
- threat NULLs. Possible values of name are \"nulls_unequal\"\n\
+ treat NULLs. Possible values of name are \"nulls_unequal\"\n\
(default for 4.1/5.0), \"nulls_equal\" (emulate 4.0), and \n\
\"nulls_ignored\".\n\
-d, --description Prints some information about table.\n\
@@ -794,7 +794,7 @@ static void get_options(register int *argc,register char ***argv)
/* Check table */
-static int myisamchk(MI_CHECK *param, my_string filename)
+static int myisamchk(MI_CHECK *param, char * filename)
{
int error,lock_type,recreate;
int rep_quick= param->testflag & (T_QUICK | T_FORCE_UNIQUENESS);
@@ -1199,7 +1199,7 @@ end2:
/* Write info about table */
-static void descript(MI_CHECK *param, register MI_INFO *info, my_string name)
+static void descript(MI_CHECK *param, register MI_INFO *info, char * name)
{
uint key,keyseg_nr,field,start;
reg3 MI_KEYDEF *keyinfo;
@@ -1465,7 +1465,7 @@ static void descript(MI_CHECK *param, register MI_INFO *info, my_string name)
/* Sort records according to one key */
static int mi_sort_records(MI_CHECK *param,
- register MI_INFO *info, my_string name,
+ register MI_INFO *info, char * name,
uint sort_key,
my_bool write_info,
my_bool update_index)
@@ -1536,7 +1536,7 @@ static int mi_sort_records(MI_CHECK *param,
mi_check_print_error(param,"Not enough memory for key block");
goto err;
}
- if (!(sort_param.record=(byte*) my_malloc((uint) share->base.pack_reclength,
+ if (!(sort_param.record=(uchar*) my_malloc((uint) share->base.pack_reclength,
MYF(0))))
{
mi_check_print_error(param,"Not enough memory for record");
@@ -1567,7 +1567,7 @@ static int mi_sort_records(MI_CHECK *param,
for (key=0 ; key < share->base.keys ; key++)
share->keyinfo[key].flag|= HA_SORT_ALLOWS_SAME;
- if (my_pread(share->kfile,(byte*) temp_buff,
+ if (my_pread(share->kfile,(uchar*) temp_buff,
(uint) keyinfo->block_length,
share->state.key_root[sort_key],
MYF(MY_NABP+MY_WME)))
@@ -1630,7 +1630,7 @@ err:
}
if (temp_buff)
{
- my_afree((gptr) temp_buff);
+ my_afree((uchar*) temp_buff);
}
my_free(sort_param.record,MYF(MY_ALLOW_ZERO_PTR));
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
@@ -1679,7 +1679,7 @@ static int sort_record_index(MI_SORT_PARAM *sort_param,MI_INFO *info,
if (nod_flag)
{
next_page=_mi_kpos(nod_flag,keypos);
- if (my_pread(info->s->kfile,(byte*) temp_buff,
+ if (my_pread(info->s->kfile,(uchar*) temp_buff,
(uint) keyinfo->block_length, next_page,
MYF(MY_NABP+MY_WME)))
{
@@ -1718,19 +1718,19 @@ static int sort_record_index(MI_SORT_PARAM *sort_param,MI_INFO *info,
goto err;
}
  /* Clear end of block to get better compression if the table is backed up */
- bzero((byte*) buff+used_length,keyinfo->block_length-used_length);
- if (my_pwrite(info->s->kfile,(byte*) buff,(uint) keyinfo->block_length,
+ bzero((uchar*) buff+used_length,keyinfo->block_length-used_length);
+ if (my_pwrite(info->s->kfile,(uchar*) buff,(uint) keyinfo->block_length,
page,param->myf_rw))
{
mi_check_print_error(param,"%d when updating keyblock",my_errno);
goto err;
}
if (temp_buff)
- my_afree((gptr) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(0);
err:
if (temp_buff)
- my_afree((gptr) temp_buff);
+ my_afree((uchar*) temp_buff);
DBUG_RETURN(1);
} /* sort_record_index */
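
myisamchk.c also retires the my_string typedef in favor of plain char *, left as "char * name" by the mechanical replacement. Assuming my_string was a pointer typedef for char *, spelling the pointer out has a concrete benefit: pointer typedefs hide where const lands. A small illustration:

    #include <stdio.h>

    typedef char *my_string_t;    /* hypothetical stand-in for my_string */

    int main(void)
    {
        const my_string_t a = 0;  /* const binds to the POINTER: char *const */
        const char       *b = 0;  /* usually what is wanted: const pointee */
        (void) a; (void) b;
        puts("pointer typedefs hide where const lands");
        return 0;
    }
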
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index a491e6d210c..721d6b9f271 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -167,22 +167,22 @@ typedef struct st_mi_isam_share { /* Shared between opens */
char *unique_file_name; /* realpath() of index file */
char *data_file_name, /* Resolved path names from symlinks */
*index_file_name;
- byte *file_map; /* mem-map of file if possible */
+ uchar *file_map; /* mem-map of file if possible */
KEY_CACHE *key_cache; /* ref to the current key cache */
MI_DECODE_TREE *decode_trees;
uint16 *decode_tables;
- int (*read_record)(struct st_myisam_info*, my_off_t, byte*);
- int (*write_record)(struct st_myisam_info*, const byte*);
- int (*update_record)(struct st_myisam_info*, my_off_t, const byte*);
+ int (*read_record)(struct st_myisam_info*, my_off_t, uchar*);
+ int (*write_record)(struct st_myisam_info*, const uchar*);
+ int (*update_record)(struct st_myisam_info*, my_off_t, const uchar*);
int (*delete_record)(struct st_myisam_info*);
- int (*read_rnd)(struct st_myisam_info*, byte*, my_off_t, my_bool);
- int (*compare_record)(struct st_myisam_info*, const byte *);
+ int (*read_rnd)(struct st_myisam_info*, uchar*, my_off_t, my_bool);
+ int (*compare_record)(struct st_myisam_info*, const uchar *);
/* Function to use for a row checksum. */
- ha_checksum (*calc_checksum)(struct st_myisam_info*, const byte *);
+ ha_checksum (*calc_checksum)(struct st_myisam_info*, const uchar *);
int (*compare_unique)(struct st_myisam_info*, MI_UNIQUEDEF *,
- const byte *record, my_off_t pos);
- uint (*file_read)(MI_INFO *, byte *, uint, my_off_t, myf);
- uint (*file_write)(MI_INFO *, byte *, uint, my_off_t, myf);
+ const uchar *record, my_off_t pos);
+ size_t (*file_read)(MI_INFO *, uchar *, size_t, my_off_t, myf);
+ size_t (*file_write)(MI_INFO *, const uchar *, size_t, my_off_t, myf);
invalidator_by_filename invalidator; /* query cache invalidator */
ulong this_process; /* processid */
ulong last_process; /* For table-change-check */
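
The I/O hooks in MYISAM_SHARE move from uint to size_t counts, which matches the libc pread/pwrite signatures and avoids truncating large transfers on LP64 platforms; the write hook also gains a const buffer. A simplified sketch of the retyped hooks; the real members additionally take MI_INFO and myf flags.

    #include <stddef.h>
    #include <stdio.h>

    typedef size_t (*read_hook)(unsigned char *buf, size_t count,
                                long long offset);
    typedef size_t (*write_hook)(const unsigned char *buf, size_t count,
                                 long long offset);   /* note: const buffer */

    static size_t dummy_read(unsigned char *b, size_t c, long long o)
    { (void) b; (void) o; return c; }
    static size_t dummy_write(const unsigned char *b, size_t c, long long o)
    { (void) b; (void) o; return c; }

    int main(void)
    {
        read_hook  r = dummy_read;
        write_hook w = dummy_write;
        unsigned char buf[16] = {0};
        printf("read %zu, wrote %zu\n", r(buf, sizeof buf, 0),
               w(buf, sizeof buf, 0));
        return 0;
    }
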
@@ -245,12 +245,12 @@ struct st_myisam_info {
uchar *buff, /* Temp area for key */
*lastkey,*lastkey2; /* Last used search key */
  uchar *first_mbr_key; /* Searched spatial key */
- byte *rec_buff; /* Tempbuff for recordpack */
+ uchar *rec_buff; /* Tempbuff for recordpack */
uchar *int_keypos, /* Save position for next/previous */
*int_maxpos; /* -""- */
uint int_nod_flag; /* -""- */
uint32 int_keytree_version; /* -""- */
- int (*read_record)(struct st_myisam_info*, my_off_t, byte*);
+ int (*read_record)(struct st_myisam_info*, my_off_t, uchar*);
invalidator_by_filename invalidator; /* query cache invalidator */
ulong this_unique; /* uniq filenumber or thread */
ulong last_unique; /* last unique number */
@@ -334,10 +334,10 @@ typedef struct st_mi_sort_param
HA_KEYSEG *seg;
SORT_INFO *sort_info;
uchar **sort_keys;
- byte *rec_buff;
+ uchar *rec_buff;
void *wordlist, *wordptr;
MEM_ROOT wordroot;
- char *record;
+ uchar *record;
MY_TMPDIR *tmpdir;
int (*key_cmp)(struct st_mi_sort_param *, const void *, const void *);
int (*key_read)(struct st_mi_sort_param *,void *);
@@ -346,9 +346,10 @@ typedef struct st_mi_sort_param
NEAR int (*write_keys)(struct st_mi_sort_param *, register uchar **,
uint , struct st_buffpek *, IO_CACHE *);
NEAR uint (*read_to_buffer)(IO_CACHE *,struct st_buffpek *, uint);
- NEAR int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,char *,
+ NEAR int (*write_key)(struct st_mi_sort_param *, IO_CACHE *,uchar *,
uint, uint);
} MI_SORT_PARAM;
+
/* Some defines used by isam functions */
#define USE_WHOLE_KEY MI_MAX_KEY_BUFF*2 /* Use whole key in _mi_search() */
@@ -497,20 +498,20 @@ typedef struct st_mi_s_param
/* Prototypes for intern functions */
-extern int _mi_read_dynamic_record(MI_INFO *info,my_off_t filepos,byte *buf);
-extern int _mi_write_dynamic_record(MI_INFO*, const byte*);
-extern int _mi_update_dynamic_record(MI_INFO*, my_off_t, const byte*);
+extern int _mi_read_dynamic_record(MI_INFO *info,my_off_t filepos,uchar *buf);
+extern int _mi_write_dynamic_record(MI_INFO*, const uchar*);
+extern int _mi_update_dynamic_record(MI_INFO*, my_off_t, const uchar*);
extern int _mi_delete_dynamic_record(MI_INFO *info);
-extern int _mi_cmp_dynamic_record(MI_INFO *info,const byte *record);
-extern int _mi_read_rnd_dynamic_record(MI_INFO *, byte *,my_off_t, my_bool);
-extern int _mi_write_blob_record(MI_INFO*, const byte*);
-extern int _mi_update_blob_record(MI_INFO*, my_off_t, const byte*);
-extern int _mi_read_static_record(MI_INFO *info, my_off_t filepos,byte *buf);
-extern int _mi_write_static_record(MI_INFO*, const byte*);
-extern int _mi_update_static_record(MI_INFO*, my_off_t, const byte*);
+extern int _mi_cmp_dynamic_record(MI_INFO *info,const uchar *record);
+extern int _mi_read_rnd_dynamic_record(MI_INFO *, uchar *,my_off_t, my_bool);
+extern int _mi_write_blob_record(MI_INFO*, const uchar*);
+extern int _mi_update_blob_record(MI_INFO*, my_off_t, const uchar*);
+extern int _mi_read_static_record(MI_INFO *info, my_off_t filepos,uchar *buf);
+extern int _mi_write_static_record(MI_INFO*, const uchar*);
+extern int _mi_update_static_record(MI_INFO*, my_off_t, const uchar*);
extern int _mi_delete_static_record(MI_INFO *info);
-extern int _mi_cmp_static_record(MI_INFO *info,const byte *record);
-extern int _mi_read_rnd_static_record(MI_INFO*, byte *,my_off_t, my_bool);
+extern int _mi_cmp_static_record(MI_INFO *info,const uchar *record);
+extern int _mi_read_rnd_static_record(MI_INFO*, uchar *,my_off_t, my_bool);
extern int _mi_ck_write(MI_INFO *info,uint keynr,uchar *key,uint length);
extern int _mi_ck_real_write_btree(MI_INFO *info, MI_KEYDEF *keyinfo,
uchar *key, uint key_length,
@@ -604,39 +605,39 @@ extern int _mi_dispose(MI_INFO *info,MI_KEYDEF *keyinfo,my_off_t pos,
int level);
extern my_off_t _mi_new(MI_INFO *info,MI_KEYDEF *keyinfo,int level);
extern uint _mi_make_key(MI_INFO *info,uint keynr,uchar *key,
- const byte *record,my_off_t filepos);
+ const uchar *record,my_off_t filepos);
extern uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key,
uchar *old, key_part_map keypart_map,
HA_KEYSEG **last_used_keyseg);
-extern int _mi_read_key_record(MI_INFO *info,my_off_t filepos,byte *buf);
-extern int _mi_read_cache(IO_CACHE *info,byte *buff,my_off_t pos,
+extern int _mi_read_key_record(MI_INFO *info,my_off_t filepos,uchar *buf);
+extern int _mi_read_cache(IO_CACHE *info,uchar *buff,my_off_t pos,
uint length,int re_read_if_possibly);
-extern ulonglong retrieve_auto_increment(MI_INFO *info,const byte *record);
+extern ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record);
-extern byte *mi_alloc_rec_buff(MI_INFO *,ulong, byte**);
+extern uchar *mi_alloc_rec_buff(MI_INFO *,ulong, uchar**);
#define mi_get_rec_buff_ptr(info,buf) \
((((info)->s->options & HA_OPTION_PACK_RECORD) && (buf)) ? \
(buf) - MI_REC_BUFF_OFFSET : (buf))
#define mi_get_rec_buff_len(info,buf) \
(*((uint32 *)(mi_get_rec_buff_ptr(info,buf))))
-extern ulong _mi_rec_unpack(MI_INFO *info,byte *to,byte *from,
+extern ulong _mi_rec_unpack(MI_INFO *info,uchar *to,uchar *from,
ulong reclength);
-extern my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *packpos,
+extern my_bool _mi_rec_check(MI_INFO *info,const uchar *record, uchar *packpos,
ulong packed_length, my_bool with_checkum);
extern int _mi_write_part_record(MI_INFO *info,my_off_t filepos,ulong length,
- my_off_t next_filepos,byte **record,
+ my_off_t next_filepos,uchar **record,
ulong *reclength,int *flag);
extern void _mi_print_key(FILE *stream,HA_KEYSEG *keyseg,const uchar *key,
uint length);
extern my_bool _mi_read_pack_info(MI_INFO *info,pbool fix_keys);
-extern int _mi_read_pack_record(MI_INFO *info,my_off_t filepos,byte *buf);
-extern int _mi_read_rnd_pack_record(MI_INFO*, byte *,my_off_t, my_bool);
+extern int _mi_read_pack_record(MI_INFO *info,my_off_t filepos,uchar *buf);
+extern int _mi_read_rnd_pack_record(MI_INFO*, uchar *,my_off_t, my_bool);
extern int _mi_pack_rec_unpack(MI_INFO *info, MI_BIT_BUFF *bit_buff,
- byte *to, byte *from, ulong reclength);
+ uchar *to, uchar *from, ulong reclength);
extern ulonglong mi_safe_mul(ulonglong a,ulonglong b);
-extern int _mi_ft_update(MI_INFO *info, uint keynr, byte *keybuf,
- const byte *oldrec, const byte *newrec, my_off_t pos);
+extern int _mi_ft_update(MI_INFO *info, uint keynr, uchar *keybuf,
+ const uchar *oldrec, const uchar *newrec, my_off_t pos);
struct st_sort_info;
@@ -697,33 +698,33 @@ extern "C" {
#endif
extern uint _mi_get_block_info(MI_BLOCK_INFO *,File, my_off_t);
-extern uint _mi_rec_pack(MI_INFO *info,byte *to,const byte *from);
+extern uint _mi_rec_pack(MI_INFO *info,uchar *to,const uchar *from);
extern uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
- MI_BLOCK_INFO *info, byte **rec_buff_p,
+ MI_BLOCK_INFO *info, uchar **rec_buff_p,
File file, my_off_t filepos);
-extern void _my_store_blob_length(byte *pos,uint pack_length,uint length);
+extern void _my_store_blob_length(uchar *pos,uint pack_length,uint length);
extern void _myisam_log(enum myisam_log_commands command,MI_INFO *info,
- const byte *buffert,uint length);
+ const uchar *buffert,uint length);
extern void _myisam_log_command(enum myisam_log_commands command,
- MI_INFO *info, const byte *buffert,
+ MI_INFO *info, const uchar *buffert,
uint length, int result);
extern void _myisam_log_record(enum myisam_log_commands command,MI_INFO *info,
- const byte *record,my_off_t filepos,
+ const uchar *record,my_off_t filepos,
int result);
extern void mi_report_error(int errcode, const char *file_name);
extern my_bool _mi_memmap_file(MI_INFO *info);
extern void _mi_unmap_file(MI_INFO *info);
-extern uint save_pack_length(uint version, byte *block_buff, ulong length);
+extern uint save_pack_length(uint version, uchar *block_buff, ulong length);
extern uint read_pack_length(uint version, const uchar *buf, ulong *length);
extern uint calc_pack_length(uint version, ulong length);
-extern uint mi_mmap_pread(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags);
-extern uint mi_mmap_pwrite(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags);
-extern uint mi_nommap_pread(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags);
-extern uint mi_nommap_pwrite(MI_INFO *info, byte *Buffer,
- uint Count, my_off_t offset, myf MyFlags);
+extern size_t mi_mmap_pread(MI_INFO *info, uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags);
+extern size_t mi_mmap_pwrite(MI_INFO *info, const uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags);
+extern size_t mi_nommap_pread(MI_INFO *info, uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags);
+extern size_t mi_nommap_pwrite(MI_INFO *info, const uchar *Buffer,
+ size_t Count, my_off_t offset, myf MyFlags);
uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite);
uchar *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state);
@@ -731,27 +732,27 @@ uint mi_state_info_read_dsk(File file, MI_STATE_INFO *state, my_bool pRead);
uint mi_base_info_write(File file, MI_BASE_INFO *base);
uchar *my_n_base_info_read(uchar *ptr, MI_BASE_INFO *base);
int mi_keyseg_write(File file, const HA_KEYSEG *keyseg);
-char *mi_keyseg_read(char *ptr, HA_KEYSEG *keyseg);
+uchar *mi_keyseg_read(uchar *ptr, HA_KEYSEG *keyseg);
uint mi_keydef_write(File file, MI_KEYDEF *keydef);
-char *mi_keydef_read(char *ptr, MI_KEYDEF *keydef);
+uchar *mi_keydef_read(uchar *ptr, MI_KEYDEF *keydef);
uint mi_uniquedef_write(File file, MI_UNIQUEDEF *keydef);
-char *mi_uniquedef_read(char *ptr, MI_UNIQUEDEF *keydef);
+uchar *mi_uniquedef_read(uchar *ptr, MI_UNIQUEDEF *keydef);
uint mi_recinfo_write(File file, MI_COLUMNDEF *recinfo);
-char *mi_recinfo_read(char *ptr, MI_COLUMNDEF *recinfo);
+uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo);
extern int mi_disable_indexes(MI_INFO *info);
extern int mi_enable_indexes(MI_INFO *info);
extern int mi_indexes_are_disabled(MI_INFO *info);
-ulong _my_calc_total_blob_length(MI_INFO *info, const byte *record);
-ha_checksum mi_checksum(MI_INFO *info, const byte *buf);
-ha_checksum mi_static_checksum(MI_INFO *info, const byte *buf);
-my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, byte *record,
+ulong _my_calc_total_blob_length(MI_INFO *info, const uchar *record);
+ha_checksum mi_checksum(MI_INFO *info, const uchar *buf);
+ha_checksum mi_static_checksum(MI_INFO *info, const uchar *buf);
+my_bool mi_check_unique(MI_INFO *info, MI_UNIQUEDEF *def, uchar *record,
ha_checksum unique_hash, my_off_t pos);
-ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *buf);
+ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const uchar *buf);
int _mi_cmp_static_unique(MI_INFO *info, MI_UNIQUEDEF *def,
- const byte *record, my_off_t pos);
+ const uchar *record, my_off_t pos);
int _mi_cmp_dynamic_unique(MI_INFO *info, MI_UNIQUEDEF *def,
- const byte *record, my_off_t pos);
-int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b,
+ const uchar *record, my_off_t pos);
+int mi_unique_comp(MI_UNIQUEDEF *def, const uchar *a, const uchar *b,
my_bool null_are_equal);
void mi_get_status(void* param, int concurrent_insert);
void mi_update_status(void* param);
diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c
index 0bcf74d87a4..6566a7a7a02 100644
--- a/storage/myisam/myisamlog.c
+++ b/storage/myisam/myisamlog.c
@@ -32,14 +32,15 @@ struct file_info {
long process;
int filenr,id;
uint rnd;
- my_string name,show_name,record;
+ char *name, *show_name;
+ uchar *record;
MI_INFO *isam;
bool closed,used;
ulong accessed;
};
struct test_if_open_param {
- my_string name;
+ char * name;
int max_id;
};
@@ -53,24 +54,25 @@ struct st_access_param
extern int main(int argc,char * *argv);
static void get_options(int *argc,char ***argv);
-static int examine_log(my_string file_name,char **table_names);
-static int read_string(IO_CACHE *file,gptr *to,uint length);
+static int examine_log(char * file_name,char **table_names);
+static int read_string(IO_CACHE *file,uchar* *to,uint length);
static int file_info_compare(void *cmp_arg, void *a,void *b);
static int test_if_open(struct file_info *key,element_count count,
struct test_if_open_param *param);
-static void fix_blob_pointers(MI_INFO *isam,byte *record);
+static void fix_blob_pointers(MI_INFO *isam,uchar *record);
static int test_when_accessed(struct file_info *key,element_count count,
struct st_access_param *access_param);
static void file_info_free(struct file_info *info);
static int close_some_file(TREE *tree);
static int reopen_closed_file(TREE *tree,struct file_info *file_info);
-static int find_record_with_key(struct file_info *file_info,byte *record);
+static int find_record_with_key(struct file_info *file_info,uchar *record);
static void printf_log(const char *str,...);
-static bool cmp_filename(struct file_info *file_info,my_string name);
+static bool cmp_filename(struct file_info *file_info,char * name);
static uint verbose=0,update=0,test_info=0,max_files=0,re_open_count=0,
recover=0,prefix_remove=0,opt_processes=0;
-static my_string log_filename=0,filepath=0,write_filename=0,record_pos_file=0;
+static char *log_filename=0, *filepath=0, *write_filename=0;
+static char *record_pos_file= 0;
static ulong com_count[10][3],number_of_commands=(ulong) ~0L,
isamlog_process;
static my_off_t isamlog_filepos,start_offset=0,record_pos= HA_OFFSET_ERROR;
@@ -296,7 +298,7 @@ static void get_options(register int *argc, register char ***argv)
}
-static int examine_log(my_string file_name, char **table_names)
+static int examine_log(char * file_name, char **table_names)
{
uint command,result,files_open;
ulong access_time,length;
@@ -304,7 +306,7 @@ static int examine_log(my_string file_name, char **table_names)
int lock_command,mi_result;
char isam_file_name[FN_REFLEN],llbuff[21],llbuff2[21];
uchar head[20];
- gptr buff;
+ uchar* buff;
struct test_if_open_param open_param;
IO_CACHE cache;
File file;
@@ -327,7 +329,7 @@ static int examine_log(my_string file_name, char **table_names)
}
init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0));
- bzero((gptr) com_count,sizeof(com_count));
+ bzero((uchar*) com_count,sizeof(com_count));
init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
(tree_element_free) file_info_free, NULL);
VOID(init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE,
@@ -335,7 +337,7 @@ static int examine_log(my_string file_name, char **table_names)
files_open=0; access_time=0;
while (access_time++ != number_of_commands &&
- !my_b_read(&cache,(byte*) head,9))
+ !my_b_read(&cache,(uchar*) head,9))
{
isamlog_filepos=my_b_tell(&cache)-9L;
file_info.filenr= mi_uint2korr(head+1);
@@ -375,14 +377,15 @@ static int examine_log(my_string file_name, char **table_names)
}
if (curr_file_info)
- printf("\nWarning: %s is opened with same process and filenumber\nMaybe you should use the -P option ?\n",
+ printf("\nWarning: %s is opened with same process and filenumber\n"
+ "Maybe you should use the -P option ?\n",
curr_file_info->show_name);
- if (my_b_read(&cache,(byte*) head,2))
+ if (my_b_read(&cache,(uchar*) head,2))
goto err;
file_info.name=0;
file_info.show_name=0;
file_info.record=0;
- if (read_string(&cache,(gptr*) &file_info.name,
+ if (read_string(&cache,(uchar**) &file_info.name,
(uint) mi_uint2korr(head)))
goto err;
{
@@ -455,7 +458,7 @@ static int examine_log(my_string file_name, char **table_names)
files_open++;
file_info.closed=0;
}
- VOID(tree_insert(&tree, (gptr) &file_info, 0, tree.custom_arg));
+ VOID(tree_insert(&tree, (uchar*) &file_info, 0, tree.custom_arg));
if (file_info.used)
{
if (verbose && !record_pos_file)
@@ -474,11 +477,11 @@ static int examine_log(my_string file_name, char **table_names)
{
if (!curr_file_info->closed)
files_open--;
- VOID(tree_delete(&tree, (gptr) curr_file_info, 0, tree.custom_arg));
+ VOID(tree_delete(&tree, (uchar*) curr_file_info, 0, tree.custom_arg));
}
break;
case MI_LOG_EXTRA:
- if (my_b_read(&cache,(byte*) head,1))
+ if (my_b_read(&cache,(uchar*) head,1))
goto err;
extra_command=(enum ha_extra_function) head[0];
if (verbose && !record_pos_file &&
@@ -499,7 +502,7 @@ static int examine_log(my_string file_name, char **table_names)
}
break;
case MI_LOG_DELETE:
- if (my_b_read(&cache,(byte*) head,8))
+ if (my_b_read(&cache,(uchar*) head,8))
goto err;
filepos=mi_sizekorr(head);
if (verbose && (!record_pos_file ||
@@ -534,7 +537,7 @@ static int examine_log(my_string file_name, char **table_names)
break;
case MI_LOG_WRITE:
case MI_LOG_UPDATE:
- if (my_b_read(&cache,(byte*) head,12))
+ if (my_b_read(&cache,(uchar*) head,12))
goto err;
filepos=mi_sizekorr(head);
length=mi_uint4korr(head+8);
@@ -616,7 +619,7 @@ static int examine_log(my_string file_name, char **table_names)
my_free(buff,MYF(0));
break;
case MI_LOG_LOCK:
- if (my_b_read(&cache,(byte*) head,sizeof(lock_command)))
+ if (my_b_read(&cache,(uchar*) head,sizeof(lock_command)))
goto err;
memcpy_fixed(&lock_command,head,sizeof(lock_command));
if (verbose && !record_pos_file &&
@@ -675,14 +678,14 @@ static int examine_log(my_string file_name, char **table_names)
}
-static int read_string(IO_CACHE *file, register gptr *to, register uint length)
+static int read_string(IO_CACHE *file, register uchar* *to, register uint length)
{
DBUG_ENTER("read_string");
if (*to)
- my_free((gptr) *to,MYF(0));
- if (!(*to= (gptr) my_malloc(length+1,MYF(MY_WME))) ||
- my_b_read(file,(byte*) *to,length))
+ my_free((uchar*) *to,MYF(0));
+ if (!(*to= (uchar*) my_malloc(length+1,MYF(MY_WME))) ||
+ my_b_read(file,(uchar*) *to,length))
{
if (*to)
my_free(*to,MYF(0));
@@ -717,9 +720,9 @@ static int test_if_open (struct file_info *key,
}
-static void fix_blob_pointers(MI_INFO *info, byte *record)
+static void fix_blob_pointers(MI_INFO *info, uchar *record)
{
- byte *pos;
+ uchar *pos;
MI_BLOB *blob,*end;
pos=record+info->s->base.reclength;
@@ -801,7 +804,7 @@ static int reopen_closed_file(TREE *tree, struct file_info *fileinfo)
/* Try to find record with uniq key */
-static int find_record_with_key(struct file_info *file_info, byte *record)
+static int find_record_with_key(struct file_info *file_info, uchar *record)
{
uint key;
MI_INFO *info=file_info->isam;
@@ -813,7 +816,7 @@ static int find_record_with_key(struct file_info *file_info, byte *record)
info->s->keyinfo[key].flag & HA_NOSAME)
{
VOID(_mi_make_key(info,key,tmp_key,record,0L));
- return mi_rkey(info,file_info->record,(int) key,(char*) tmp_key,0,
+ return mi_rkey(info,file_info->record,(int) key,tmp_key,0,
HA_READ_KEY_EXACT);
}
}
@@ -836,7 +839,7 @@ static void printf_log(const char *format,...)
}
-static bool cmp_filename(struct file_info *file_info, my_string name)
+static bool cmp_filename(struct file_info *file_info, char * name)
{
if (!file_info)
return 1;
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index fb631b5e63e..37428ddd279 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -69,8 +69,8 @@ typedef struct st_huff_counts {
my_off_t pre_space[8];
my_off_t tot_end_space,tot_pre_space,zero_fields,empty_fields,bytes_packed;
TREE int_tree; /* Tree for detecting distinct column values. */
- byte *tree_buff; /* Column values, 'field_length' each. */
- byte *tree_pos; /* Points to end of column values in 'tree_buff'. */
+ uchar *tree_buff; /* Column values, 'field_length' each. */
+ uchar *tree_pos; /* Points to end of column values in 'tree_buff'. */
} HUFF_COUNTS;
typedef struct st_huff_element HUFF_ELEMENT;
@@ -141,8 +141,8 @@ static int test_space_compress(HUFF_COUNTS *huff_counts,my_off_t records,
enum en_fieldtype field_type);
static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts,uint trees);
static int make_huff_tree(HUFF_TREE *tree,HUFF_COUNTS *huff_counts);
-static int compare_huff_elements(void *not_used, byte *a,byte *b);
-static int save_counts_in_queue(byte *key,element_count count,
+static int compare_huff_elements(void *not_used, uchar *a,uchar *b);
+static int save_counts_in_queue(uchar *key,element_count count,
HUFF_TREE *tree);
static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,uint flag);
static uint join_same_trees(HUFF_COUNTS *huff_counts,uint trees);
@@ -171,7 +171,7 @@ static int save_state(MI_INFO *isam_file,PACK_MRG_INFO *mrg,my_off_t new_length,
static int save_state_mrg(File file,PACK_MRG_INFO *isam_file,my_off_t new_length,
ha_checksum crc);
static int mrg_close(PACK_MRG_INFO *mrg);
-static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf);
+static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf);
static void mrg_reset(PACK_MRG_INFO *mrg);
#if !defined(DBUG_OFF)
static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count);
@@ -257,10 +257,10 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"backup", 'b', "Make a backup of the table as table_name.OLD.",
- (gptr*) &backup, (gptr*) &backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ (uchar**) &backup, (uchar**) &backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"character-sets-dir", OPT_CHARSETS_DIR_MP,
- "Directory where character sets are.", (gptr*) &charsets_dir,
- (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ "Directory where character sets are.", (uchar**) &charsets_dir,
+ (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"force", 'f',
@@ -268,7 +268,7 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"join", 'j',
"Join all given tables into 'new_table_name'. All tables MUST have identical layouts.",
- (gptr*) &join_table, (gptr*) &join_table, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
+ (uchar**) &join_table, (uchar**) &join_table, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
0, 0, 0},
{"help", '?', "Display this help and exit.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -282,8 +282,8 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"version", 'V', "Output version information and exit.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"wait", 'w', "Wait and retry if table is in use.", (gptr*) &opt_wait,
- (gptr*) &opt_wait, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"wait", 'w', "Wait and retry if table is in use.", (uchar**) &opt_wait,
+ (uchar**) &opt_wait, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -478,7 +478,7 @@ static bool open_isam_files(PACK_MRG_INFO *mrg,char **names,uint count)
error:
while (i--)
mi_close(mrg->file[i]);
- my_free((gptr) mrg->file,MYF(0));
+ my_free((uchar*) mrg->file,MYF(0));
return 1;
}
@@ -513,14 +513,14 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table)
{
/* Make a new indexfile based on first file in list */
uint length;
- char *buff;
+ uchar *buff;
strmov(org_name,result_table); /* Fix error messages */
VOID(fn_format(new_name,result_table,"",MI_NAME_IEXT,2));
if ((join_isam_file=my_create(new_name,0,tmpfile_createflag,MYF(MY_WME)))
< 0)
goto err;
length=(uint) share->base.keystart;
- if (!(buff=my_malloc(length,MYF(MY_WME))))
+ if (!(buff= (uchar*) my_malloc(length,MYF(MY_WME))))
goto err;
if (my_pread(share->kfile,buff,length,0L,MYF(MY_WME | MY_NABP)) ||
my_write(join_isam_file,buff,length,
@@ -644,7 +644,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table)
new_length=file_buffer.pos_in_file;
if (!error && !test_only)
{
- char buff[MEMMAP_EXTRA_MARGIN]; /* End marginal for memmap */
+ uchar buff[MEMMAP_EXTRA_MARGIN]; /* End marginal for memmap */
bzero(buff,sizeof(buff));
error=my_write(file_buffer.file,buff,sizeof(buff),
MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)) != 0;
@@ -811,11 +811,11 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees,
for (i=0 ; i < trees ; i++)
{
if (huff_trees[i].element_buffer)
- my_free((gptr) huff_trees[i].element_buffer,MYF(0));
+ my_free((uchar*) huff_trees[i].element_buffer,MYF(0));
if (huff_trees[i].code)
- my_free((gptr) huff_trees[i].code,MYF(0));
+ my_free((uchar*) huff_trees[i].code,MYF(0));
}
- my_free((gptr) huff_trees,MYF(0));
+ my_free((uchar*) huff_trees,MYF(0));
}
if (huff_counts)
{
@@ -823,11 +823,11 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees,
{
if (huff_counts[i].tree_buff)
{
- my_free((gptr) huff_counts[i].tree_buff,MYF(0));
+ my_free((uchar*) huff_counts[i].tree_buff,MYF(0));
delete_tree(&huff_counts[i].int_tree);
}
}
- my_free((gptr) huff_counts,MYF(0));
+ my_free((uchar*) huff_counts,MYF(0));
}
delete_queue(&queue); /* This is safe to free */
return;
@@ -840,7 +840,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
int error;
uint length;
ulong reclength,max_blob_length;
- byte *record,*pos,*next_pos,*end_pos,*start_pos;
+ uchar *record,*pos,*next_pos,*end_pos,*start_pos;
ha_rows record_count;
my_bool static_row_size;
HUFF_COUNTS *count,*end_count;
@@ -848,7 +848,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
DBUG_ENTER("get_statistic");
reclength=mrg->file[0]->s->base.reclength;
- record=(byte*) my_alloca(reclength);
+ record=(uchar*) my_alloca(reclength);
end_count=huff_counts+mrg->file[0]->s->base.fields;
record_count=0; glob_crc=0;
max_blob_length=0;
@@ -1032,7 +1032,7 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
{
uint i;
/* Zero fields are just counted. Go to the next record. */
- if (!memcmp((byte*) start_pos,zero_string,count->field_length))
+ if (!memcmp((uchar*) start_pos,zero_string,count->field_length))
{
count->zero_fields++;
continue;
@@ -1141,12 +1141,12 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
mrg->records=record_count;
mrg->max_blob_length=max_blob_length;
- my_afree((gptr) record);
+ my_afree((uchar*) record);
DBUG_RETURN(error != HA_ERR_END_OF_FILE);
}
static int compare_huff_elements(void *not_used __attribute__((unused)),
- byte *a, byte *b)
+ uchar *a, uchar *b)
{
return *((my_off_t*) a) < *((my_off_t*) b) ? -1 :
(*((my_off_t*) a) == *((my_off_t*) b) ? 0 : 1);
@@ -1162,7 +1162,7 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees,
my_off_t old_length,new_length,length;
DBUG_ENTER("check_counts");
- bzero((gptr) field_count,sizeof(field_count));
+ bzero((uchar*) field_count,sizeof(field_count));
space_fields=fill_zero_fields=0;
for (; trees-- ; huff_counts++)
@@ -1328,12 +1328,12 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees,
}
else
{
- my_free((gptr) huff_counts->tree_buff,MYF(0));
+ my_free((uchar*) huff_counts->tree_buff,MYF(0));
delete_tree(&huff_counts->int_tree);
huff_counts->tree_buff=0;
}
if (tree.element_buffer)
- my_free((gptr) tree.element_buffer,MYF(0));
+ my_free((uchar*) tree.element_buffer,MYF(0));
}
if (huff_counts->pack_type & PACK_TYPE_SPACE_FIELDS)
space_fields++;
@@ -1450,8 +1450,8 @@ static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts, uint trees)
if (make_huff_tree(huff_tree+tree,huff_counts+tree))
{
while (tree--)
- my_free((gptr) huff_tree[tree].element_buffer,MYF(0));
- my_free((gptr) huff_tree,MYF(0));
+ my_free((uchar*) huff_tree[tree].element_buffer,MYF(0));
+ my_free((uchar*) huff_tree,MYF(0));
DBUG_RETURN(0);
}
}
@@ -1526,7 +1526,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts)
{
HUFF_ELEMENT *temp;
if (!(temp=
- (HUFF_ELEMENT*) my_realloc((gptr) huff_tree->element_buffer,
+ (HUFF_ELEMENT*) my_realloc((uchar*) huff_tree->element_buffer,
found*2*sizeof(HUFF_ELEMENT),
MYF(MY_WME))))
return 1;
@@ -1561,7 +1561,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts)
*/
tree_walk(&huff_counts->int_tree,
(int (*)(void*, element_count,void*)) save_counts_in_queue,
- (gptr) huff_tree, left_root_right);
+ (uchar*) huff_tree, left_root_right);
}
else
{
@@ -1587,7 +1587,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts)
new_huff_el->count=huff_counts->counts[i];
new_huff_el->a.leaf.null=0;
new_huff_el->a.leaf.element_nr=i;
- queue.root[found]=(byte*) new_huff_el;
+ queue.root[found]=(uchar*) new_huff_el;
}
}
/*
@@ -1604,7 +1604,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts)
new_huff_el->a.leaf.element_nr=huff_tree->min_chr=last-1;
else
new_huff_el->a.leaf.element_nr=huff_tree->max_chr=last+1;
- queue.root[found]=(byte*) new_huff_el;
+ queue.root[found]=(uchar*) new_huff_el;
}
}
@@ -1654,7 +1654,7 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts)
Replace the copied top element by the new element and re-order the
queue.
*/
- queue.root[1]=(byte*) new_huff_el;
+ queue.root[1]=(uchar*) new_huff_el;
queue_replaced(&queue);
}
huff_tree->root=(HUFF_ELEMENT*) queue.root[1];
@@ -1693,7 +1693,7 @@ static int compare_tree(void* cmp_arg __attribute__((unused)),
0
*/
-static int save_counts_in_queue(byte *key, element_count count,
+static int save_counts_in_queue(uchar *key, element_count count,
HUFF_TREE *tree)
{
HUFF_ELEMENT *new_huff_el;
@@ -1703,7 +1703,7 @@ static int save_counts_in_queue(byte *key, element_count count,
new_huff_el->a.leaf.null=0;
new_huff_el->a.leaf.element_nr= (uint) (key- tree->counts->tree_buff) /
tree->counts->field_length;
- queue.root[tree->elements]=(byte*) new_huff_el;
+ queue.root[tree->elements]=(uchar*) new_huff_el;
return 0;
}
@@ -1760,7 +1760,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,
first=i;
last=i;
/* We start with root[1], which is the queues top element. */
- queue.root[found]=(byte*) &huff_counts->counts[i];
+ queue.root[found]=(uchar*) &huff_counts->counts[i];
}
}
if (!found)
@@ -1771,7 +1771,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,
the loop, which follows the Huffman algorithm.
*/
if (found < 2)
- queue.root[++found]=(byte*) &huff_counts->counts[last ? 0 : 1];
+ queue.root[++found]=(uchar*) &huff_counts->counts[last ? 0 : 1];
/* Make a queue from the queue buffer. */
queue.elements=found;
@@ -1826,7 +1826,7 @@ static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,
queue. This successively replaces the references to counts by
references to HUFF_ELEMENTs.
*/
- queue.root[1]=(byte*) new_huff_el;
+ queue.root[1]=(uchar*) new_huff_el;
queue_replaced(&queue);
}
DBUG_RETURN(bytes_packed+(bits_packed+7)/8);
@@ -1859,12 +1859,12 @@ static uint join_same_trees(HUFF_COUNTS *huff_counts, uint trees)
i->tree->tree_pack_length+j->tree->tree_pack_length+
ALLOWED_JOIN_DIFF)
{
- memcpy_fixed((byte*) i->counts,(byte*) count.counts,
+ memcpy_fixed((uchar*) i->counts,(uchar*) count.counts,
sizeof(count.counts[0])*256);
- my_free((gptr) j->tree->element_buffer,MYF(0));
+ my_free((uchar*) j->tree->element_buffer,MYF(0));
j->tree->element_buffer=0;
j->tree=i->tree;
- bmove((byte*) i->counts,(byte*) count.counts,
+ bmove((uchar*) i->counts,(uchar*) count.counts,
sizeof(count.counts[0])*256);
if (make_huff_tree(i->tree,i))
return (uint) -1;
@@ -2007,7 +2007,7 @@ static char *hexdigits(ulonglong value)
static int write_header(PACK_MRG_INFO *mrg,uint head_length,uint trees,
my_off_t tot_elements,my_off_t filelength)
{
- byte *buff= (byte*) file_buffer.pos;
+ uchar *buff= (uchar*) file_buffer.pos;
bzero(buff,HEAD_LENGTH);
memcpy_fixed(buff,myisam_pack_file_magic,4);
@@ -2023,7 +2023,7 @@ static int write_header(PACK_MRG_INFO *mrg,uint head_length,uint trees,
if (test_only)
return 0;
VOID(my_seek(file_buffer.file,0L,MY_SEEK_SET,MYF(0)));
- return my_write(file_buffer.file,(const byte *) file_buffer.pos,HEAD_LENGTH,
+ return my_write(file_buffer.file,(const uchar *) file_buffer.pos,HEAD_LENGTH,
MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)) != 0;
}
@@ -2159,7 +2159,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
{ /* This should be impossible */
VOID(fprintf(stderr, "Tree offset got too big: %d, aborted\n",
huff_tree->max_offset));
- my_afree((gptr) packed_tree);
+ my_afree((uchar*) packed_tree);
return 0;
}
@@ -2331,7 +2331,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
DBUG_PRINT("info", (" "));
if (verbose >= 2)
VOID(printf("\n"));
- my_afree((gptr) packed_tree);
+ my_afree((uchar*) packed_tree);
if (errors)
{
VOID(fprintf(stderr, "Error: Generated decode trees are corrupt. Stop.\n"));
@@ -2412,7 +2412,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
my_off_t record_count;
char llbuf[32];
ulong length,pack_length;
- byte *record,*pos,*end_pos,*record_pos,*start_pos;
+ uchar *record,*pos,*end_pos,*record_pos,*start_pos;
HUFF_COUNTS *count,*end_count;
HUFF_TREE *tree;
MI_INFO *isam_file=mrg->file[0];
@@ -2420,7 +2420,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
DBUG_ENTER("compress_isam_file");
/* Allocate a buffer for the records (excluding blobs). */
- if (!(record=(byte*) my_alloca(isam_file->s->base.reclength)))
+ if (!(record=(uchar*) my_alloca(isam_file->s->base.reclength)))
return -1;
end_count=huff_counts+isam_file->s->base.fields;
@@ -2471,7 +2471,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
{
if (flush_buffer((ulong) max_calc_length + (ulong) max_pack_length))
break;
- record_pos= (byte*) file_buffer.pos;
+ record_pos= (uchar*) file_buffer.pos;
file_buffer.pos+=max_pack_length;
for (start_pos=record, count= huff_counts; count < end_count ; count++)
{
@@ -2508,7 +2508,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
switch (count->field_type) {
case FIELD_SKIP_ZERO:
- if (!memcmp((byte*) start_pos,zero_string,field_length))
+ if (!memcmp((uchar*) start_pos,zero_string,field_length))
{
DBUG_PRINT("fields", ("FIELD_SKIP_ZERO zeroes only, bits: 1"));
write_bits(1,1);
@@ -2637,7 +2637,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
break;
case FIELD_INTERVALL:
global_count=count;
- pos=(byte*) tree_search(&count->int_tree, start_pos,
+ pos=(uchar*) tree_search(&count->int_tree, start_pos,
count->int_tree.custom_arg);
intervall=(uint) (pos - count->tree_buff)/field_length;
DBUG_PRINT("fields", ("FIELD_INTERVALL"));
@@ -2660,7 +2660,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
}
else
{
- byte *blob,*blob_end;
+ uchar *blob,*blob_end;
DBUG_PRINT("fields", ("FIELD_BLOB not empty, bits: 1"));
write_bits(0,1);
/* Write the blob length. */
@@ -2701,7 +2701,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
}
else
{
- byte *end= start_pos + var_pack_length + col_length;
+ uchar *end= start_pos + var_pack_length + col_length;
DBUG_PRINT("fields", ("FIELD_VARCHAR not empty, bits: 1"));
write_bits(0,1);
/* Write the varchar length. */
@@ -2733,7 +2733,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
DBUG_PRINT("fields", ("---"));
}
flush_bits();
- length=(ulong) ((byte*) file_buffer.pos - record_pos) - max_pack_length;
+ length=(ulong) ((uchar*) file_buffer.pos - record_pos) - max_pack_length;
pack_length= save_pack_length(pack_version, record_pos, length);
if (pack_blob_length)
pack_length+= save_pack_length(pack_version, record_pos + pack_length,
@@ -2773,7 +2773,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
if (verbose >= 2)
VOID(printf("wrote %s records.\n", llstr((longlong) record_count, llbuf)));
- my_afree((gptr) record);
+ my_afree((uchar*) record);
mrg->ref_length=max_pack_length;
mrg->min_pack_length=max_record_length ? min_record_length : 0;
mrg->max_pack_length=max_record_length;
@@ -2840,7 +2840,7 @@ static int flush_buffer(ulong neaded_length)
if (test_only)
return 0;
if (error_on_write|| my_write(file_buffer.file,
- (const byte*) file_buffer.buffer,
+ (const uchar*) file_buffer.buffer,
length,
MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)))
{
@@ -2867,7 +2867,7 @@ static int flush_buffer(ulong neaded_length)
static void end_file_buffer(void)
{
- my_free((gptr) file_buffer.buffer,MYF(0));
+ my_free((uchar*) file_buffer.buffer,MYF(0));
}
/* output `bits` low bits of `value' */
@@ -3025,7 +3025,7 @@ static void mrg_reset(PACK_MRG_INFO *mrg)
}
}
-static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf)
+static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf)
{
int error;
MI_INFO *isam_info;
@@ -3048,7 +3048,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,byte *buf)
for (;;)
{
isam_info->update&= HA_STATE_CHANGED;
- if (!(error=(*isam_info->s->read_rnd)(isam_info,(byte*) buf,
+ if (!(error=(*isam_info->s->read_rnd)(isam_info,(uchar*) buf,
filepos, 1)) ||
error != HA_ERR_END_OF_FILE)
return (error);
@@ -3071,7 +3071,7 @@ static int mrg_close(PACK_MRG_INFO *mrg)
for (i=0 ; i < mrg->count ; i++)
error|=mi_close(mrg->file[i]);
if (mrg->free_file)
- my_free((gptr) mrg->file,MYF(0));
+ my_free((uchar*) mrg->file,MYF(0));
return error;
}
@@ -3134,7 +3134,7 @@ static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count)
*/
if (huff_counts->tree_buff)
{
- my_free((gptr) huff_counts->tree_buff, MYF(0));
+ my_free((uchar*) huff_counts->tree_buff, MYF(0));
delete_tree(&huff_counts->int_tree);
huff_counts->tree_buff= NULL;
DBUG_PRINT("fakebigcodes", ("freed distinct column values"));
diff --git a/storage/myisam/rt_index.c b/storage/myisam/rt_index.c
index cf144839dd1..63ed60586d6 100644
--- a/storage/myisam/rt_index.c
+++ b/storage/myisam/rt_index.c
@@ -141,11 +141,11 @@ static int rtree_find_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint search_flag,
res = 1;
ok:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
return res;
err1:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
info->lastpos = HA_OFFSET_ERROR;
return -1;
}
@@ -356,11 +356,11 @@ static int rtree_get_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint key_length,
res = 1;
ok:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
return res;
err1:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
info->lastpos = HA_OFFSET_ERROR;
return -1;
}
@@ -602,11 +602,11 @@ static int rtree_insert_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
}
ok:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
DBUG_RETURN(res);
err1:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
DBUG_RETURN(-1); /* purecov: inspected */
}
@@ -690,10 +690,10 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key,
DBUG_PRINT("rtree", ("new root page: %lu level: %d nod_flag: %u",
(ulong) new_root, 0, mi_test_if_nod(new_root_buf)));
- my_afree((byte*)new_root_buf);
+ my_afree((uchar*)new_root_buf);
break;
err1:
- my_afree((byte*)new_root_buf);
+ my_afree((uchar*)new_root_buf);
DBUG_RETURN(-1); /* purecov: inspected */
}
default:
@@ -739,7 +739,7 @@ static int rtree_fill_reinsert_list(stPageList *ReinsertList, my_off_t page,
if (ReinsertList->n_pages == ReinsertList->m_pages)
{
ReinsertList->m_pages += REINSERT_BUFFER_INC;
- if (!(ReinsertList->pages = (stPageLevel*)my_realloc((gptr)ReinsertList->pages,
+ if (!(ReinsertList->pages = (stPageLevel*)my_realloc((uchar*)ReinsertList->pages,
ReinsertList->m_pages * sizeof(stPageLevel), MYF(MY_ALLOW_ZERO_PTR))))
goto err1;
}
@@ -891,11 +891,11 @@ static int rtree_delete_req(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *key,
res = 1;
ok:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
DBUG_RETURN(res);
err1:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
DBUG_RETURN(-1); /* purecov: inspected */
}
@@ -968,7 +968,7 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
if ((res= rtree_insert_level(info, keynr, k, key_length,
ReinsertList.pages[i].level)) == -1)
{
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
goto err1;
}
if (res)
@@ -984,13 +984,13 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length)
}
}
}
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
if (_mi_dispose(info, keyinfo, ReinsertList.pages[i].offs,
DFLT_INIT_HITS))
goto err1;
}
if (ReinsertList.pages)
- my_free((byte*) ReinsertList.pages, MYF(0));
+ my_free((uchar*) ReinsertList.pages, MYF(0));
/* check for redundant root (not leaf, 1 child) and eliminate */
if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR)
@@ -1117,11 +1117,11 @@ ha_rows rtree_estimate(MI_INFO *info, uint keynr, uchar *key,
res = HA_POS_ERROR;
}
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
return res;
err1:
- my_afree((byte*)page_buf);
+ my_afree((uchar*)page_buf);
return HA_POS_ERROR;
}
diff --git a/storage/myisam/rt_split.c b/storage/myisam/rt_split.c
index 0f6dc872958..ef988dbd048 100644
--- a/storage/myisam/rt_split.c
+++ b/storage/myisam/rt_split.c
@@ -345,10 +345,10 @@ int rtree_split_page(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uchar *key,
DFLT_INIT_HITS, new_page);
DBUG_PRINT("rtree", ("split new block: %lu", (ulong) *new_page_offs));
- my_afree((byte*)new_page);
+ my_afree((uchar*)new_page);
split_err:
- my_afree((byte*) coord_buf);
+ my_afree((uchar*) coord_buf);
DBUG_RETURN(err_code);
}
diff --git a/storage/myisam/rt_test.c b/storage/myisam/rt_test.c
index 55b52c0c3bf..7d15afd12ef 100644
--- a/storage/myisam/rt_test.c
+++ b/storage/myisam/rt_test.c
@@ -28,9 +28,9 @@
#define KEYALG HA_KEY_ALG_RTREE
static int read_with_pos(MI_INFO * file, int silent);
-static void create_record(char *record,uint rownr);
-static void create_record1(char *record,uint rownr);
-static void print_record(char * record,my_off_t offs,const char * tail);
+static void create_record(uchar *record,uint rownr);
+static void create_record1(uchar *record,uint rownr);
+static void print_record(uchar * record,my_off_t offs,const char * tail);
static int run_test(const char *filename);
static double rt_data[]=
@@ -108,8 +108,8 @@ static int run_test(const char *filename)
int i;
int error;
int row_count=0;
- char record[MAX_REC_LENGTH];
- char read_record[MAX_REC_LENGTH];
+ uchar record[MAX_REC_LENGTH];
+ uchar read_record[MAX_REC_LENGTH];
int upd= 10;
ha_rows hrows;
@@ -342,7 +342,7 @@ static int read_with_pos (MI_INFO * file,int silent)
{
int error;
int i;
- char read_record[MAX_REC_LENGTH];
+ uchar read_record[MAX_REC_LENGTH];
if (!silent)
printf("- Reading rows with position\n");
@@ -385,12 +385,12 @@ static void bprint_record(char * record,
#endif
-static void print_record(char * record,
+static void print_record(uchar * record,
my_off_t offs __attribute__((unused)),
const char * tail)
{
int i;
- char * pos;
+ uchar * pos;
double c;
printf(" rec=(%d)",(unsigned char)record[0]);
@@ -407,16 +407,16 @@ static void print_record(char * record,
-static void create_record1(char *record,uint rownr)
+static void create_record1(uchar *record,uint rownr)
{
int i;
- char * pos;
+ uchar * pos;
double c=rownr+10;
bzero((char*) record,MAX_REC_LENGTH);
record[0]=0x01; /* DEL marker */
- for ( pos=record+1, i=0; i<2*ndims; i++)
+ for (pos=record+1, i=0; i<2*ndims; i++)
{
memcpy(pos,&c,sizeof(c));
float8store(pos,c);
@@ -426,7 +426,7 @@ static void create_record1(char *record,uint rownr)
#ifdef NOT_USED
-static void create_record0(char *record,uint rownr)
+static void create_record0(uchar *record,uint rownr)
{
int i;
char * pos;
@@ -449,16 +449,16 @@ static void create_record0(char *record,uint rownr)
#endif
-static void create_record(char *record,uint rownr)
+static void create_record(uchar *record,uint rownr)
{
int i;
- char *pos;
+ uchar *pos;
double *data= rt_data+rownr*4;
record[0]=0x01; /* DEL marker */
- for ( pos=record+1, i=0; i<ndims*2; i++)
+ for (pos=record+1, i=0; i<ndims*2; i++)
{
- float8store(pos,data[i]);
- pos+=8;
+ float8store(pos,data[i]);
+ pos+=8;
}
}
diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c
index 53eb6b2e310..2146a8d16cb 100644
--- a/storage/myisam/sort.c
+++ b/storage/myisam/sort.c
@@ -78,13 +78,13 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info,uchar **sort_keys,
static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile,BUFFPEK *buffpek,
uint sort_length);
static int NEAR_F write_merge_key(MI_SORT_PARAM *info, IO_CACHE *to_file,
- char *key, uint sort_length, uint count);
+ uchar *key, uint sort_length, uint count);
static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info,
IO_CACHE *to_file,
- char* key, uint sort_length,
+ uchar* key, uint sort_length,
uint count);
static inline int
-my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, byte *bufs);
+my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, uchar *bufs);
/*
Creates a index of sorted keys
@@ -116,7 +116,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
{
info->write_keys=write_keys_varlen;
info->read_to_buffer=read_to_buffer_varlen;
- info->write_key=write_merge_key_varlen;
+ info->write_key= write_merge_key_varlen;
}
else
{
@@ -138,9 +138,10 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
while (memavl >= MIN_SORT_MEMORY)
{
- if ((my_off_t) (records+1)*(sort_length+sizeof(char*)) <=
- (my_off_t) memavl)
- keys= records+1;
+ if ((records < UINT_MAX32) &&
+ ((my_off_t) (records + 1) *
+ (sort_length + sizeof(char*)) <= (my_off_t) memavl))
+ keys= (uint)records+1;
else
do
{
@@ -151,7 +152,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
keys < (uint) maxbuffer)
{
mi_check_print_error(info->sort_info->param,
- "sort_buffer_size is to small");
+ "myisam_sort_buffer_size is too small");
goto err;
}
}
@@ -163,7 +164,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer,
maxbuffer/2))
{
- my_free((gptr) sort_keys,MYF(0));
+ my_free((uchar*) sort_keys,MYF(0));
sort_keys= 0;
}
else
@@ -175,7 +176,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
}
if (memavl < MIN_SORT_MEMORY)
{
- mi_check_print_error(info->sort_info->param,"Sort buffer to small"); /* purecov: tested */
+ mi_check_print_error(info->sort_info->param,"MyISAM sort buffer too small"); /* purecov: tested */
goto err; /* purecov: tested */
}
(*info->lock_in_memory)(info->sort_info->param);/* Everything is allocated */
@@ -230,9 +231,9 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
reinit_io_cache(&tempfile_for_exceptions,READ_CACHE,0L,0,0))
goto err;
- while (!my_b_read(&tempfile_for_exceptions,(byte*)&key_length,
+ while (!my_b_read(&tempfile_for_exceptions,(uchar*)&key_length,
sizeof(key_length))
- && !my_b_read(&tempfile_for_exceptions,(byte*)sort_keys,
+ && !my_b_read(&tempfile_for_exceptions,(uchar*)sort_keys,
(uint) key_length))
{
if (_mi_ck_write(idx,keyno,(uchar*) sort_keys,key_length-ref_length))
@@ -244,7 +245,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
err:
if (sort_keys)
- my_free((gptr) sort_keys,MYF(0));
+ my_free((uchar*) sort_keys,MYF(0));
delete_dynamic(&buffpek);
close_cached_file(&tempfile);
close_cached_file(&tempfile_for_exceptions);
@@ -348,7 +349,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
sort_keys= (uchar **) NULL;
memavl= max(sort_param->sortbuff_size, MIN_SORT_MEMORY);
- idx= sort_param->sort_info->max_records;
+ idx= (uint)sort_param->sort_info->max_records;
sort_length= sort_param->key_length;
maxbuffer= 1;
@@ -369,7 +370,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
keys < (uint) maxbuffer)
{
mi_check_print_error(sort_param->sort_info->param,
- "sort_buffer_size is to small");
+ "myisam_sort_buffer_size is too small");
goto err;
}
}
@@ -383,7 +384,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
maxbuffer, maxbuffer/2))
{
- my_free((gptr) sort_keys,MYF(0));
+ my_free((uchar*) sort_keys,MYF(0));
sort_keys= (uchar **) NULL; /* for err: label */
}
else
@@ -397,7 +398,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
if (memavl < MIN_SORT_MEMORY)
{
mi_check_print_error(sort_param->sort_info->param,
- "Sort buffer too small");
+ "MyISAM sort buffer too small");
goto err; /* purecov: tested */
}
@@ -453,7 +454,7 @@ err:
DBUG_PRINT("error", ("got some error"));
sort_param->sort_info->got_error= 1; /* no need to protect with a mutex */
if (sort_keys)
- my_free((gptr) sort_keys,MYF(0));
+ my_free((uchar*) sort_keys,MYF(0));
sort_param->sort_keys= 0;
delete_dynamic(& sort_param->buffpek);
close_cached_file(&sort_param->tempfile);
@@ -495,7 +496,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
MI_INFO *info=sort_info->info;
MYISAM_SHARE *share=info->s;
MI_SORT_PARAM *sinfo;
- byte *mergebuf=0;
+ uchar *mergebuf=0;
DBUG_ENTER("thr_write_keys");
LINT_INIT(length);
@@ -530,7 +531,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
sinfo->notnull: NULL,
(ulonglong) info->state->records);
}
- my_free((gptr) sinfo->sort_keys,MYF(0));
+ my_free((uchar*) sinfo->sort_keys,MYF(0));
my_free(mi_get_rec_buff_ptr(info, sinfo->rec_buff),
MYF(MY_ALLOW_ZERO_PTR));
sinfo->sort_keys=0;
@@ -621,12 +622,12 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
}
while (!got_error &&
- !my_b_read(&sinfo->tempfile_for_exceptions,(byte*)&key_length,
+ !my_b_read(&sinfo->tempfile_for_exceptions,(uchar*)&key_length,
sizeof(key_length)))
{
- byte ft_buf[HA_FT_MAXBYTELEN + HA_FT_WLEN + 10];
+ uchar ft_buf[HA_FT_MAXBYTELEN + HA_FT_WLEN + 10];
if (key_length > sizeof(ft_buf) ||
- my_b_read(&sinfo->tempfile_for_exceptions, (byte*)ft_buf,
+ my_b_read(&sinfo->tempfile_for_exceptions, (uchar*)ft_buf,
(uint)key_length) ||
_mi_ck_write(info, sinfo->key, (uchar*)ft_buf,
key_length - info->s->rec_reflength))
@@ -634,7 +635,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
}
}
}
- my_free((gptr) mergebuf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((uchar*) mergebuf,MYF(MY_ALLOW_ZERO_PTR));
DBUG_RETURN(got_error);
}
#endif /* THREAD */
@@ -648,7 +649,7 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys,
uint sort_length=info->key_length;
DBUG_ENTER("write_keys");
- qsort2((byte*) sort_keys,count,sizeof(byte*),(qsort2_cmp) info->key_cmp,
+ qsort2((uchar*) sort_keys,count,sizeof(uchar*),(qsort2_cmp) info->key_cmp,
info);
if (!my_b_inited(tempfile) &&
open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST",
@@ -660,7 +661,7 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys,
for (end=sort_keys+count ; sort_keys != end ; sort_keys++)
{
- if (my_b_write(tempfile,(byte*) *sort_keys,(uint) sort_length))
+ if (my_b_write(tempfile,(uchar*) *sort_keys,(uint) sort_length))
DBUG_RETURN(1); /* purecov: inspected */
}
DBUG_RETURN(0);
@@ -668,13 +669,13 @@ static int NEAR_F write_keys(MI_SORT_PARAM *info, register uchar **sort_keys,
static inline int
-my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, byte *bufs)
+my_var_write(MI_SORT_PARAM *info, IO_CACHE *to_file, uchar *bufs)
{
int err;
uint16 len = _mi_keylength(info->keyinfo, (uchar*) bufs);
/* The following is safe as this is a local file */
- if ((err= my_b_write(to_file, (byte*)&len, sizeof(len))))
+ if ((err= my_b_write(to_file, (uchar*)&len, sizeof(len))))
return (err);
if ((err= my_b_write(to_file,bufs, (uint) len)))
return (err);
@@ -691,7 +692,7 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info,
int err;
DBUG_ENTER("write_keys_varlen");
- qsort2((byte*) sort_keys,count,sizeof(byte*),(qsort2_cmp) info->key_cmp,
+ qsort2((uchar*) sort_keys,count,sizeof(uchar*),(qsort2_cmp) info->key_cmp,
info);
if (!my_b_inited(tempfile) &&
open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST",
@@ -702,7 +703,7 @@ static int NEAR_F write_keys_varlen(MI_SORT_PARAM *info,
buffpek->count=count;
for (end=sort_keys+count ; sort_keys != end ; sort_keys++)
{
- if ((err= my_var_write(info,tempfile, (byte*) *sort_keys)))
+ if ((err= my_var_write(info,tempfile, (uchar*) *sort_keys)))
DBUG_RETURN(err);
}
DBUG_RETURN(0);
@@ -720,8 +721,8 @@ static int NEAR_F write_key(MI_SORT_PARAM *info, uchar *key,
DISK_BUFFER_SIZE, info->sort_info->param->myf_rw))
DBUG_RETURN(1);
- if (my_b_write(tempfile,(byte*)&key_length,sizeof(key_length)) ||
- my_b_write(tempfile,(byte*)key,(uint) key_length))
+ if (my_b_write(tempfile,(uchar*)&key_length,sizeof(key_length)) ||
+ my_b_write(tempfile,(uchar*)key,(uint) key_length))
DBUG_RETURN(1);
DBUG_RETURN(0);
} /* write_key */
@@ -734,7 +735,7 @@ static int NEAR_F write_index(MI_SORT_PARAM *info, register uchar **sort_keys,
{
DBUG_ENTER("write_index");
- qsort2((gptr) sort_keys,(size_t) count,sizeof(byte*),
+ qsort2((uchar*) sort_keys,(size_t) count,sizeof(uchar*),
(qsort2_cmp) info->key_cmp,info);
while (count--)
{
@@ -773,7 +774,7 @@ static int NEAR_F merge_many_buff(MI_SORT_PARAM *info, uint keys,
{
if (merge_buffers(info,keys,from_file,to_file,sort_keys,lastbuff++,
buffpek+i,buffpek+i+MERGEBUFF-1))
- break; /* purecov: inspected */
+ goto cleanup;
}
if (merge_buffers(info,keys,from_file,to_file,sort_keys,lastbuff++,
buffpek+i,buffpek+ *maxbuffer))
@@ -783,6 +784,7 @@ static int NEAR_F merge_many_buff(MI_SORT_PARAM *info, uint keys,
temp=from_file; from_file=to_file; to_file=temp;
*maxbuffer= (int) (lastbuff-buffpek)-1;
}
+cleanup:
close_cached_file(to_file); /* This holds old result */
if (to_file == t_file)
*t_file=t_file2; /* Copy result file */
@@ -812,7 +814,7 @@ static uint NEAR_F read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
{
- if (my_pread(fromfile->file,(byte*) buffpek->base,
+ if (my_pread(fromfile->file,(uchar*) buffpek->base,
(length= sort_length*count),buffpek->file_pos,MYF_RW))
return((uint) -1); /* purecov: inspected */
buffpek->key=buffpek->base;
@@ -837,11 +839,11 @@ static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
for (idx=1;idx<=count;idx++)
{
- if (my_pread(fromfile->file,(byte*)&length_of_key,sizeof(length_of_key),
+ if (my_pread(fromfile->file,(uchar*)&length_of_key,sizeof(length_of_key),
buffpek->file_pos,MYF_RW))
return((uint) -1);
buffpek->file_pos+=sizeof(length_of_key);
- if (my_pread(fromfile->file,(byte*) buffp,length_of_key,
+ if (my_pread(fromfile->file,(uchar*) buffp,length_of_key,
buffpek->file_pos,MYF_RW))
return((uint) -1);
buffpek->file_pos+=length_of_key;
@@ -856,16 +858,16 @@ static uint NEAR_F read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info,
- IO_CACHE *to_file,char* key,
+ IO_CACHE *to_file, uchar* key,
uint sort_length, uint count)
{
uint idx;
- char *bufs = key;
+ uchar *bufs = key;
for (idx=1;idx<=count;idx++)
{
int err;
- if ((err= my_var_write(info,to_file, (byte*) bufs)))
+ if ((err= my_var_write(info, to_file, bufs)))
return (err);
bufs=bufs+sort_length;
}
@@ -874,10 +876,10 @@ static int NEAR_F write_merge_key_varlen(MI_SORT_PARAM *info,
static int NEAR_F write_merge_key(MI_SORT_PARAM *info __attribute__((unused)),
- IO_CACHE *to_file, char* key,
+ IO_CACHE *to_file, uchar *key,
uint sort_length, uint count)
{
- return my_b_write(to_file,(byte*) key,(uint) sort_length*count);
+ return my_b_write(to_file, key, (size_t) sort_length*count);
}
/*
@@ -909,7 +911,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file,
sort_length=info->key_length;
if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0,
- (int (*)(void*, byte *,byte*)) info->key_cmp,
+ (int (*)(void*, uchar *,uchar*)) info->key_cmp,
(void*) info))
DBUG_RETURN(1); /* purecov: inspected */
@@ -922,7 +924,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file,
sort_length));
if (error == -1)
goto err; /* purecov: inspected */
- queue_insert(&queue,(char*) buffpek);
+ queue_insert(&queue,(uchar*) buffpek);
}
while (queue.elements > 1)
@@ -936,7 +938,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file,
buffpek=(BUFFPEK*) queue_top(&queue);
if (to_file)
{
- if (info->write_key(info,to_file,(byte*) buffpek->key,
+ if (info->write_key(info,to_file,(uchar*) buffpek->key,
(uint) sort_length,1))
{
error=1; goto err; /* purecov: inspected */
@@ -992,7 +994,7 @@ merge_buffers(MI_SORT_PARAM *info, uint keys, IO_CACHE *from_file,
{
if (to_file)
{
- if (info->write_key(info,to_file,(byte*) buffpek->key,
+ if (info->write_key(info,to_file,(uchar*) buffpek->key,
sort_length,buffpek->mem_count))
{
error=1; goto err; /* purecov: inspected */
@@ -1045,7 +1047,7 @@ flush_ft_buf(MI_SORT_PARAM *info)
if (info->sort_info->ft_buf)
{
err=sort_ft_buf_flush(info);
- my_free((gptr)info->sort_info->ft_buf, MYF(0));
+ my_free((uchar*)info->sort_info->ft_buf, MYF(0));
info->sort_info->ft_buf=0;
}
return err;
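
Besides the type changes, the sort.c hunk above adds a (records < UINT_MAX32) guard before deciding that all keys fit in memory, so that a 64-bit record count can neither truncate in the uint assignment nor push the size multiplication past the buffer check. A sketch of the guarded computation (names are illustrative, not MyISAM's; UINT32_MAX from stdint.h stands in for UINT_MAX32):

#include <stdint.h>
#include <stdio.h>

/* Keys are held entirely in memory only when the record count is
   small enough that (records + 1) * slot_size both fits a uint and
   fits the available sort buffer; otherwise sort in chunks. */
static uint32_t keys_that_fit(uint64_t records, uint64_t slot_size,
                              uint64_t memavl)
{
    if (records < UINT32_MAX &&
        (records + 1) * slot_size <= memavl)
        return (uint32_t) records + 1;            /* everything fits */
    return (uint32_t) (memavl / slot_size);       /* chunked fallback */
}

int main(void)
{
    printf("%u\n", (unsigned) keys_that_fit(1000, 24, 1 << 20));
    printf("%u\n", (unsigned) keys_that_fit(UINT64_MAX / 2, 24, 1 << 20));
    return 0;
}

Without the first condition, a huge record count would previously sail through the my_off_t comparison and then be silently truncated when stored into the uint keys counter.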
diff --git a/storage/myisam/sp_defs.h b/storage/myisam/sp_defs.h
index 11254d16c97..187ec62b2a3 100644
--- a/storage/myisam/sp_defs.h
+++ b/storage/myisam/sp_defs.h
@@ -40,7 +40,7 @@ enum wkbByteOrder
};
uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key,
- const byte *record, my_off_t filepos);
+ const uchar *record, my_off_t filepos);
#endif /*HAVE_SPATIAL*/
#endif /* _SP_DEFS_H */
diff --git a/storage/myisam/sp_key.c b/storage/myisam/sp_key.c
index 34c96a219c7..3748a38ff81 100644
--- a/storage/myisam/sp_key.c
+++ b/storage/myisam/sp_key.c
@@ -31,25 +31,20 @@ static int sp_get_geometry_mbr(uchar *(*wkb), uchar *end, uint n_dims,
double *mbr, int top);
static int sp_mbr_from_wkb(uchar (*wkb), uint size, uint n_dims, double *mbr);
-static void get_double(double *d, const byte *pos)
-{
- float8get(*d, pos);
-}
-
uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key,
- const byte *record, my_off_t filepos)
+ const uchar *record, my_off_t filepos)
{
HA_KEYSEG *keyseg;
MI_KEYDEF *keyinfo = &info->s->keyinfo[keynr];
uint len = 0;
- byte *pos;
+ uchar *pos;
uint dlen;
uchar *dptr;
double mbr[SPDIMS * 2];
uint i;
keyseg = &keyinfo->seg[-1];
- pos = (byte*)record + keyseg->start;
+ pos = (uchar*)record + keyseg->start;
dlen = _mi_calc_blob_length(keyseg->bit_start, pos);
memcpy_fixed(&dptr, pos + keyseg->bit_start, sizeof(char*));
@@ -62,48 +57,40 @@ uint sp_make_key(register MI_INFO *info, uint keynr, uchar *key,
for (i = 0, keyseg = keyinfo->seg; keyseg->type; keyseg++, i++)
{
- uint length = keyseg->length;
+ uint length = keyseg->length, start= keyseg->start;
+ double val;
+
+ DBUG_ASSERT(length == sizeof(double));
+ DBUG_ASSERT(!(start % sizeof(double)));
+ DBUG_ASSERT(start < sizeof(mbr));
+ DBUG_ASSERT(keyseg->type == HA_KEYTYPE_DOUBLE);
- pos = ((byte*)mbr) + keyseg->start;
- if (keyseg->flag & HA_SWAP_KEY)
- {
+ val= mbr[start / sizeof (double)];
#ifdef HAVE_ISNAN
- if (keyseg->type == HA_KEYTYPE_FLOAT)
- {
- float nr;
- float4get(nr, pos);
- if (isnan(nr))
- {
- /* Replace NAN with zero */
- bzero(key, length);
- key+= length;
- continue;
- }
- }
- else if (keyseg->type == HA_KEYTYPE_DOUBLE)
- {
- double nr;
- get_double(&nr, pos);
- if (isnan(nr))
- {
- bzero(key, length);
- key+= length;
- continue;
- }
- }
+ if (isnan(val))
+ {
+ bzero(key, length);
+ key+= length;
+ len+= length;
+ continue;
+ }
#endif
- pos += length;
- while (length--)
- {
+
+ if (keyseg->flag & HA_SWAP_KEY)
+ {
+ uchar buf[sizeof(double)];
+
+ float8store(buf, val);
+ pos= &buf[length];
+ while (pos > buf)
*key++ = *--pos;
- }
}
else
{
- memcpy((byte*)key, pos, length);
- key += keyseg->length;
+ float8store((uchar *)key, val);
+ key += length;
}
- len += keyseg->length;
+ len+= length;
}
_mi_dpointer(info, key, filepos);
return len;
@@ -141,13 +128,13 @@ static int sp_add_point_to_mbr(uchar *(*wkb), uchar *end, uint n_dims,
{
if ((*wkb) > end - 8)
return -1;
- get_double(&ord, (const byte*) *wkb);
+ float8get(ord, (const uchar*) *wkb);
(*wkb)+= 8;
if (ord < *mbr)
- float8store((char*) mbr, ord);
+ *mbr= ord;
mbr++;
if (ord > *mbr)
- float8store((char*) mbr, ord);
+ *mbr= ord;
mbr++;
}
return 0;
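
The rewritten sp_make_key() above keeps each MBR coordinate as a double, zero-fills the key part for NaN, and for HA_SWAP_KEY emits the stored bytes in reverse order. A standalone sketch of that store path (plain memcpy stands in for float8store, which is an assumption of this sketch, not the MySQL macro itself):

#include <math.h>
#include <stdio.h>
#include <string.h>

static void store_double_key(unsigned char *key, double val, int swap)
{
    unsigned char buf[sizeof(double)];

    if (isnan(val))
    {
        memset(key, 0, sizeof(double));   /* NaN sorts as zero */
        return;
    }
    memcpy(buf, &val, sizeof(double));    /* stand-in for float8store */
    if (swap)
    {
        unsigned char *pos = buf + sizeof(double);
        while (pos > buf)
            *key++ = *--pos;              /* byte-reversed copy */
    }
    else
        memcpy(key, buf, sizeof(double));
}

int main(void)
{
    unsigned char key[sizeof(double)];
    store_double_key(key, 3.5, 1);
    printf("first stored byte: 0x%02x\n", key[0]);
    return 0;
}

Reading the value once into a double and storing it through one code path is what lets the patch drop the old per-type float4get/float8get branches and the get_double() helper.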
diff --git a/storage/myisam/sp_test.c b/storage/myisam/sp_test.c
index 96ba05e8a74..dee32ba423e 100644
--- a/storage/myisam/sp_test.c
+++ b/storage/myisam/sp_test.c
@@ -24,11 +24,11 @@
#define MAX_REC_LENGTH 1024
#define KEYALG HA_KEY_ALG_RTREE
-static void create_linestring(char *record,uint rownr);
-static void print_record(char * record,my_off_t offs,const char * tail);
+static void create_linestring(uchar *record,uint rownr);
+static void print_record(uchar * record,my_off_t offs,const char * tail);
-static void create_key(char *key,uint rownr);
-static void print_key(const char *key,const char * tail);
+static void create_key(uchar *key,uint rownr);
+static void print_key(const uchar *key,const char * tail);
static int run_test(const char *filename);
static int read_with_pos(MI_INFO * file, int silent);
@@ -64,9 +64,9 @@ int run_test(const char *filename)
int i;
int error;
int row_count=0;
- char record[MAX_REC_LENGTH];
- char key[MAX_REC_LENGTH];
- char read_record[MAX_REC_LENGTH];
+ uchar record[MAX_REC_LENGTH];
+ uchar key[MAX_REC_LENGTH];
+ uchar read_record[MAX_REC_LENGTH];
int upd=10;
ha_rows hrows;
@@ -272,7 +272,7 @@ static int read_with_pos (MI_INFO * file,int silent)
{
int error;
int i;
- char read_record[MAX_REC_LENGTH];
+ uchar read_record[MAX_REC_LENGTH];
int rows=0;
if (!silent)
@@ -300,7 +300,7 @@ static int read_with_pos (MI_INFO * file,int silent)
#ifdef NOT_USED
-static void bprint_record(char * record,
+static void bprint_record(uchar * record,
my_off_t offs __attribute__((unused)),
const char * tail)
{
@@ -319,9 +319,9 @@ static void bprint_record(char * record,
#endif
-static void print_record(char * record, my_off_t offs,const char * tail)
+static void print_record(uchar * record, my_off_t offs,const char * tail)
{
- char *pos;
+ uchar *pos;
char *ptr;
uint len;
@@ -341,7 +341,7 @@ static void print_record(char * record, my_off_t offs,const char * tail)
#ifdef NOT_USED
-static void create_point(char *record,uint rownr)
+static void create_point(uchar *record,uint rownr)
{
uint tmp;
char *ptr;
@@ -368,11 +368,11 @@ static void create_point(char *record,uint rownr)
#endif
-static void create_linestring(char *record,uint rownr)
+static void create_linestring(uchar *record,uint rownr)
{
uint tmp;
char *ptr;
- char *pos=record;
+ uchar *pos= record;
double x[200];
int i,j;
int npoints=2;
@@ -396,21 +396,21 @@ static void create_linestring(char *record,uint rownr)
}
-static void create_key(char *key,uint rownr)
+static void create_key(uchar *key,uint rownr)
{
double c=rownr;
- char *pos;
+ uchar *pos;
uint i;
bzero(key,MAX_REC_LENGTH);
- for ( pos=key, i=0; i<2*SPDIMS; i++)
+ for (pos=key, i=0; i<2*SPDIMS; i++)
{
float8store(pos,c);
pos+=sizeof(c);
}
}
-static void print_key(const char *key,const char * tail)
+static void print_key(const uchar *key,const char * tail)
{
double c;
uint i;
diff --git a/storage/myisammrg/CMakeLists.txt b/storage/myisammrg/CMakeLists.txt
index 8c8c8bcf9fb..848f2dfea43 100644..100755
--- a/storage/myisammrg/CMakeLists.txt
+++ b/storage/myisammrg/CMakeLists.txt
@@ -20,9 +20,14 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include)
-ADD_LIBRARY(myisammrg myrg_close.c myrg_create.c myrg_delete.c myrg_extra.c myrg_info.c
+
+SET(MYISAMMRG_SOURCES myrg_close.c myrg_create.c myrg_delete.c myrg_extra.c myrg_info.c
ha_myisammrg.cc
myrg_locking.c myrg_open.c myrg_panic.c myrg_queue.c myrg_range.c
myrg_rfirst.c myrg_rkey.c myrg_rlast.c myrg_rnext.c myrg_rnext_same.c
myrg_rprev.c myrg_rrnd.c myrg_rsame.c myrg_static.c myrg_update.c
myrg_write.c)
+
+IF(NOT SOURCE_SUBLIBS)
+ ADD_LIBRARY(myisammrg ${MYISAMMRG_SOURCES})
+ENDIF(NOT SOURCE_SUBLIBS)
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 96f7db6e633..8a914e8a2de 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -52,6 +52,24 @@ extern int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo,
uint t1_keys, uint t1_recs,
MI_KEYDEF *t2_keyinfo, MI_COLUMNDEF *t2_recinfo,
uint t2_keys, uint t2_recs, bool strict);
+static void split_file_name(const char *file_name,
+ LEX_STRING *db, LEX_STRING *name);
+
+
+extern "C" void myrg_print_wrong_table(const char *table_name)
+{
+ LEX_STRING db, name;
+ char buf[FN_REFLEN];
+ split_file_name(table_name, &db, &name);
+ memcpy(buf, db.str, db.length);
+ buf[db.length]= '.';
+ memcpy(buf + db.length + 1, name.str, name.length);
+ buf[db.length + name.length + 1]= 0;
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ER_ADMIN_WRONG_MRG_TABLE, ER(ER_ADMIN_WRONG_MRG_TABLE),
+ buf);
+}
+
const char **ha_myisammrg::bas_ext() const
{
@@ -102,6 +120,8 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
{
DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu",
table->s->reclength, stats.mean_rec_length));
+ if (test_if_locked & HA_OPEN_FOR_REPAIR)
+ myrg_print_wrong_table(file->open_tables->table->filename);
error= HA_ERR_WRONG_MRG_TABLE_DEF;
goto err;
}
@@ -120,12 +140,19 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
u_table->table->s->base.keys,
u_table->table->s->base.fields, false))
{
- my_free((gptr) recinfo, MYF(0));
error= HA_ERR_WRONG_MRG_TABLE_DEF;
- goto err;
+ if (test_if_locked & HA_OPEN_FOR_REPAIR)
+ myrg_print_wrong_table(u_table->table->filename);
+ else
+ {
+ my_free((uchar*) recinfo, MYF(0));
+ goto err;
+ }
}
}
- my_free((gptr) recinfo, MYF(0));
+ my_free((uchar*) recinfo, MYF(0));
+ if (error == HA_ERR_WRONG_MRG_TABLE_DEF)
+ goto err;
#if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4
/* Merge table has more than 2G rows */
if (table->s->crashed)
@@ -146,9 +173,9 @@ int ha_myisammrg::close(void)
return myrg_close(file);
}
-int ha_myisammrg::write_row(byte * buf)
+int ha_myisammrg::write_row(uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_write_count);
if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables)
return (HA_ERR_TABLE_READONLY);
@@ -164,95 +191,87 @@ int ha_myisammrg::write_row(byte * buf)
return myrg_write(file,buf);
}
-int ha_myisammrg::update_row(const byte * old_data, byte * new_data)
+int ha_myisammrg::update_row(const uchar * old_data, uchar * new_data)
{
- statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
return myrg_update(file,old_data,new_data);
}
-int ha_myisammrg::delete_row(const byte * buf)
+int ha_myisammrg::delete_row(const uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_delete_count);
return myrg_delete(file,buf);
}
-int ha_myisammrg::index_read(byte * buf, const byte * key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_myisammrg::index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=myrg_rkey(file,buf,active_index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_myisammrg::index_read_idx_map(uchar * buf, uint index, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=myrg_rkey(file,buf,index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_read_last(byte * buf, const byte * key,
- key_part_map keypart_map)
+int ha_myisammrg::index_read_last_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map)
{
- statistic_increment(table->in_use->status_var.ha_read_key_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_key_count);
int error=myrg_rkey(file,buf,active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_next(byte * buf)
+int ha_myisammrg::index_next(uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
int error=myrg_rnext(file,buf,active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_prev(byte * buf)
+int ha_myisammrg::index_prev(uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_read_prev_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_prev_count);
int error=myrg_rprev(file,buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_first(byte * buf)
+int ha_myisammrg::index_first(uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_read_first_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_first_count);
int error=myrg_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_last(byte * buf)
+int ha_myisammrg::index_last(uchar * buf)
{
- statistic_increment(table->in_use->status_var.ha_read_last_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_last_count);
int error=myrg_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::index_next_same(byte * buf,
- const byte *key __attribute__((unused)),
+int ha_myisammrg::index_next_same(uchar * buf,
+ const uchar *key __attribute__((unused)),
uint length __attribute__((unused)))
{
- statistic_increment(table->in_use->status_var.ha_read_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_next_count);
int error=myrg_rnext_same(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -265,26 +284,24 @@ int ha_myisammrg::rnd_init(bool scan)
}
-int ha_myisammrg::rnd_next(byte *buf)
+int ha_myisammrg::rnd_next(uchar *buf)
{
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-int ha_myisammrg::rnd_pos(byte * buf, byte *pos)
+int ha_myisammrg::rnd_pos(uchar * buf, uchar *pos)
{
- statistic_increment(table->in_use->status_var.ha_read_rnd_count,
- &LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length));
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
-void ha_myisammrg::position(const byte *record)
+void ha_myisammrg::position(const uchar *record)
{
ulonglong row_position= myrg_position(file);
my_store_ptr(ref, ref_length, (my_off_t) row_position);
@@ -474,8 +491,8 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
goto err;
create_info->merge_list.elements++;
- (*create_info->merge_list.next) = (byte*) ptr;
- create_info->merge_list.next= (byte**) &ptr->next_local;
+ (*create_info->merge_list.next) = (uchar*) ptr;
+ create_info->merge_list.next= (uchar**) &ptr->next_local;
}
*create_info->merge_list.next=0;
}
@@ -603,6 +620,13 @@ bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info,
return COMPATIBLE_DATA_NO;
}
+
+int ha_myisammrg::check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ return HA_ADMIN_OK;
+}
+
+
extern int myrg_panic(enum ha_panic_function flag);
int myisammrg_panic(handlerton *hton, ha_panic_function flag)
{
@@ -615,7 +639,6 @@ static int myisammrg_init(void *p)
myisammrg_hton= (handlerton *)p;
- myisammrg_hton->state= SHOW_OPTION_YES;
myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM;
myisammrg_hton->create= myisammrg_create_handler;
myisammrg_hton->panic= myisammrg_panic;
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index 7bbe659d4b7..91aabe277f7 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -35,6 +35,7 @@ class ha_myisammrg: public handler
ulonglong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_NO_TRANSACTIONS |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
HA_ANY_INDEX_MAY_BE_UNIQUE | HA_CAN_BIT_FIELD |
HA_NO_COPY_ON_ALTER);
@@ -53,23 +54,24 @@ class ha_myisammrg: public handler
int open(const char *name, int mode, uint test_if_locked);
int close(void);
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint index, const byte * key,
- key_part_map keypart_map, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, key_part_map keypart_map);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- int index_next_same(byte *buf, const byte *key, uint keylen);
+ int write_row(uchar * buf);
+ int update_row(const uchar * old_data, uchar * new_data);
+ int delete_row(const uchar * buf);
+ int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_idx_map(uchar *buf, uint index, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+ int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
+ int index_next(uchar * buf);
+ int index_prev(uchar * buf);
+ int index_first(uchar * buf);
+ int index_last(uchar * buf);
+ int index_next_same(uchar *buf, const uchar *key, uint keylen);
int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- void position(const byte *record);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ void position(const uchar *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int info(uint);
int reset(void);
@@ -84,4 +86,5 @@ class ha_myisammrg: public handler
void append_create_info(String *packet);
MYRG_INFO *myrg_info() { return file; }
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+ int check(THD* thd, HA_CHECK_OPT* check_opt);
};
diff --git a/storage/myisammrg/myrg_close.c b/storage/myisammrg/myrg_close.c
index 971a83928b1..baae24634b3 100644
--- a/storage/myisammrg/myrg_close.c
+++ b/storage/myisammrg/myrg_close.c
@@ -30,7 +30,7 @@ int myrg_close(MYRG_INFO *info)
pthread_mutex_lock(&THR_LOCK_open);
myrg_open_list=list_delete(myrg_open_list,&info->open_list);
pthread_mutex_unlock(&THR_LOCK_open);
- my_free((gptr) info,MYF(0));
+ my_free((uchar*) info,MYF(0));
if (error)
{
DBUG_RETURN(my_errno=error);
diff --git a/storage/myisammrg/myrg_create.c b/storage/myisammrg/myrg_create.c
index c4e91e7b29b..df81b730bfd 100644
--- a/storage/myisammrg/myrg_create.c
+++ b/storage/myisammrg/myrg_create.c
@@ -46,7 +46,7 @@ int myrg_create(const char *name, const char **table_names,
fn_same(buff,name,4);
*(end=strend(buff))='\n';
end[1]=0;
- if (my_write(file,buff,(uint) (end-buff+1),
+ if (my_write(file,(char*) buff,(uint) (end-buff+1),
MYF(MY_WME | MY_NABP)))
goto err;
}
@@ -55,7 +55,7 @@ int myrg_create(const char *name, const char **table_names,
{
end=strxmov(buff,"#INSERT_METHOD=",
get_type(&merge_insert_method,insert_method-1),"\n",NullS);
- if (my_write(file,buff,(uint) (end-buff),MYF(MY_WME | MY_NABP)))
+ if (my_write(file, (uchar*) buff,(uint) (end-buff),MYF(MY_WME | MY_NABP)))
goto err;
}
if (my_close(file,MYF(0)))
diff --git a/storage/myisammrg/myrg_def.h b/storage/myisammrg/myrg_def.h
index 344bd4edd3c..9c69da1424d 100644
--- a/storage/myisammrg/myrg_def.h
+++ b/storage/myisammrg/myrg_def.h
@@ -28,5 +28,8 @@ extern pthread_mutex_t THR_LOCK_open;
#endif
int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag);
-int _myrg_mi_read_record(MI_INFO *info, byte *buf);
-
+int _myrg_mi_read_record(MI_INFO *info, uchar *buf);
+#ifdef __cplusplus
+extern "C"
+#endif
+void myrg_print_wrong_table(const char *table_name);
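myrg_print_wrong_table() is declared here for the C storage-engine code but is presumably defined in the C++ handler layer; the conditional extern "C" keeps the symbol unmangled so both sides link against the same name. A minimal illustration of the pairing (file names are hypothetical):

    /* printer.cc -- C++ translation unit: define the symbol with C linkage */
    #include <cstdio>
    extern "C" void myrg_print_wrong_table(const char *table_name)
    {
      std::fprintf(stderr, "Wrong MERGE child table: %s\n", table_name);
    }

    /* From a C translation unit, the plain declaration above suffices:
       myrg_print_wrong_table("t1");  -- links against the same symbol */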
diff --git a/storage/myisammrg/myrg_delete.c b/storage/myisammrg/myrg_delete.c
index f9604f66885..93d45198b36 100644
--- a/storage/myisammrg/myrg_delete.c
+++ b/storage/myisammrg/myrg_delete.c
@@ -17,7 +17,7 @@
#include "myrg_def.h"
-int myrg_delete(MYRG_INFO *info, const byte *record)
+int myrg_delete(MYRG_INFO *info, const uchar *record)
{
if (!info->current_table)
return (my_errno= HA_ERR_NO_ACTIVE_RECORD);
diff --git a/storage/myisammrg/myrg_locking.c b/storage/myisammrg/myrg_locking.c
index a07833bc829..4f1e3f844a1 100644
--- a/storage/myisammrg/myrg_locking.c
+++ b/storage/myisammrg/myrg_locking.c
@@ -37,7 +37,15 @@ int myrg_lock_database(MYRG_INFO *info, int lock_type)
(file->table)->owned_by_merge = TRUE;
#endif
if ((new_error=mi_lock_database(file->table,lock_type)))
+ {
error=new_error;
+ if (lock_type != F_UNLCK)
+ {
+ while (--file >= info->open_tables)
+ mi_lock_database(file->table, F_UNLCK);
+ break;
+ }
+ }
}
return(error);
}
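The new error path above unlocks the children that were already locked when a later mi_lock_database() call fails, so a failed lock of a MERGE table no longer leaves a locked prefix behind (the rollback is skipped for F_UNLCK, since unlocking should proceed across all children). A minimal sketch of that all-or-nothing pattern, with lock_one()/unlock_one() as hypothetical stand-ins for mi_lock_database():

    struct tab { int locked; };

    static int  lock_one(struct tab *t)   { t->locked = 1; return 0; }
    static void unlock_one(struct tab *t) { t->locked = 0; }

    /* Lock every table or none: on failure, undo the locks taken so far. */
    static int lock_all(struct tab *tabs, int n)
    {
      struct tab *t;
      int err;
      for (t = tabs; t < tabs + n; t++)
      {
        if ((err = lock_one(t)))
        {
          while (--t >= tabs)          /* walk back over the locked prefix */
            unlock_one(t);
          return err;                  /* caller sees all-or-nothing */
        }
      }
      return 0;
    }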
diff --git a/storage/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c
index 3dbb605463e..500d3a29327 100644
--- a/storage/myisammrg/myrg_open.c
+++ b/storage/myisammrg/myrg_open.c
@@ -40,6 +40,7 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
IO_CACHE file;
MI_INFO *isam=0;
uint found_merge_insert_method= 0;
+ size_t name_buff_length;
DBUG_ENTER("myrg_open");
LINT_INIT(key_parts);
@@ -48,13 +49,13 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT),
O_RDONLY | O_SHARE,MYF(0))) < 0)
- goto err;
- errpos=1;
- if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0,
+ goto err;
+ errpos=1;
+ if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0,
MYF(MY_WME | MY_NABP)))
- goto err;
- errpos=2;
- dir_length=dirname_part(name_buff,name);
+ goto err;
+ errpos=2;
+ dir_length=dirname_part(name_buff, name, &name_buff_length);
while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
{
if ((end=buff+length)[-1] == '\n')
@@ -91,6 +92,11 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0))))
{
my_errno= HA_ERR_WRONG_MRG_TABLE_DEF;
+ if (handle_locking & HA_OPEN_FOR_REPAIR)
+ {
+ myrg_print_wrong_table(buff);
+ continue;
+ }
goto err;
}
if (!m_info) /* First file */
@@ -119,6 +125,11 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
if (m_info->reclength != isam->s->base.reclength)
{
my_errno=HA_ERR_WRONG_MRG_TABLE_DEF;
+ if (handle_locking & HA_OPEN_FOR_REPAIR)
+ {
+ myrg_print_wrong_table(buff);
+ continue;
+ }
goto err;
}
m_info->options|= isam->s->options;
@@ -132,6 +143,8 @@ MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
m_info->tables);
}
+ if (my_errno == HA_ERR_WRONG_MRG_TABLE_DEF)
+ goto err;
if (!m_info && !(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO),
MYF(MY_WME | MY_ZEROFILL))))
goto err;
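Taken together, the myrg_open() hunks change the failure mode under HA_OPEN_FOR_REPAIR: instead of failing on the first child with a mismatched definition, every offender is reported through myrg_print_wrong_table() and the open fails once, after the whole child list has been scanned. A compilable sketch of that scan-and-report shape (types and names are illustrative only):

    #include <stdio.h>

    struct child { const char *name; int def_matches; };

    static int open_children_for_repair(const struct child *c, int n)
    {
      int i, bad = 0;
      for (i = 0; i < n; i++)
      {
        if (!c[i].def_matches)
        {
          printf("Wrong MERGE child: '%s'\n", c[i].name);
          bad = 1;                  /* remember, but keep scanning */
          continue;
        }
        /* ... attach the child to the merge table here ... */
      }
      return bad ? -1 : 0;          /* fail only after the full scan */
    }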
diff --git a/storage/myisammrg/myrg_queue.c b/storage/myisammrg/myrg_queue.c
index 1d252207db1..d2579053784 100644
--- a/storage/myisammrg/myrg_queue.c
+++ b/storage/myisammrg/myrg_queue.c
@@ -15,7 +15,7 @@
#include "myrg_def.h"
-static int queue_key_cmp(void *keyseg, byte *a, byte *b)
+static int queue_key_cmp(void *keyseg, uchar *a, uchar *b)
{
MYRG_TABLE *ma= (MYRG_TABLE *)a;
MYRG_TABLE *mb= (MYRG_TABLE *)b;
@@ -69,7 +69,7 @@ int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag)
return error;
}
-int _myrg_mi_read_record(MI_INFO *info, byte *buf)
+int _myrg_mi_read_record(MI_INFO *info, uchar *buf)
{
if (!(*info->read_record)(info,info->lastpos,buf))
{
diff --git a/storage/myisammrg/myrg_rfirst.c b/storage/myisammrg/myrg_rfirst.c
index 80736537d02..9d7b0f9e83f 100644
--- a/storage/myisammrg/myrg_rfirst.c
+++ b/storage/myisammrg/myrg_rfirst.c
@@ -17,7 +17,7 @@
/* Read first row according to specific key */
-int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx)
+int myrg_rfirst(MYRG_INFO *info, uchar *buf, int inx)
{
MYRG_TABLE *table;
MI_INFO *mi;
@@ -35,7 +35,7 @@ int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx)
return err;
}
/* adding to queue */
- queue_insert(&(info->by_key),(byte *)table);
+ queue_insert(&(info->by_key),(uchar *)table);
}
/* We have done a read in all tables */
info->last_used_table=table;
diff --git a/storage/myisammrg/myrg_rkey.c b/storage/myisammrg/myrg_rkey.c
index 2d744ae31ec..8e7886f5a43 100644
--- a/storage/myisammrg/myrg_rkey.c
+++ b/storage/myisammrg/myrg_rkey.c
@@ -35,10 +35,10 @@
SerG
*/
-int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
+int myrg_rkey(MYRG_INFO *info,uchar *buf,int inx, const uchar *key,
key_part_map keypart_map, enum ha_rkey_function search_flag)
{
- byte *key_buff;
+ uchar *key_buff;
uint pack_key_length;
uint16 last_used_keyseg;
MYRG_TABLE *table;
@@ -60,7 +60,7 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
{
err=mi_rkey(mi, 0, inx, key, keypart_map, search_flag);
/* Get the saved packed key and packed key length. */
- key_buff=(byte*) mi->lastkey+mi->s->base.max_key_length;
+ key_buff=(uchar*) mi->lastkey+mi->s->base.max_key_length;
pack_key_length=mi->pack_key_length;
last_used_keyseg= mi->last_used_keyseg;
}
@@ -80,7 +80,7 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
DBUG_RETURN(err);
}
/* adding to queue */
- queue_insert(&(info->by_key),(byte *)table);
+ queue_insert(&(info->by_key),(uchar *)table);
}
@@ -92,6 +92,6 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
mi->once_flags|= RRND_PRESERVE_LASTINX;
DBUG_PRINT("info", ("using table no: %d",
(int) (info->current_table - info->open_tables + 1)));
- DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length);
+ DBUG_DUMP("result key", (uchar*) mi->lastkey, mi->lastkey_length);
DBUG_RETURN(_myrg_mi_read_record(mi,buf));
}
diff --git a/storage/myisammrg/myrg_rlast.c b/storage/myisammrg/myrg_rlast.c
index f364bf9b32f..8086a2f8104 100644
--- a/storage/myisammrg/myrg_rlast.c
+++ b/storage/myisammrg/myrg_rlast.c
@@ -17,7 +17,7 @@
/* Read last row with the same key as the previous read. */
-int myrg_rlast(MYRG_INFO *info, byte *buf, int inx)
+int myrg_rlast(MYRG_INFO *info, uchar *buf, int inx)
{
MYRG_TABLE *table;
MI_INFO *mi;
@@ -35,7 +35,7 @@ int myrg_rlast(MYRG_INFO *info, byte *buf, int inx)
return err;
}
/* adding to queue */
- queue_insert(&(info->by_key),(byte *)table);
+ queue_insert(&(info->by_key),(uchar *)table);
}
/* We have done a read in all tables */
info->last_used_table=table;
diff --git a/storage/myisammrg/myrg_rnext.c b/storage/myisammrg/myrg_rnext.c
index de1aa4df4b6..82d5cbf38b1 100644
--- a/storage/myisammrg/myrg_rnext.c
+++ b/storage/myisammrg/myrg_rnext.c
@@ -19,7 +19,7 @@
Read next row with the same key as previous read
*/
-int myrg_rnext(MYRG_INFO *info, byte *buf, int inx)
+int myrg_rnext(MYRG_INFO *info, uchar *buf, int inx)
{
int err;
MI_INFO *mi;
@@ -42,7 +42,7 @@ int myrg_rnext(MYRG_INFO *info, byte *buf, int inx)
else
{
/* Found here, adding to queue */
- queue_top(&(info->by_key))=(byte *)(info->current_table);
+ queue_top(&(info->by_key))=(uchar *)(info->current_table);
queue_replaced(&(info->by_key));
}
diff --git a/storage/myisammrg/myrg_rnext_same.c b/storage/myisammrg/myrg_rnext_same.c
index 9c6b522ee8a..ad7bbfb0f6e 100644
--- a/storage/myisammrg/myrg_rnext_same.c
+++ b/storage/myisammrg/myrg_rnext_same.c
@@ -16,7 +16,7 @@
#include "myrg_def.h"
-int myrg_rnext_same(MYRG_INFO *info, byte *buf)
+int myrg_rnext_same(MYRG_INFO *info, uchar *buf)
{
int err;
MI_INFO *mi;
@@ -39,7 +39,7 @@ int myrg_rnext_same(MYRG_INFO *info, byte *buf)
else
{
/* Found here, adding to queue */
- queue_top(&(info->by_key))=(byte *)(info->current_table);
+ queue_top(&(info->by_key))=(uchar *)(info->current_table);
queue_replaced(&(info->by_key));
}
diff --git a/storage/myisammrg/myrg_rprev.c b/storage/myisammrg/myrg_rprev.c
index b1b86a93fad..66c94974940 100644
--- a/storage/myisammrg/myrg_rprev.c
+++ b/storage/myisammrg/myrg_rprev.c
@@ -19,7 +19,7 @@
Read previous row with the same key as previous read
*/
-int myrg_rprev(MYRG_INFO *info, byte *buf, int inx)
+int myrg_rprev(MYRG_INFO *info, uchar *buf, int inx)
{
int err;
MI_INFO *mi;
@@ -42,7 +42,7 @@ int myrg_rprev(MYRG_INFO *info, byte *buf, int inx)
else
{
/* Found here, adding to queue */
- queue_top(&(info->by_key))=(byte *)(info->current_table);
+ queue_top(&(info->by_key))=(uchar *)(info->current_table);
queue_replaced(&(info->by_key));
}
diff --git a/storage/myisammrg/myrg_rrnd.c b/storage/myisammrg/myrg_rrnd.c
index 55e72b2170d..b598563680c 100644
--- a/storage/myisammrg/myrg_rrnd.c
+++ b/storage/myisammrg/myrg_rrnd.c
@@ -30,7 +30,7 @@ static MYRG_TABLE *find_table(MYRG_TABLE *start,MYRG_TABLE *end,ulonglong pos);
HA_ERR_END_OF_FILE = EOF.
*/
-int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos)
+int myrg_rrnd(MYRG_INFO *info,uchar *buf,ulonglong filepos)
{
int error;
MI_INFO *isam_info;
@@ -47,7 +47,7 @@ int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos)
}
isam_info=(info->current_table=info->open_tables)->table;
if (info->cache_in_use)
- mi_extra(isam_info,HA_EXTRA_CACHE,(byte*) &info->cache_size);
+ mi_extra(isam_info,HA_EXTRA_CACHE,(uchar*) &info->cache_size);
filepos=isam_info->s->pack.header_length;
isam_info->lastinx= (uint) -1; /* Can't forward or backward */
}
@@ -60,20 +60,20 @@ int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos)
for (;;)
{
isam_info->update&= HA_STATE_CHANGED;
- if ((error=(*isam_info->s->read_rnd)(isam_info,(byte*) buf,
+ if ((error=(*isam_info->s->read_rnd)(isam_info,(uchar*) buf,
(my_off_t) filepos,1)) !=
HA_ERR_END_OF_FILE)
DBUG_RETURN(error);
if (info->cache_in_use)
mi_extra(info->current_table->table, HA_EXTRA_NO_CACHE,
- (byte*) &info->cache_size);
+ (uchar*) &info->cache_size);
if (info->current_table+1 == info->end_table)
DBUG_RETURN(HA_ERR_END_OF_FILE);
info->current_table++;
info->last_used_table=info->current_table;
if (info->cache_in_use)
mi_extra(info->current_table->table, HA_EXTRA_CACHE,
- (byte*) &info->cache_size);
+ (uchar*) &info->cache_size);
info->current_table->file_offset=
info->current_table[-1].file_offset+
info->current_table[-1].table->state->data_file_length;
@@ -88,7 +88,7 @@ int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos)
isam_info=info->current_table->table;
isam_info->update&= HA_STATE_CHANGED;
DBUG_RETURN((*isam_info->s->read_rnd)
- (isam_info, (byte*) buf,
+ (isam_info, (uchar*) buf,
(my_off_t) (filepos - info->current_table->file_offset),
0));
}
diff --git a/storage/myisammrg/myrg_rsame.c b/storage/myisammrg/myrg_rsame.c
index 56b16c0aa3c..2f7523759dc 100644
--- a/storage/myisammrg/myrg_rsame.c
+++ b/storage/myisammrg/myrg_rsame.c
@@ -15,7 +15,7 @@
#include "myrg_def.h"
-int myrg_rsame(MYRG_INFO *info,byte *record,int inx)
+int myrg_rsame(MYRG_INFO *info,uchar *record,int inx)
{
if (inx) /* not yet used, should be 0 */
return (my_errno=HA_ERR_WRONG_INDEX);
diff --git a/storage/myisammrg/myrg_update.c b/storage/myisammrg/myrg_update.c
index ba667d69f12..5d883be8484 100644
--- a/storage/myisammrg/myrg_update.c
+++ b/storage/myisammrg/myrg_update.c
@@ -17,7 +17,7 @@
#include "myrg_def.h"
-int myrg_update(register MYRG_INFO *info,const byte *oldrec, byte *newrec)
+int myrg_update(register MYRG_INFO *info,const uchar *oldrec, uchar *newrec)
{
if (!info->current_table)
return (my_errno=HA_ERR_NO_ACTIVE_RECORD);
diff --git a/storage/myisammrg/myrg_write.c b/storage/myisammrg/myrg_write.c
index ed0a4a7996a..27534df2821 100644
--- a/storage/myisammrg/myrg_write.c
+++ b/storage/myisammrg/myrg_write.c
@@ -17,7 +17,7 @@
#include "myrg_def.h"
-int myrg_write(register MYRG_INFO *info, byte *rec)
+int myrg_write(register MYRG_INFO *info, uchar *rec)
{
/* [phi] MERGE_WRITE_DISABLED is handled by the else case */
if (info->merge_insert_method == MERGE_INSERT_TO_FIRST)
diff --git a/storage/ndb/MAINTAINERS b/storage/ndb/MAINTAINERS
new file mode 100644
index 00000000000..d1547d48234
--- /dev/null
+++ b/storage/ndb/MAINTAINERS
@@ -0,0 +1,163 @@
+MySQL Cluster MAINTAINERS
+-------------------------
+
+This is a list of knowledgeable people for parts of the NDB code.
+
+When changing an area of the code, you probably want to ask the
+people who know it well to look over the patch.
+
+When sending patches and queries, always CC the mailing list.
+
+If no list is specified, assume internals@lists.mysql.com
+
+P: Person
+M: Mail
+L: Mailing list
+W: Web page with status/info
+C: Comment
+SRC: Source directory (relative to this directory)
+T: SCM tree type and location
+S: Status, one of:
+
+ Supported: Somebody is paid to maintain this.
+ Maintained: Not their primary job, but maintained.
+ Orphan: No current obvious maintainer.
+ Obsolete: Replaced by something else.
+
+-------------------------------------------------------------
+
+Binlog Injector
+SRC: ha_ndbcluster_binlog.cc
+C: see also row based replication
+P: Stewart Smith
+M: stewart@mysql.com
+C: Original author
+P: Tomas Ulin
+M: tomas@mysql.com
+C: Lots of updates
+P: Martin Skold
+M: martin@mysql.com
+C: Metadata ops
+S: Supported
+
+BLOBs
+SRC: ha_ndbcluster.cc
+SRC: src/ndbapi/NdbBlob*
+P: Pekka
+M: pekka@mysql.com
+S: Supported
+
+cpcd/cpcc
+SRC: src/cw/cpcd
+SRC: src/cw/cpcc
+C: Maintained only as part of autotest
+P: Jonas Oreland
+M: jonas@mysql.com
+S: Maintained
+
+cpcc-win32
+SRC: src/cw/cpcc-win32
+S: Obsolete
+
+Handler
+SRC: ha_ndbcluster.cc
+P: Martin Skold
+M: martin@mysql.com
+S: Supported
+
+Management Server
+SRC: src/mgmsrv/
+P: Stewart Smith
+M: stewart@mysql.com
+S: Supported
+
+Management Client
+SRC: src/mgmclient/
+P: Stewart Smith
+M: stewart@mysql.com
+S: Supported
+
+Management API
+SRC: src/mgmapi/
+P: Stewart Smith
+M: stewart@mysql.com
+S: Supported
+
+NDB API Examples
+SRC: ndbapi-examples/
+P: Tomas Ulin
+M: tomas@mysql.com
+C: Originally by Lars
+P: Lars Thalmann
+M: lars@mysql.com
+S: Maintained
+
+NDB API NdbRecord Examples
+SRC: ndbapi-examples/
+P: Kristian Nielsen
+M: knielsen@mysql.com
+S: Maintained
+
+tsman
+C: Disk Data (Table Space MANager)
+SRC: src/kernel/blocks/tsman.cpp
+SRC: src/kernel/blocks/tsman.hpp
+P: Jonas Oreland
+M: jonas@mysql.com
+S: Supported
+
+lgman
+C: Disk Data (LoG MANager)
+SRC: src/kernel/blocks/lgman.cpp
+SRC: src/kernel/blocks/lgman.hpp
+P: Jonas Oreland
+M: jonas@mysql.com
+S: Supported
+
+pgman
+C: Disk Data (PaGe MANager)
+SRC: src/kernel/blocks/pgman.cpp
+SRC: src/kernel/blocks/pgman.hpp
+P: Jonas Oreland
+M: jonas@mysql.com
+S: Supported
+
+SUMA
+C: SUbscription MAnager
+C: Used for replication
+SRC: src/kernel/blocks/suma/
+P: Tomas Ulin
+M: tomas@mysql.com
+P: Jonas Oreland
+M: jonas@mysql.com
+S: Supported
+
+TRIX
+C: TRiggers and IndeXes (but only online Index build)
+SRC: src/kernel/blocks/trix
+P: Martin Skold
+M: mskold@mysql.com
+S: Supported
+
+QMGR
+C: Cluster (with a Q) ManaGeR
+C: Heartbeats etc
+SRC: src/kernel/blocks/qmgr
+S: Supported
+
+NDBFS
+C: NDB FileSystem
+C: File System abstraction
+SRC: src/kernel/blocks/ndbfs
+S: Supported
+
diff --git a/storage/ndb/config/common.mk.am b/storage/ndb/config/common.mk.am
index 5ed3855f31e..9633a52e91f 100644
--- a/storage/ndb/config/common.mk.am
+++ b/storage/ndb/config/common.mk.am
@@ -25,3 +25,5 @@ INCLUDES = $(INCLUDES_LOC)
LDADD = $(LDADD_LOC)
DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS)
NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC)
+NDB_AM_CXXFLAGS:= $(AM_CXXFLAGS)
+AM_CXXFLAGS=$(NDB_AM_CXXFLAGS) $(NDB_CXXFLAGS)
diff --git a/storage/ndb/include/Makefile.am b/storage/ndb/include/Makefile.am
index bf8fe392072..9e6ad016d75 100644
--- a/storage/ndb/include/Makefile.am
+++ b/storage/ndb/include/Makefile.am
@@ -45,6 +45,7 @@ ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
mgmapi/mgmapi.h \
+mgmapi/mgmapi_error.h \
mgmapi/mgmapi_debug.h \
mgmapi/mgmapi_config_parameters.h \
mgmapi/mgmapi_config_parameters_debug.h \
diff --git a/storage/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp
index 7e47dbf59db..8ae96162a48 100644
--- a/storage/ndb/include/debugger/EventLogger.hpp
+++ b/storage/ndb/include/debugger/EventLogger.hpp
@@ -173,5 +173,5 @@ private:
STATIC_CONST(MAX_TEXT_LENGTH = 256);
};
-
+extern void getRestartAction(Uint32 action, BaseString &str);
#endif
diff --git a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
index 8d438f79259..8126267f946 100644
--- a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp
@@ -44,6 +44,8 @@ class FsOpenReq {
friend class Restore;
friend class Dblqh;
+ friend class Dbtup;
+
/**
* For printing
*/
diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 2bedba963e2..0853f5a4422 100644
--- a/storage/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
@@ -18,12 +18,13 @@
#include "mgmapi_config_parameters.h"
#include "ndb_logevent.h"
+#include "mgmapi_error.h"
#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1
#define NDB_MGM_MAX_LOGLEVEL 15
/**
- * @mainpage MySQL Cluster Management API
+ * @section MySQL Cluster Management API
*
* The MySQL Cluster Management API (MGM API) is a C language API
* that is used for:
@@ -212,105 +213,6 @@ extern "C" {
};
/**
- * Error codes
- */
- enum ndb_mgm_error {
- /** Not an error */
- NDB_MGM_NO_ERROR = 0,
-
- /* Request for service errors */
- /** Supplied connectstring is illegal */
- NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
- /** Supplied NdbMgmHandle is illegal */
- NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
- /** Illegal reply from server */
- NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
- /** Illegal number of nodes */
- NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
- /** Illegal node status */
- NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
- /** Memory allocation error */
- NDB_MGM_OUT_OF_MEMORY = 1009,
- /** Management server not connected */
- NDB_MGM_SERVER_NOT_CONNECTED = 1010,
- /** Could not connect to socker */
- NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
- /** Could not bind local address */
- NDB_MGM_BIND_ADDRESS = 1012,
-
- /* Alloc node id failures */
- /** Generic error, retry may succeed */
- NDB_MGM_ALLOCID_ERROR = 1101,
- /** Non retriable error */
- NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102,
-
- /* Service errors - Start/Stop Node or System */
- /** Start failed */
- NDB_MGM_START_FAILED = 2001,
- /** Stop failed */
- NDB_MGM_STOP_FAILED = 2002,
- /** Restart failed */
- NDB_MGM_RESTART_FAILED = 2003,
-
- /* Service errors - Backup */
- /** Unable to start backup */
- NDB_MGM_COULD_NOT_START_BACKUP = 3001,
- /** Unable to abort backup */
- NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
-
- /* Service errors - Single User Mode */
- /** Unable to enter single user mode */
- NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
- /** Unable to exit single user mode */
- NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
-
- /* Usage errors */
- /** Usage error */
- NDB_MGM_USAGE_ERROR = 5001
- };
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- struct Ndb_Mgm_Error_Msg {
- enum ndb_mgm_error code;
- const char * msg;
- };
- const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
- { NDB_MGM_NO_ERROR, "No error" },
-
- /* Request for service errors */
- { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
- { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
- { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
- { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
- { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
- { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
- { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
- { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
-
- /* Service errors - Start/Stop Node or System */
- { NDB_MGM_START_FAILED, "Start failed" },
- { NDB_MGM_STOP_FAILED, "Stop failed" },
- { NDB_MGM_RESTART_FAILED, "Restart failed" },
-
- /* Service errors - Backup */
- { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
- { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
-
- /* Service errors - Single User Mode */
- { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
- "Could not enter single user mode" },
- { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
- "Could not exit single user mode" },
-
- /* Usage errors */
- { NDB_MGM_USAGE_ERROR,
- "Usage error" }
- };
- const int ndb_mgm_noOfErrorMsgs =
- sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
-#endif
-
- /**
* Status of a node in the cluster.
*
* Sub-structure in enum ndb_mgm_cluster_state
diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
index 119958d0ce0..ac2cbf060fd 100644
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
@@ -64,6 +64,7 @@
#define CFG_DB_FILESYSTEM_PATH 125
#define CFG_DB_NO_REDOLOG_FILES 126
+#define CFG_DB_REDOLOG_FILE_SIZE 140
#define CFG_DB_LCP_DISC_PAGES_TUP 127
#define CFG_DB_LCP_DISC_PAGES_TUP_SR 128
@@ -81,6 +82,8 @@
#define CFG_DB_BACKUP_WRITE_SIZE 136
#define CFG_DB_BACKUP_MAX_WRITE_SIZE 139
+#define CFG_DB_WATCHDOG_INTERVAL_INITIAL 141
+
#define CFG_LOG_DESTINATION 147
#define CFG_DB_DISCLESS 148
@@ -113,6 +116,10 @@
#define CFG_DB_MEMREPORT_FREQUENCY 166
+#define CFG_DB_O_DIRECT 168
+
+#define CFG_DB_MAX_ALLOCATE 169
+
#define CFG_DB_SGA 198 /* super pool mem */
#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */
diff --git a/storage/ndb/include/mgmapi/mgmapi_error.h b/storage/ndb/include/mgmapi/mgmapi_error.h
new file mode 100644
index 00000000000..2d0aa1ded0f
--- /dev/null
+++ b/storage/ndb/include/mgmapi/mgmapi_error.h
@@ -0,0 +1,121 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef MGMAPI_ERROR_H
+#define MGMAPI_ERROR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /**
+ * Error codes
+ */
+ enum ndb_mgm_error {
+ /** Not an error */
+ NDB_MGM_NO_ERROR = 0,
+
+ /* Request for service errors */
+ /** Supplied connectstring is illegal */
+ NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
+ /** Supplied NdbMgmHandle is illegal */
+ NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
+ /** Illegal reply from server */
+ NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
+ /** Illegal number of nodes */
+ NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
+ /** Illegal node status */
+ NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
+ /** Memory allocation error */
+ NDB_MGM_OUT_OF_MEMORY = 1009,
+ /** Management server not connected */
+ NDB_MGM_SERVER_NOT_CONNECTED = 1010,
+ /** Could not connect to socket */
+ NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
+ /** Could not bind local address */
+ NDB_MGM_BIND_ADDRESS = 1012,
+
+ /* Alloc node id failures */
+ /** Generic error, retry may succeed */
+ NDB_MGM_ALLOCID_ERROR = 1101,
+ /** Non-retriable error */
+ NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102,
+
+ /* Service errors - Start/Stop Node or System */
+ /** Start failed */
+ NDB_MGM_START_FAILED = 2001,
+ /** Stop failed */
+ NDB_MGM_STOP_FAILED = 2002,
+ /** Restart failed */
+ NDB_MGM_RESTART_FAILED = 2003,
+
+ /* Service errors - Backup */
+ /** Unable to start backup */
+ NDB_MGM_COULD_NOT_START_BACKUP = 3001,
+ /** Unable to abort backup */
+ NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
+
+ /* Service errors - Single User Mode */
+ /** Unable to enter single user mode */
+ NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
+ /** Unable to exit single user mode */
+ NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
+
+ /* Usage errors */
+ /** Usage error */
+ NDB_MGM_USAGE_ERROR = 5001
+ };
+ struct Ndb_Mgm_Error_Msg {
+ enum ndb_mgm_error code;
+ const char * msg;
+ };
+ const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
+ { NDB_MGM_NO_ERROR, "No error" },
+
+ /* Request for service errors */
+ { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
+ { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
+ { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
+ { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
+ { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" },
+ { NDB_MGM_OUT_OF_MEMORY, "Out of memory" },
+ { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" },
+ { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" },
+
+ /* Service errors - Start/Stop Node or System */
+ { NDB_MGM_START_FAILED, "Start failed" },
+ { NDB_MGM_STOP_FAILED, "Stop failed" },
+ { NDB_MGM_RESTART_FAILED, "Restart failed" },
+
+ /* Service errors - Backup */
+ { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
+ { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
+
+ /* Service errors - Single User Mode */
+ { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
+ "Could not enter single user mode" },
+ { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
+ "Could not exit single user mode" },
+
+ /* Usage errors */
+ { NDB_MGM_USAGE_ERROR,
+ "Usage error" }
+ };
+ const int ndb_mgm_noOfErrorMsgs =
+ sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
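Because the new header ships both the message table and its element count, callers can map an error code to text with a simple scan. A short sketch (mgm_error_text() is my own helper, not part of the MGM API):

    #include <stdio.h>
    #include <mgmapi/mgmapi_error.h>

    static const char *mgm_error_text(enum ndb_mgm_error code)
    {
      int i;
      for (i = 0; i < ndb_mgm_noOfErrorMsgs; i++)
        if (ndb_mgm_error_msgs[i].code == code)
          return ndb_mgm_error_msgs[i].msg;
      return "Unknown error code";
    }

    int main(void)
    {
      puts(mgm_error_text(NDB_MGM_OUT_OF_MEMORY));  /* prints "Out of memory" */
      return 0;
    }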
diff --git a/storage/ndb/include/mgmapi/ndbd_exit_codes.h b/storage/ndb/include/mgmapi/ndbd_exit_codes.h
index 71802fd8fcc..30578bdf722 100644
--- a/storage/ndb/include/mgmapi/ndbd_exit_codes.h
+++ b/storage/ndb/include/mgmapi/ndbd_exit_codes.h
@@ -78,8 +78,9 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification;
#define NDBD_EXIT_SR_RESTARTCONFLICT 2311
#define NDBD_EXIT_NO_MORE_UNDOLOG 2312
#define NDBD_EXIT_SR_UNDOLOG 2313
-#define NDBD_EXIT_SR_SCHEMAFILE 2310
#define NDBD_EXIT_SINGLE_USER_MODE 2314
+#define NDBD_EXIT_NODE_DECLARED_DEAD 2315
+#define NDBD_EXIT_SR_SCHEMAFILE 2316
#define NDBD_EXIT_MEMALLOC 2327
#define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334
#define NDBD_EXIT_TIME_QUEUE_SHORT 2335
diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in
index dd4303f949c..2fc594b3f5a 100644
--- a/storage/ndb/include/ndb_global.h.in
+++ b/storage/ndb/include/ndb_global.h.in
@@ -144,4 +144,6 @@ extern "C" {
#define MAX(x,y) (((x)>(y))?(x):(y))
#endif
+#define NDB_O_DIRECT_WRITE_ALIGNMENT 512
+
#endif
diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in
index 9e1edeecd1e..5405ad4d7aa 100644
--- a/storage/ndb/include/ndb_version.h.in
+++ b/storage/ndb/include/ndb_version.h.in
@@ -16,8 +16,7 @@
#ifndef NDB_VERSION_H
#define NDB_VERSION_H
-#include <ndb_global.h>
-#include <version.h>
+#include <ndb_types.h>
/* NDB build version */
#define NDB_VERSION_BUILD @NDB_VERSION_BUILD@
@@ -32,19 +31,35 @@
#define NDB_VERSION_STATUS "@NDB_VERSION_STATUS@"
-#define MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0))
+#define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0))
-#define NDB_VERSION_D MAKE_VERSION(NDB_VERSION_MAJOR, NDB_VERSION_MINOR, NDB_VERSION_BUILD)
+#define NDB_VERSION_D NDB_MAKE_VERSION(NDB_VERSION_MAJOR, NDB_VERSION_MINOR, NDB_VERSION_BUILD)
#define NDB_VERSION_STRING_BUF_SZ 100
#ifdef __cplusplus
-extern "C"
-#else
-extern
+extern "C" {
#endif
-char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
-#define NDB_VERSION_STRING (getVersionString(NDB_VERSION, NDB_VERSION_STATUS, \
- ndb_version_string_buf, \
- sizeof(ndb_version_string_buf)))
+
+void ndbPrintVersion();
+
+Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build);
+
+Uint32 ndbGetMajor(Uint32 version);
+
+Uint32 ndbGetMinor(Uint32 version);
+
+Uint32 ndbGetBuild(Uint32 version);
+
+const char* ndbGetVersionString(Uint32 version, const char * status,
+ char *buf, unsigned sz);
+const char* ndbGetOwnVersionString();
+
+Uint32 ndbGetOwnVersion();
+
+#ifdef __cplusplus
+}
+#endif
+
+#define NDB_VERSION_STRING ndbGetOwnVersionString()
#define NDB_VERSION ndbGetOwnVersion()
@@ -59,19 +74,19 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
/**
* From which version do we support rowid
*/
-#define NDBD_ROWID_VERSION (MAKE_VERSION(5,1,6))
-#define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17)
-#define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18)
-#define NDBD_FRAGID_VERSION (MAKE_VERSION(5,1,6))
-#define NDBD_DICT_LOCK_VERSION_5 MAKE_VERSION(5,0,23)
-#define NDBD_DICT_LOCK_VERSION_5_1 MAKE_VERSION(5,1,12)
+#define NDBD_ROWID_VERSION (NDB_MAKE_VERSION(5,1,6))
+#define NDBD_INCL_NODECONF_VERSION_4 NDB_MAKE_VERSION(4,1,17)
+#define NDBD_INCL_NODECONF_VERSION_5 NDB_MAKE_VERSION(5,0,18)
+#define NDBD_FRAGID_VERSION (NDB_MAKE_VERSION(5,1,6))
+#define NDBD_DICT_LOCK_VERSION_5 NDB_MAKE_VERSION(5,0,23)
+#define NDBD_DICT_LOCK_VERSION_5_1 NDB_MAKE_VERSION(5,1,12)
-#define NDBD_UPDATE_FRAG_DIST_KEY_50 MAKE_VERSION(5,0,26)
-#define NDBD_UPDATE_FRAG_DIST_KEY_51 MAKE_VERSION(5,1,12)
+#define NDBD_UPDATE_FRAG_DIST_KEY_50 NDB_MAKE_VERSION(5,0,26)
+#define NDBD_UPDATE_FRAG_DIST_KEY_51 NDB_MAKE_VERSION(5,1,12)
-#define NDBD_QMGR_SINGLEUSER_VERSION_5 MAKE_VERSION(5,0,25)
+#define NDBD_QMGR_SINGLEUSER_VERSION_5 NDB_MAKE_VERSION(5,0,25)
-#define NDBD_NODE_VERSION_REP MAKE_VERSION(6,1,1)
+#define NDBD_NODE_VERSION_REP NDB_MAKE_VERSION(6,1,1)
#endif
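NDB_MAKE_VERSION packs major/minor/build into one integer, with build in the low byte, minor in the next, and major above that; that is what lets the feature-gate macros above be plain numeric comparisons. A standalone demonstration, using unsigned int in place of Uint32:

    #include <stdio.h>

    #define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0))

    int main(void)
    {
      unsigned v = NDB_MAKE_VERSION(5, 1, 12);
      printf("packed: 0x%06x\n", v);               /* 0x05010c */
      printf("major=%u minor=%u build=%u\n",
             v >> 16, (v >> 8) & 0xFF, v & 0xFF);
      return 0;
    }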
diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp
index 5f96408ea30..4d0219d1a3c 100644
--- a/storage/ndb/include/ndbapi/Ndb.hpp
+++ b/storage/ndb/include/ndbapi/Ndb.hpp
@@ -1055,6 +1055,7 @@ class Ndb
friend class NdbDictInterface;
friend class NdbBlob;
friend class NdbImpl;
+ friend class Ndb_internal;
#endif
public:
@@ -1280,6 +1281,16 @@ public:
*/
/**
+ * Structure for passing in pointers to startTransaction
+ *
+ */
+ struct Key_part_ptr
+ {
+ const void * ptr;
+ unsigned len;
+ };
+
+ /**
* Start a transaction
*
* @note When the transaction is completed it must be closed using
@@ -1300,6 +1311,30 @@ public:
Uint32 keyLen = 0);
/**
+ * Compute hash value given table/keys
+ *
+ * @param hashvalueptr - OUT, is set to hashvalue if return value is 0
+ * @param table Pointer to table object
+ * @param keyData Null-terminated array of pointers to the keyParts that
+ * make up the distribution key.
+ * The length of each keyPart is read from
+ * metadata and checked against the passed value
+ * @param xfrmbuf Pointer to temporary buffer that will be used
+ * to calculate hashvalue
+ * @param xfrmbuflen Length of buffer
+ *
+ * @note if xfrmbuf is null (default), a temporary buffer is malloc'd and freed
+ * if xfrmbuf is not null but its length is too short, the method will fail
+ *
+ * @return 0 - ok - hashvalueptr is set
+ * else - fail, return error code
+ */
+ static int computeHash(Uint32* hashvalueptr,
+ const NdbDictionary::Table*,
+ const struct Key_part_ptr * keyData,
+ void* xfrmbuf = 0, Uint32 xfrmbuflen = 0);
+
+ /**
* Close a transaction.
*
* @note should be called after the transaction has completed, irrespective
@@ -1488,12 +1523,15 @@ public:
int initAutoIncrement();
int getAutoIncrementValue(const char* aTableName,
- Uint64 & tupleId, Uint32 cacheSize);
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
- Uint64 & tupleId, Uint32 cacheSize);
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
TupleIdRange & range, Uint64 & tupleId,
- Uint32 cacheSize);
+ Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1);
int readAutoIncrementValue(const char* aTableName,
Uint64 & tupleId);
int readAutoIncrementValue(const NdbDictionary::Table * aTable,
@@ -1510,7 +1548,7 @@ public:
private:
int getTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId,
- Uint32 cacheSize);
+ Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1);
int readTupleIdFromNdb(const NdbTableImpl* table,
TupleIdRange & range, Uint64 & tupleId);
int setTupleIdInNdb(const NdbTableImpl* table,
diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp
index 0fa2cac0a32..06111941df4 100644
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp
@@ -1042,6 +1042,13 @@ protected:
*/
Int8 m_abortOption;
+ /*
+ * For blob impl, option to not propagate error to trans level.
+ * Could be AO_IgnoreError variant if we want it public.
+ * Ignored unless AO_IgnoreError is also set.
+ */
+ Int8 m_noErrorPropagation;
+
friend struct Ndb_free_list_t<NdbOperation>;
};
diff --git a/storage/ndb/include/ndbapi/NdbRecAttr.hpp b/storage/ndb/include/ndbapi/NdbRecAttr.hpp
index ac098831180..121339e470b 100644
--- a/storage/ndb/include/ndbapi/NdbRecAttr.hpp
+++ b/storage/ndb/include/ndbapi/NdbRecAttr.hpp
@@ -153,6 +153,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
*
+ * @return Int8 value.
+ */
+ Int8 int8_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
+ *
* @return 64 bit unsigned value.
*/
Uint64 u_64_value() const;
@@ -188,6 +195,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
*
+ * @return Uint8 value.
+ */
+ Uint8 u_8_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
+ *
* @return Float value.
*/
float float_value() const;
@@ -316,6 +330,13 @@ NdbRecAttr::char_value() const
}
inline
+Int8
+NdbRecAttr::int8_value() const
+{
+ return *(Int8*)theRef;
+}
+
+inline
Uint32
NdbRecAttr::u_32_value() const
{
@@ -337,6 +358,13 @@ NdbRecAttr::u_char_value() const
}
inline
+Uint8
+NdbRecAttr::u_8_value() const
+{
+ return *(Uint8*)theRef;
+}
+
+inline
void
NdbRecAttr::release()
{
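The new int8_value()/u_8_value() accessors mirror the existing char_value()/u_char_value() pair but return true 8-bit integer types, reading the same single byte at theRef. A small usage sketch (operation setup and column name are assumed):

    NdbRecAttr *ra = op->getValue("tiny_col");   // requested before execute()
    // ... trans->execute(NdbTransaction::Commit) ...
    Int8  s = ra->int8_value();   // signed view of the stored byte
    Uint8 u = ra->u_8_value();    // unsigned view of the same byte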
diff --git a/storage/ndb/include/portlib/NdbThread.h b/storage/ndb/include/portlib/NdbThread.h
index 569048cf6c8..373e2218c6c 100644
--- a/storage/ndb/include/portlib/NdbThread.h
+++ b/storage/ndb/include/portlib/NdbThread.h
@@ -41,7 +41,7 @@ struct NdbThread;
signum set in g_ndb_shm_signum in a portable manner.
*/
#ifdef NDB_SHM_TRANSPORTER
-void NdbThread_set_shm_sigmask(bool block);
+void NdbThread_set_shm_sigmask(my_bool block);
#endif
/**
diff --git a/storage/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h
index 59f580de38e..70c36fdfd1e 100644
--- a/storage/ndb/include/portlib/NdbTick.h
+++ b/storage/ndb/include/portlib/NdbTick.h
@@ -37,9 +37,6 @@ NDB_TICKS NdbTick_CurrentMillisecond(void);
*/
int NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros);
- /*#define TIME_MEASUREMENT*/
-#ifdef TIME_MEASUREMENT
-
struct MicroSecondTimer {
NDB_TICKS seconds;
NDB_TICKS micro_seconds;
@@ -54,7 +51,6 @@ struct MicroSecondTimer {
NDB_TICKS NdbTick_getMicrosPassed(struct MicroSecondTimer start,
struct MicroSecondTimer stop);
int NdbTick_getMicroTimer(struct MicroSecondTimer* time_now);
-#endif
#ifdef __cplusplus
}
diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h
index 9cb65d4bc2e..f18bb9646cc 100644
--- a/storage/ndb/include/util/ndb_opts.h
+++ b/storage/ndb/include/util/ndb_opts.h
@@ -58,40 +58,40 @@ const char *opt_debug= 0;
"Set connect string for connecting to ndb_mgmd. " \
"Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \
"Overrides specifying entries in NDB_CONNECTSTRING and my.cnf", \
- (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \
+ (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "ndb-mgmd-host", OPT_NDB_MGMD, \
"Set host and port for connecting to ndb_mgmd. " \
"Syntax: <hostname>[:<port>].", \
- (gptr*) &opt_ndb_mgmd, (gptr*) &opt_ndb_mgmd, 0, \
+ (uchar**) &opt_ndb_mgmd, (uchar**) &opt_ndb_mgmd, 0, \
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "ndb-nodeid", OPT_NDB_NODEID, \
"Set node id for this node.", \
- (gptr*) &opt_ndb_nodeid, (gptr*) &opt_ndb_nodeid, 0, \
+ (uchar**) &opt_ndb_nodeid, (uchar**) &opt_ndb_nodeid, 0, \
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "ndb-shm", OPT_NDB_SHM,\
"Allow optimizing using shared memory connections when available",\
- (gptr*) &opt_ndb_shm, (gptr*) &opt_ndb_shm, 0,\
+ (uchar**) &opt_ndb_shm, (uchar**) &opt_ndb_shm, 0,\
GET_BOOL, NO_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0 },\
{"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION,\
"Select nodes for transactions in a more optimal way",\
- (gptr*) &opt_ndb_optimized_node_selection,\
- (gptr*) &opt_ndb_optimized_node_selection, 0,\
+ (uchar**) &opt_ndb_optimized_node_selection,\
+ (uchar**) &opt_ndb_optimized_node_selection, 0,\
GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\
{ "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\
- (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \
+ (uchar**) &opt_ndb_connectstring, (uchar**) &opt_ndb_connectstring, \
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "core-file", OPT_WANT_CORE, "Write core on errors.",\
- (gptr*) &opt_core, (gptr*) &opt_core, 0,\
+ (uchar**) &opt_core, (uchar**) &opt_core, 0,\
GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0},\
{"character-sets-dir", OPT_CHARSETS_DIR,\
- "Directory where character sets are.", (gptr*) &charsets_dir,\
- (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\
+ "Directory where character sets are.", (uchar**) &charsets_dir,\
+ (uchar**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\
#ifndef DBUG_OFF
#define NDB_STD_OPTS(prog_name) \
{ "debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", \
- (gptr*) &opt_debug, (gptr*) &opt_debug, \
+ (uchar**) &opt_debug, (uchar**) &opt_debug, \
0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \
NDB_STD_OPTS_COMMON
#else
diff --git a/storage/ndb/include/util/version.h b/storage/ndb/include/util/version.h
index 42513d00442..9ea18ecd9d9 100644
--- a/storage/ndb/include/util/version.h
+++ b/storage/ndb/include/util/version.h
@@ -16,25 +16,18 @@
#ifndef VERSION_H
#define VERSION_H
-#include <ndb_types.h>
+#include <ndb_version.h>
+
+/* some backwards compatible macros */
+#define MAKE_VERSION(A,B,C) NDB_MAKE_VERSION(A,B,C)
+#define getMajor(a) ndbGetMajor(a)
+#define getMinor(a) ndbGetMinor(a)
+#define getBuild(a) ndbGetBuild(a)
+
#ifdef __cplusplus
extern "C" {
#endif
- Uint32 getMajor(Uint32 version);
-
- Uint32 getMinor(Uint32 version);
-
- Uint32 getBuild(Uint32 version);
-
- Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build);
-
- const char* getVersionString(Uint32 version, const char * status,
- char *buf, unsigned sz);
-
- void ndbPrintVersion();
- Uint32 ndbGetOwnVersion();
-
int ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion);
int ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion);
int ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion);
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile
index c9b4507c4a7..b67150b71fa 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile
@@ -1,6 +1,6 @@
TARGET = mgmapi_logevent
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
+SRCS = main.cpp
+OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-$(TARGET).o: $(SRCS)
+$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp
index fbe5397c5cf..fbe5397c5cf 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile
index 95b43b11f6b..fd9499c7a68 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile
@@ -1,6 +1,6 @@
TARGET = mgmapi_logevent2
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
+SRCS = main.cpp
+OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-$(TARGET).o: $(SRCS)
+$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp
index 5a2241fc05f..5a2241fc05f 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2.cpp
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp
index a195a419aaf..1f19f36d674 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp
@@ -75,7 +75,7 @@ int main(int argc, char** argv)
exit(-1);
}
- if (cluster_connection->wait_until_ready(30,0))
+ if (cluster_connection->wait_until_ready(30,0) < 0)
{
std::cout << "Cluster was not ready within 30 secs." << std::endl;
exit(-1);
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp
index 0a4f6d92f2c..4e82fc3e42b 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp
@@ -281,12 +281,14 @@ static void do_read(Ndb &myNdb)
if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
if(myTransaction->execute( NdbTransaction::Commit ) == -1)
- if (i == 3) {
- std::cout << "Detected that deleted tuple doesn't exist!" << std::endl;
- } else {
- APIERROR(myTransaction->getNdbError());
- }
+ APIERROR(myTransaction->getNdbError());
+ if (myTransaction->getNdbError().classification == NdbError::NoDataFound)
+ if (i == 3)
+ std::cout << "Detected that deleted tuple doesn't exist!" << std::endl;
+ else
+ APIERROR(myTransaction->getNdbError());
+
if (i != 3) {
printf(" %2d %2d\n", i, myRecAttr->u_32_value());
}
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile
index 7f0ca52fcc3..9757df3ceab 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile
@@ -1,6 +1,6 @@
TARGET = ndbapi_simple_dual
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
+SRCS = main.cpp
+OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-$(TARGET).o: $(SRCS)
+$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
clean:
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp
index 5943894a3ee..5943894a3ee 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile
index c38975381f5..975563b9508 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile
@@ -1,6 +1,6 @@
TARGET = ndbapi_simple_index
-SRCS = $(TARGET).cpp
-OBJS = $(TARGET).o
+SRCS = main.cpp
+OBJS = main.o
CXX = g++
CFLAGS = -c -Wall -fno-rtti -fno-exceptions
CXXFLAGS =
@@ -17,7 +17,7 @@ SYS_LIB =
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
-$(TARGET).o: $(SRCS)
+$(OBJS): $(SRCS)
$(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS)
clean:
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp
index 440face79ae..440face79ae 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 884a49b3a94..0d31cd5de7f 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -621,6 +621,14 @@ const GsnName SignalNames [] = {
,{ GSN_LCP_PREPARE_REF, "LCP_PREPARE_REF" }
,{ GSN_LCP_PREPARE_CONF, "LCP_PREPARE_CONF" }
+ ,{ GSN_DICT_ABORT_REQ, "DICT_ABORT_REQ" }
+ ,{ GSN_DICT_ABORT_REF, "DICT_ABORT_REF" }
+ ,{ GSN_DICT_ABORT_CONF, "DICT_ABORT_CONF" }
+
+ ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ" }
+ ,{ GSN_DICT_COMMIT_REF, "DICT_COMMIT_REF" }
+ ,{ GSN_DICT_COMMIT_CONF, "DICT_COMMIT_CONF" }
+
/* DICT LOCK */
,{ GSN_DICT_LOCK_REQ, "DICT_LOCK_REQ" }
,{ GSN_DICT_LOCK_CONF, "DICT_LOCK_CONF" }
diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c
index 238e9b1956d..7e54984794f 100644
--- a/storage/ndb/src/common/portlib/NdbTick.c
+++ b/storage/ndb/src/common/portlib/NdbTick.c
@@ -15,7 +15,7 @@
#include <ndb_global.h>
-#include "NdbTick.h"
+#include <NdbTick.h>
#define NANOSEC_PER_SEC 1000000000
#define MICROSEC_PER_SEC 1000000
@@ -71,7 +71,6 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){
}
#endif
-#ifdef TIME_MEASUREMENT
int
NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer)
{
@@ -102,4 +101,3 @@ NdbTick_getMicrosPassed(struct MicroSecondTimer start,
}
return ret_value;
}
-#endif
diff --git a/storage/ndb/src/common/transporter/Packer.cpp b/storage/ndb/src/common/transporter/Packer.cpp
index 1045c8ac283..df7ff078e63 100644
--- a/storage/ndb/src/common/transporter/Packer.cpp
+++ b/storage/ndb/src/common/transporter/Packer.cpp
@@ -20,7 +20,12 @@
#include <TransporterCallback.hpp>
#include <RefConvert.hpp>
+#ifdef ERROR_INSERT
+Uint32 MAX_RECEIVED_SIGNALS = 1024;
+#else
#define MAX_RECEIVED_SIGNALS 1024
+#endif
+
Uint32
TransporterRegistry::unpack(Uint32 * readPtr,
Uint32 sizeOfData,
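The Packer.cpp change turns a compile-time constant into a mutable global in ERROR_INSERT builds, so a test can shrink the per-unpack signal budget at runtime while release builds keep the zero-cost #define. The idiom in isolation (Uint32 stood in by a typedef):

    typedef unsigned int Uint32;          /* stand-in for ndb_types.h */

    #ifdef ERROR_INSERT
    Uint32 MAX_RECEIVED_SIGNALS = 1024;   /* a test can lower this at runtime */
    #else
    #define MAX_RECEIVED_SIGNALS 1024     /* release: plain constant */
    #endif
    /* The consuming loop is written once and works with either form. */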
diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.cpp b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
index 138b79acb51..0720fe84973 100644
--- a/storage/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -65,13 +65,10 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
m_initLocal=false;
- m_swapCounter=0;
m_failCounter=0;
m_remoteNodes[0]=remoteSciNodeId0;
m_remoteNodes[1]=remoteSciNodeId1;
m_adapters = nAdapters;
- // The maximum number of times to try and create,
- // start and destroy a sequence
m_ActiveAdapterId=0;
m_StandbyAdapterId=1;
@@ -102,8 +99,6 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
DBUG_VOID_RETURN;
}
-
-
void SCI_Transporter::disconnectImpl()
{
DBUG_ENTER("SCI_Transporter::disconnectImpl");
@@ -129,7 +124,8 @@ void SCI_Transporter::disconnectImpl()
if(err != SCI_ERR_OK) {
report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
- DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x",
+ DBUG_PRINT("error",
+ ("Cannot close channel to the driver. Error code 0x%x",
err));
}
}
@@ -164,19 +160,18 @@ bool SCI_Transporter::initTransporter() {
m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4];
m_sendBuffer.m_dataSize = 0;
- DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d",
+ DBUG_PRINT("info",
+ ("Created SCI Send Buffer with buffer size %d and packet size %d",
m_sendBuffer.m_sendBufferSize, m_PacketSize * 4));
if(!getLinkStatus(m_ActiveAdapterId) ||
(m_adapters > 1 &&
!getLinkStatus(m_StandbyAdapterId))) {
- DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches"));
- //reportDisconnect(remoteNodeId, 0);
- //doDisconnect();
+ DBUG_PRINT("error",
+ ("The link is not fully operational. Check the cables and the switches"));
//NDB should terminate
report_error(TE_SCI_LINK_ERROR);
DBUG_RETURN(false);
}
-
DBUG_RETURN(true);
} // initTransporter()
@@ -235,7 +230,8 @@ sci_error_t SCI_Transporter::initLocalSegment() {
DBUG_PRINT("info", ("SCInode iD %d adapter %d\n",
sciAdapters[i].localSciNodeId, i));
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x",
+ DBUG_PRINT("error",
+ ("Cannot open an SCI virtual device. Error code 0x%x",
err));
DBUG_RETURN(err);
}
@@ -269,7 +265,8 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
+ DBUG_PRINT("error",
+ ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
err));
DBUG_RETURN(err);
}
@@ -303,15 +300,13 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n",
+ DBUG_PRINT("error",
+ ("Local Segment is not available for remote connections. Error code 0x%x\n",
err));
DBUG_RETURN(err);
}
}
-
-
setupLocalSegment();
-
DBUG_RETURN(err);
} // initLocalSegment()
@@ -343,12 +338,6 @@ bool SCI_Transporter::doSend() {
if(sizeToSend==4097)
i4097++;
#endif
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Start sequence failed"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
-
tryagain:
retry++;
@@ -374,119 +363,36 @@ bool SCI_Transporter::doSend() {
SCI_FLAG_ERROR_CHECK,
&err);
-
if (err != SCI_ERR_OK) {
- if(err == SCI_ERR_OUT_OF_RANGE) {
- DBUG_PRINT("error", ("Data transfer : out of range error"));
- goto tryagain;
- }
- if(err == SCI_ERR_SIZE_ALIGNMENT) {
- DBUG_PRINT("error", ("Data transfer : alignment error"));
- DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend));
- goto tryagain;
- }
- if(err == SCI_ERR_OFFSET_ALIGNMENT) {
- DBUG_PRINT("error", ("Data transfer : offset alignment"));
- goto tryagain;
- }
- if(err == SCI_ERR_TRANSFER_FAILED) {
- //(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock();
- if(getLinkStatus(m_ActiveAdapterId)) {
- goto tryagain;
- }
- if (m_adapters == 1) {
- DBUG_PRINT("error", ("SCI Transfer failed"));
+ if (err == SCI_ERR_OUT_OF_RANGE ||
+ err == SCI_ERR_SIZE_ALIGNMENT ||
+ err == SCI_ERR_OFFSET_ALIGNMENT) {
+ DBUG_PRINT("error", ("Data transfer error = %d", err));
report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
- }
- m_failCounter++;
- Uint32 temp=m_ActiveAdapterId;
- switch(m_swapCounter) {
- case 0:
- /**swap from active (0) to standby (1)*/
- if(getLinkStatus(m_StandbyAdapterId)) {
- DBUG_PRINT("error", ("Swapping from adapter 0 to 1"));
+ }
+ if(err == SCI_ERR_TRANSFER_FAILED) {
+ if(getLinkStatus(m_ActiveAdapterId))
+ goto tryagain;
+ if (m_adapters == 1) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
+ m_failCounter++;
+ Uint32 temp=m_ActiveAdapterId;
+ if (getLinkStatus(m_StandbyAdapterId)) {
failoverShmWriter();
SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0);
m_ActiveAdapterId=m_StandbyAdapterId;
m_StandbyAdapterId=temp;
- SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence),
- FLAGS,
- &err);
- if(err!=SCI_ERR_OK) {
- report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
- DBUG_PRINT("error", ("Unable to remove sequence"));
- return false;
- }
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Start sequence failed"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
- m_swapCounter++;
- DBUG_PRINT("info", ("failover complete"));
- goto tryagain;
- } else {
- report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- DBUG_PRINT("error", ("SCI Transfer failed"));
- return false;
- }
- return false;
- break;
- case 1:
- /** swap back from 1 to 0
- must check that the link is up */
-
- if(getLinkStatus(m_StandbyAdapterId)) {
- failoverShmWriter();
- m_ActiveAdapterId=m_StandbyAdapterId;
- m_StandbyAdapterId=temp;
- DBUG_PRINT("info", ("Swapping from 1 to 0"));
- if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to create sequence"));
- report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- return false;
- }
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("startSequence failed... disconnecting"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
-
- SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence)
- , FLAGS,
- &err);
- if(err!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to remove sequence"));
- report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
- return false;
- }
-
- if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to create sequence on standby"));
- report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- return false;
- }
-
- m_swapCounter=0;
-
- DBUG_PRINT("info", ("failover complete.."));
- goto tryagain;
-
+ DBUG_PRINT("error", ("Swapping from adapter %u to %u",
+ m_StandbyAdapterId, m_ActiveAdapterId));
} else {
- DBUG_PRINT("error", ("Unrecoverable data transfer error"));
report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
+ DBUG_PRINT("error", ("SCI Transfer failed"));
}
-
- break;
- default:
- DBUG_PRINT("error", ("Unrecoverable data transfer error"));
- report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
- break;
- }
- }
+ }
} else {
SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer);
writer->updateWritePtr(sizeToSend);
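
The doSend() rewrite above drops the m_swapCounter state machine: a failed transfer now retries on the active adapter while its link is up, and otherwise performs at most a single active/standby swap. A control-flow-only sketch of the resulting policy (SCI calls and error reporting elided):

    typedef unsigned int Uint32;

    enum Outcome { RETRY, SWAPPED, FAILED };

    // Decide what to do after SCI_ERR_TRANSFER_FAILED (policy only).
    static Outcome on_transfer_failed(bool activeLinkUp, bool standbyLinkUp,
                                      Uint32 nAdapters,
                                      Uint32 &active, Uint32 &standby) {
      if (activeLinkUp)
        return RETRY;              // transient error: retry on the same adapter
      if (nAdapters == 1)
        return FAILED;             // nothing to fail over to
      if (standbyLinkUp) {
        Uint32 tmp = active;       // one-shot swap, no swap counter anymore
        active = standby;
        standby = tmp;
        return SWAPPED;
      }
      return FAILED;
    }
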
@@ -497,7 +403,6 @@ bool SCI_Transporter::doSend() {
m_sendBuffer.m_dataSize = 0;
m_sendBuffer.m_forceSendLimit = sendLimit;
}
-
} else {
/**
* If we end up here, the SCI segment is full.
@@ -552,15 +457,12 @@ void SCI_Transporter::setupLocalSegment()
DBUG_VOID_RETURN;
} //setupLocalSegment
-
-
void SCI_Transporter::setupRemoteSegment()
{
DBUG_ENTER("SCI_Transporter::setupRemoteSegment");
Uint32 sharedSize = 0;
sharedSize =4096; //start of the buffer is page aligned
-
Uint32 sizeOfBuffer = m_BufferSize;
const Uint32 slack = MAX_MESSAGE_SIZE;
sizeOfBuffer -= sharedSize;
@@ -666,7 +568,6 @@ SCI_Transporter::init_remote()
DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err));
DBUG_RETURN(false);
}
-
}
// Map the remote memory segment into program space
for(Uint32 i=0; i < m_adapters ; i++) {
@@ -679,13 +580,14 @@ SCI_Transporter::init_remote()
FLAGS,
&err);
-
- if(err!= SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err));
- //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
- DBUG_RETURN(false);
- }
+ if(err!= SCI_ERR_OK) {
+ DBUG_PRINT("error",
+ ("Cannot map a segment to the remote node %d. Error code 0x%x",
+ m_RemoteSciNodeId, err));
+ //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
+ report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
+ DBUG_RETURN(false);
+ }
}
m_mapped=true;
setupRemoteSegment();
@@ -713,7 +615,6 @@ SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
NDB_CLOSE_SOCKET(sockfd);
DBUG_RETURN(false);
}
-
if (!init_local()) {
NDB_CLOSE_SOCKET(sockfd);
DBUG_RETURN(false);
@@ -788,29 +689,9 @@ sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) {
&(m_TargetSegm[adapterid].sequence),
SCI_FLAG_FAST_BARRIER,
&err);
-
-
return err;
} // createSequence()
-
-sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) {
-
- sci_error_t err;
- /** Perform preliminary error check on an SCI adapter before starting a
- * sequence of read and write operations on the mapped segment.
- */
- m_SequenceStatus = SCIStartSequence(
- (m_TargetSegm[adapterid].sequence),
- FLAGS, &err);
-
-
- // If there still is an error then data cannot be safely send
- return err;
-} // startSequence()
-
-
-
bool SCI_Transporter::disconnectLocal()
{
DBUG_ENTER("SCI_Transporter::disconnectLocal");
@@ -878,9 +759,6 @@ SCI_Transporter::~SCI_Transporter() {
DBUG_VOID_RETURN;
} // ~SCI_Transporter()
-
-
-
void SCI_Transporter::closeSCI() {
// Termination of SCI
sci_error_t err;
@@ -897,8 +775,9 @@ void SCI_Transporter::closeSCI() {
SCIClose(activeSCIDescriptor, FLAGS, &err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x",
- err));
+ DBUG_PRINT("error",
+ ("Cannot close SCI channel to the driver. Error code 0x%x",
+ err));
}
SCITerminate();
DBUG_VOID_RETURN;
@@ -973,7 +852,6 @@ SCI_Transporter::getConnectionStatus() {
return false;
}
-
void
SCI_Transporter::setConnected() {
*m_remoteStatusFlag = SCICONNECTED;
@@ -983,7 +861,6 @@ SCI_Transporter::setConnected() {
*m_localStatusFlag = SCICONNECTED;
}
-
void
SCI_Transporter::setDisconnect() {
if(getLinkStatus(m_ActiveAdapterId))
@@ -994,7 +871,6 @@ SCI_Transporter::setDisconnect() {
}
}
-
bool
SCI_Transporter::checkConnected() {
if (*m_localStatusFlag == SCIDISCONNECT) {
@@ -1015,8 +891,9 @@ SCI_Transporter::initSCI() {
SCIInitialize(0, &error);
if(error != SCI_ERR_OK) {
DBUG_PRINT("error", ("Cannot initialize SISCI library."));
- DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
- error));
+ DBUG_PRINT("error",
+ ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
+ error));
DBUG_RETURN(false);
}
init = true;
@@ -1029,3 +906,4 @@ SCI_Transporter::get_free_buffer() const
{
return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer();
}
+
diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.hpp b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
index fbba2ac4516..f774186f238 100644
--- a/storage/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -54,12 +54,12 @@
* local segment, the SCI transporter connects to a segment created by another
 * transporter at a remote node, and then maps the remote segment into its
* virtual address space. However, since NDB Cluster relies on redundancy
- * at the network level, by using dual SCI adapters communica
- *
+ * at the network level, by using dual SCI adapters communication can be
+ * maintained even if one of the adapter cards fails (or anything on the
+ * network this adapter card is attached to, e.g. an SCI switch failure).
*
*/
-
/**
* class SCITransporter
* @brief - main class for the SCI transporter.
@@ -84,16 +84,6 @@ public:
sci_error_t createSequence(Uint32 adapterid);
- /**
- * starts a sequence for error checking.
- * The actual checking that a sequence is correct is done implicitly
- * in SCIMemCpy (in doSend).
- * @param adapterid the adapter on which to start the sequence.
- * @return SCI_ERR_OK if ok, otherwize something else.
- */
- sci_error_t startSequence(Uint32 adapterid);
-
-
/** Initiate Local Segment: create a memory segment,
* prepare a memory segment, map the local segment
* into memory space and make segment available.
@@ -159,7 +149,6 @@ private:
bool m_mapped;
bool m_initLocal;
bool m_sciinit;
- Uint32 m_swapCounter;
Uint32 m_failCounter;
/**
* For statistics on transfered packets
@@ -195,7 +184,6 @@ private:
*/
Uint32 m_reportFreq;
-
Uint32 m_adapters;
Uint32 m_numberOfRemoteNodes;
diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
index 211ace8f03d..ed1a154c944 100644
--- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -102,6 +102,10 @@ private:
virtual void updateReceiveDataPtr(Uint32 bytesRead);
virtual Uint32 get_free_buffer() const;
+
+ inline bool hasReceiveData () const {
+ return receiveBuffer.sizeOfData > 0;
+ }
protected:
/**
* Setup client/server and perform connect/accept
diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
index f35217a9726..5f5f3c17b2d 100644
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -739,16 +739,13 @@ TransporterRegistry::poll_SHM(Uint32 timeOutMillis)
Uint32
TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
{
+ bool hasdata = false;
if (false && nTCPTransporters == 0)
{
tcpReadSelectReply = 0;
return 0;
}
- struct timeval timeout;
- timeout.tv_sec = timeOutMillis / 1000;
- timeout.tv_usec = (timeOutMillis % 1000) * 1000;
-
NDB_SOCKET_TYPE maxSocketValue = -1;
// Needed for TCP/IP connections
@@ -771,8 +768,15 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
// Put the connected transporters in the socket read-set
FD_SET(socket, &tcpReadset);
}
+ hasdata |= t->hasReceiveData();
}
+ timeOutMillis = hasdata ? 0 : timeOutMillis;
+
+ struct timeval timeout;
+ timeout.tv_sec = timeOutMillis / 1000;
+ timeout.tv_usec = (timeOutMillis % 1000) * 1000;
+
// The highest socket value plus one
maxSocketValue++;
@@ -787,7 +791,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
}
#endif
- return tcpReadSelectReply;
+ return tcpReadSelectReply || hasdata;
}
#endif
@@ -796,26 +800,27 @@ void
TransporterRegistry::performReceive()
{
#ifdef NDB_TCP_TRANSPORTER
- if(tcpReadSelectReply > 0)
+ for (int i=0; i<nTCPTransporters; i++)
{
- for (int i=0; i<nTCPTransporters; i++)
- {
- checkJobBuffer();
- TCP_Transporter *t = theTCPTransporters[i];
- const NodeId nodeId = t->getRemoteNodeId();
- const NDB_SOCKET_TYPE socket = t->getSocket();
- if(is_connected(nodeId)){
- if(t->isConnected() && FD_ISSET(socket, &tcpReadset))
+ checkJobBuffer();
+ TCP_Transporter *t = theTCPTransporters[i];
+ const NodeId nodeId = t->getRemoteNodeId();
+ const NDB_SOCKET_TYPE socket = t->getSocket();
+ if(is_connected(nodeId)){
+ if(t->isConnected())
+ {
+ if (FD_ISSET(socket, &tcpReadset))
{
- const int receiveSize = t->doReceive();
- if(receiveSize > 0)
- {
- Uint32 * ptr;
- Uint32 sz = t->getReceiveData(&ptr);
- transporter_recv_from(callbackObj, nodeId);
- Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]);
- t->updateReceiveDataPtr(szUsed);
- }
+ t->doReceive();
+ }
+
+ if (t->hasReceiveData())
+ {
+ Uint32 * ptr;
+ Uint32 sz = t->getReceiveData(&ptr);
+ transporter_recv_from(callbackObj, nodeId);
+ Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]);
+ t->updateReceiveDataPtr(szUsed);
}
}
}
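
The two TransporterRegistry hunks above cooperate: poll_TCP() now builds the select() timeout only after scanning the transporters, forcing a zero timeout when any of them still holds buffered receive data, and performReceive() drains such buffered data even when select() reported no new bytes. A minimal sketch of the zero-timeout computation, assuming a POSIX struct timeval:

    #include <sys/time.h>

    // Build the select() timeout: zero if any transporter already has
    // buffered, unconsumed receive data, so the poll returns immediately.
    struct timeval make_poll_timeout(bool hasBufferedData,
                                     unsigned timeOutMillis) {
      if (hasBufferedData)
        timeOutMillis = 0;
      struct timeval timeout;
      timeout.tv_sec  = timeOutMillis / 1000;
      timeout.tv_usec = (timeOutMillis % 1000) * 1000;
      return timeout;
    }
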
diff --git a/storage/ndb/src/common/util/version.c b/storage/ndb/src/common/util/version.c
index f309a3d4ad5..56a92489131 100644
--- a/storage/ndb/src/common/util/version.c
+++ b/storage/ndb/src/common/util/version.c
@@ -20,26 +20,33 @@
#include <NdbEnv.h>
#include <NdbOut.hpp>
-Uint32 getMajor(Uint32 version) {
+Uint32 ndbGetMajor(Uint32 version) {
return (version >> 16) & 0xFF;
}
-Uint32 getMinor(Uint32 version) {
+Uint32 ndbGetMinor(Uint32 version) {
return (version >> 8) & 0xFF;
}
-Uint32 getBuild(Uint32 version) {
+Uint32 ndbGetBuild(Uint32 version) {
return (version >> 0) & 0xFF;
}
-Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build) {
- return MAKE_VERSION(major, minor, build);
+Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build) {
+ return NDB_MAKE_VERSION(major, minor, build);
}
-char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
-const char * getVersionString(Uint32 version, const char * status,
- char *buf, unsigned sz)
+const char * ndbGetOwnVersionString()
+{
+ static char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ];
+ return ndbGetVersionString(NDB_VERSION, NDB_VERSION_STATUS,
+ ndb_version_string_buf,
+ sizeof(ndb_version_string_buf));
+}
+
+const char * ndbGetVersionString(Uint32 version, const char * status,
+ char *buf, unsigned sz)
{
if (status && status[0] != 0)
basestring_snprintf(buf, sz,
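
The renamed ndbGet*/ndbMakeVersion helpers above encode one byte per field in a single word: major in bits 16-23, minor in bits 8-15, build in bits 0-7. A small worked sketch (demo names, not the NDB API):

    typedef unsigned int Uint32;

    // Pack/unpack the 0x00MMmmbb layout.
    static Uint32 makeVersionDemo(Uint32 major, Uint32 minor, Uint32 build) {
      return (major << 16) | (minor << 8) | (build << 0);
    }
    static Uint32 majorOf(Uint32 v) { return (v >> 16) & 0xFF; }
    static Uint32 minorOf(Uint32 v) { return (v >>  8) & 0xFF; }
    static Uint32 buildOf(Uint32 v) { return (v >>  0) & 0xFF; }

    // Example: makeVersionDemo(5, 1, 22) == 0x00050116, and
    // majorOf(0x00050116) == 5, minorOf(...) == 1, buildOf(...) == 22.
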
diff --git a/storage/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp
index f23a92b8010..d5c31d610cb 100644
--- a/storage/ndb/src/cw/cpcd/main.cpp
+++ b/storage/ndb/src/cw/cpcd/main.cpp
@@ -39,22 +39,22 @@ static const char *user = 0;
static struct my_option my_long_options[] =
{
{ "work-dir", 'w', "Work directory",
- (gptr*) &work_dir, (gptr*) &work_dir, 0,
+ (uchar**) &work_dir, (uchar**) &work_dir, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "port", 'p', "TCP port to listen on",
- (gptr*) &port, (gptr*) &port, 0,
+ (uchar**) &port, (uchar**) &port, 0,
GET_INT, REQUIRED_ARG, CPCD_DEFAULT_TCP_PORT, 0, 0, 0, 0, 0 },
{ "syslog", 'S', "Log events to syslog",
- (gptr*) &use_syslog, (gptr*) &use_syslog, 0,
+ (uchar**) &use_syslog, (uchar**) &use_syslog, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "logfile", 'L', "File to log events to",
- (gptr*) &logfile, (gptr*) &logfile, 0,
+ (uchar**) &logfile, (uchar**) &logfile, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "debug", 'D', "Enable debug mode",
- (gptr*) &debug, (gptr*) &debug, 0,
+ (uchar**) &debug, (uchar**) &debug, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "user", 'u', "Run as user",
- (gptr*) &user, (gptr*) &user, 0,
+ (uchar**) &user, (uchar**) &user, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
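
The cpcd/main.cpp hunk is a mechanical retyping of the my_option value pointers from gptr* to uchar**. A simplified model of why a single generic slot type works here (hypothetical struct, not the real my_option layout):

    typedef unsigned char uchar;

    struct opt_slot { const char *name; uchar **value; };  // simplified

    static const char *work_dir = 0;
    static int port = 0;

    // One table can carry pointers to differently-typed variables; the parser
    // casts back based on a per-entry type tag (GET_STR, GET_INT, ...).
    static opt_slot slots[] = {
      { "work-dir", (uchar**) &work_dir },
      { "port",     (uchar**) &port     },
    };
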
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index b3405679978..acfbf649522 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -1,12 +1,12 @@
Next QMGR 1
-Next NDBCNTR 1001
+Next NDBCNTR 1002
Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4029
Next DBLQH 5045
-Next DBDICT 6007
-Next DBDIH 7183
-Next DBTC 8040
+Next DBDICT 6008
+Next DBDIH 7186
+Next DBTC 8053
Next CMVMI 9000
Next BACKUP 10038
Next DBUTIL 11002
@@ -75,6 +75,12 @@ Delay GCP_SAVEREQ by 10 secs
7180: Crash master during master-take-over in execMASTER_LCPCONF
+7183: Crash when receiving COPY_GCIREQ
+
+7184: Crash before starting next GCP after a node failure
+
+7185: Don't reply to COPY_GCI_REQ where reason == GCP
+
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
@@ -298,6 +304,10 @@ ABORT OF TCKEYREQ
8038 : Simulate API disconnect just after SCAN_TAB_REQ
+8052 : Simulate failure of TransactionBufferMemory allocation for OI lookup
+
+8051 : Simulate failure of allocation for saveINDXKEYINFO
+
CMVMI
-----
@@ -501,6 +511,7 @@ Dbdict:
6003 Crash in participant @ CreateTabReq::Prepare
6004 Crash in participant @ CreateTabReq::Commit
6005 Crash in participant @ CreateTabReq::CreateDrop
+6007 Fail on readTableFile for READ_TAB_FILE1 (28770)
Dbtup:
4014 - handleInsert - Out of undo buffer
@@ -523,3 +534,4 @@ Dbtup:
NDBCNTR:
1000: Crash insertion on SystemError::CopyFragRef
+1001: Delay sending NODE_FAILREP (to own node), until error is cleared
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index fc698d161e0..64e2c41aa69 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -448,6 +448,41 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
filePtr.p->m_flags);
}
}
+
+ ndbout_c("m_curr_disk_write_speed: %u m_words_written_this_period: %u m_overflow_disk_write: %u",
+ m_curr_disk_write_speed, m_words_written_this_period, m_overflow_disk_write);
+ ndbout_c("m_reset_delay_used: %u m_reset_disk_speed_time: %llu",
+ m_reset_delay_used, (Uint64)m_reset_disk_speed_time);
+ for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr))
+ {
+ ndbout_c("BackupRecord %u: BackupId: %u MasterRef: %x ClientRef: %x",
+ ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
+ ndbout_c(" State: %u", ptr.p->slaveState.getState());
+ ndbout_c(" noOfByte: %llu noOfRecords: %llu",
+ ptr.p->noOfBytes, ptr.p->noOfRecords);
+ ndbout_c(" noOfLogBytes: %llu noOfLogRecords: %llu",
+ ptr.p->noOfLogBytes, ptr.p->noOfLogRecords);
+ ndbout_c(" errorCode: %u", ptr.p->errorCode);
+ BackupFilePtr filePtr;
+ for(ptr.p->files.first(filePtr); filePtr.i != RNIL;
+ ptr.p->files.next(filePtr))
+ {
+ ndbout_c(" file %u: type: %u flags: H'%x tableId: %u fragmentId: %u",
+ filePtr.i, filePtr.p->fileType, filePtr.p->m_flags,
+ filePtr.p->tableId, filePtr.p->fragmentNo);
+ }
+ if (ptr.p->slaveState.getState() == SCANNING && ptr.p->dataFilePtr != RNIL)
+ {
+ c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
+ OperationRecord & op = filePtr.p->operation;
+ Uint32 *tmp = NULL;
+ Uint32 sz = 0;
+ bool eof = FALSE;
+ bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof);
+ ndbout_c("ready: %s eof: %s", ready ? "TRUE" : "FALSE", eof ? "TRUE" : "FALSE");
+ }
+ }
+ return;
}
if(signal->theData[0] == 24){
/**
@@ -2771,6 +2806,8 @@ Backup::openFiles(Signal* signal, BackupRecordPtr ptr)
c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
filePtr.p->m_flags |= BackupFile::BF_OPENING;
+ if (c_defaults.m_o_direct)
+ req->fileFlags |= FsOpenReq::OM_DIRECT;
req->userPointer = filePtr.i;
FsOpenReq::setVersion(req->fileNumber, 2);
FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
@@ -3553,10 +3590,10 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
ScanFragReq::setAttrLen(req->requestInfo,attrLen);
+ ScanFragReq::setTupScanFlag(req->requestInfo, 1);
if (ptr.p->is_lcp())
{
ScanFragReq::setScanPrio(req->requestInfo, 1);
- ScanFragReq::setTupScanFlag(req->requestInfo, 1);
ScanFragReq::setNoDiskFlag(req->requestInfo, 1);
ScanFragReq::setLcpScanFlag(req->requestInfo, 1);
}
@@ -3745,12 +3782,31 @@ Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo)
}
bool
-Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo)
+Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record)
{
Uint32 * tmp;
const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2;
+ Uint32 sz = footSz + 1;
- if(dataBuffer.getWritePtr(&tmp, footSz + 1)) {
+ if (fill_record)
+ {
+ Uint32 * new_tmp;
+ if (!dataBuffer.getWritePtr(&tmp, sz))
+ return false;
+ new_tmp = tmp + sz;
+
+ if ((UintPtr)new_tmp & (sizeof(Page32)-1))
+ {
+ /* padding is needed to get full write */
+ new_tmp += 2 /* to fit empty header, minimum 2 words */;
+ new_tmp = (Uint32 *)(((UintPtr)new_tmp + sizeof(Page32)-1) &
+ ~(UintPtr)(sizeof(Page32)-1));
+ /* new write sz */
+ sz = new_tmp - tmp;
+ }
+ }
+
+ if(dataBuffer.getWritePtr(&tmp, sz)) {
jam();
* tmp = 0; // Finish record stream
tmp++;
@@ -3762,7 +3818,17 @@ Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo)
foot->FragmentNo = htonl(fragNo);
foot->NoOfRecords = htonl(noOfRecords);
foot->Checksum = htonl(0);
- dataBuffer.updateWritePtr(footSz + 1);
+
+ if (sz != footSz + 1)
+ {
+ tmp += footSz;
+ memset(tmp, 0, (sz - footSz - 1) * 4);
+ *tmp = htonl(BackupFormat::EMPTY_ENTRY);
+ tmp++;
+ *tmp = htonl(sz - footSz - 1);
+ }
+
+ dataBuffer.updateWritePtr(sz);
return true;
}//if
return false;
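
When fill_record is set (i.e. the file was opened with O_DIRECT), fragComplete() above rounds the final write up to a Page32 boundary and records the slack as an EMPTY_ENTRY. A stand-alone sketch of the round-up arithmetic; the 128-byte page size below is a demo value, not the real sizeof(Page32):

    #include <cstdint>
    #include <cassert>

    typedef uintptr_t UintPtr;
    struct Page32 { unsigned char data[128]; };  // demo size only

    // Align p upward to the next multiple of sizeof(Page32).
    static UintPtr round_up_to_page(UintPtr p) {
      return (p + sizeof(Page32) - 1) & ~(UintPtr)(sizeof(Page32) - 1);
    }

    int main() {
      assert(round_up_to_page(1)   == 128);
      assert(round_up_to_page(128) == 128);
      assert(round_up_to_page(129) == 256);
      return 0;
    }
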
@@ -3864,8 +3930,13 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
return;
}//if
+ BackupRecordPtr ptr LINT_SET_PTR;
+ c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
+
OperationRecord & op = filePtr.p->operation;
- if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo)) {
+ if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo,
+ c_defaults.m_o_direct))
+ {
jam();
signal->theData[0] = BackupContinueB::BUFFER_FULL_FRAG_COMPLETE;
signal->theData[1] = filePtr.i;
@@ -3875,9 +3946,6 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD;
- BackupRecordPtr ptr LINT_SET_PTR;
- c_backupPool.getPtr(ptr, filePtr.p->backupPtr);
-
if (ptr.p->is_lcp())
{
ptr.p->slaveState.setState(STOPPING);
@@ -4914,6 +4982,8 @@ Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr)
FsOpenReq::OM_CREATE |
FsOpenReq::OM_APPEND |
FsOpenReq::OM_AUTOSYNC;
+ if (c_defaults.m_o_direct)
+ req->fileFlags |= FsOpenReq::OM_DIRECT;
FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF);
req->auto_sync_size = c_defaults.m_disk_synch_size;
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
index 32f2e14ac92..3fd9b2967fd 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -240,7 +240,7 @@ public:
* Once per fragment
*/
bool newFragment(Uint32 tableId, Uint32 fragNo);
- bool fragComplete(Uint32 tableId, Uint32 fragNo);
+ bool fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record);
/**
* Once per scan frag (next) req/conf
@@ -534,6 +534,7 @@ public:
Uint32 m_disk_write_speed;
Uint32 m_disk_synch_size;
Uint32 m_diskless;
+ Uint32 m_o_direct;
};
/**
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index ace9dfe5c79..20f8f6650be 100644
--- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -32,7 +32,8 @@ struct BackupFormat {
TABLE_LIST = 4,
TABLE_DESCRIPTION = 5,
GCP_ENTRY = 6,
- FRAGMENT_INFO = 7
+ FRAGMENT_INFO = 7,
+ EMPTY_ENTRY = 8
};
struct FileHeader {
@@ -93,6 +94,13 @@ struct BackupFormat {
Uint32 NoOfRecords;
Uint32 Checksum;
};
+
+ /* optional padding for O_DIRECT */
+ struct EmptyEntry {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ /* unused data */
+ };
};
/**
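
The new EMPTY_ENTRY section gives readers of a backup data file a typed, self-describing way to skip the O_DIRECT padding. In the fragComplete() hunk above, SectionLength is written as the full word count of the padding entry, header included, so a reader can jump ahead by that many words. A sketch of such a skip, assuming the network byte order used in the diff:

    #include <arpa/inet.h>
    typedef unsigned int Uint32;

    enum { EMPTY_ENTRY = 8 };   // matches the new SectionType value

    // Words to skip if *p starts an EMPTY_ENTRY padding section, else 0.
    static Uint32 padding_words(const Uint32 *p) {
      return (ntohl(p[0]) == EMPTY_ENTRY) ? ntohl(p[1]) : 0;
    }
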
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 4faa02e494f..2cd2a8a2bee 100644
--- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -148,10 +148,13 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
c_defaults.m_disk_write_speed = 10 * (1024 * 1024);
c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024);
c_defaults.m_disk_synch_size = 4 * (1024 * 1024);
-
+ c_defaults.m_o_direct = true;
+
Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS,
&c_defaults.m_diskless));
+ ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT,
+ &c_defaults.m_o_direct);
ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR,
&c_defaults.m_disk_write_speed_sr);
ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED,
@@ -204,7 +207,7 @@ Backup::execREAD_CONFIG_REQ(Signal* signal)
/ sizeof(Page32);
 // We need to allocate an additional 2 pages. 1 page because of a bug in
// ArrayPool and another one for DICTTAINFO.
- c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2);
+ c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true);
{ // Init all tables
SLList<Table> tables(c_tablePool);
diff --git a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
index d26f36ccf40..bb0bbd6d770 100644
--- a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp
@@ -270,8 +270,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
* ptr = &Tp[Tr];
- DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d",
- Tr, Tw, Ts, Tm, sz1, * sz));
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d",
+ Tr, Tmw, Ts, Tm, sz1, * sz));
return true;
}
@@ -279,8 +279,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
if(!m_eof){
* _eof = false;
- DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> false",
- Tr, Tw, Ts, Tm, sz1));
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> false",
+ Tr, Tmw, Ts, Tm, sz1));
return false;
}
@@ -289,8 +289,8 @@ FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){
* _eof = true;
* ptr = &Tp[Tr];
- DEBUG(ndbout_c("getReadPtr() Tr: %d Tw: %d Ts: %d Tm: %d sz1: %d -> %d eof",
- Tr, Tw, Ts, Tm, sz1, * sz));
+ DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d eof",
+ Tr, Tmw, Ts, Tm, sz1, * sz));
return false;
}
@@ -316,13 +316,13 @@ FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){
if(sz1 > sz){ // Note at least 1 word of slack
* ptr = &Tp[Tw];
- DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> true",
- sz, Tr, Tw, Ts, sz1));
+ DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> true",
+ sz, Tw, sz1));
return true;
}
- DEBUG(ndbout_c("getWritePtr(%d) Tr: %d Tw: %d Ts: %d sz1: %d -> false",
- sz, Tr, Tw, Ts, sz1));
+ DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> false",
+ sz, Tw, sz1));
return false;
}
@@ -339,11 +339,15 @@ FsBuffer::updateWritePtr(Uint32 sz){
m_free -= sz;
if(Tnew < Ts){
m_writeIndex = Tnew;
+ DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d",
+ sz, m_writeIndex));
return;
}
memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2);
m_writeIndex = Tnew - Ts;
+ DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d",
+ sz, m_writeIndex));
}
inline
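
FsBuffer::updateWritePtr() above relies on the buffer keeping slack words past its logical size Ts: a write may run into the slack, after which the overshoot is copied back to the front. A minimal stand-alone model of that wrap (field names mirror the diff; the class is simplified):

    #include <cstring>
    typedef unsigned int Uint32;

    struct MiniFsBuffer {
      Uint32 *Tp;          // data area: Ts words plus slack beyond Ts
      Uint32  Ts;          // logical size in words
      Uint32  m_writeIndex;

      void updateWritePtr(Uint32 sz) {
        Uint32 Tnew = m_writeIndex + sz;
        if (Tnew < Ts) { m_writeIndex = Tnew; return; }
        // The write ran into the slack area: copy the overshoot to the front.
        std::memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2);  // << 2: words to bytes
        m_writeIndex = Tnew - Ts;
      }
    };
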
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 3fe85de73e6..7a992587010 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -133,6 +133,7 @@ Cmvmi::~Cmvmi()
#ifdef ERROR_INSERT
NodeBitmask c_error_9000_nodes_mask;
+extern Uint32 MAX_RECEIVED_SIGNALS;
#endif
void Cmvmi::execNDB_TAMPER(Signal* signal)
@@ -162,6 +163,22 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
kill(getpid(), SIGABRT);
}
#endif
+
+#ifdef ERROR_INSERT
+ if (signal->theData[0] == 9003)
+ {
+ if (MAX_RECEIVED_SIGNALS < 1024)
+ {
+ MAX_RECEIVED_SIGNALS = 1024;
+ }
+ else
+ {
+ MAX_RECEIVED_SIGNALS = 1 + (rand() % 128);
+ }
+ ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS);
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+#endif
}//execNDB_TAMPER()
void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 4b2926d4981..63d22bd0a37 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -2137,6 +2137,7 @@ Dbacc::placeReadInLockQueue(OperationrecPtr lockOwnerPtr)
if (same && (lastbits & Operationrec::OP_ACC_LOCK_MODE))
{
jam();
+ opbits |= Operationrec::OP_LOCK_MODE; // Upgrade to X-lock
goto checkop;
}
@@ -5200,9 +5201,9 @@ void Dbacc::execEXPANDCHECK2(Signal* signal)
{
jamEntry();
- if(refToBlock(signal->getSendersBlockRef()) == DBLQH){
+ if(refToBlock(signal->getSendersBlockRef()) == DBLQH)
+ {
jam();
- reenable_expand_after_redo_log_exection_complete(signal);
return;
}
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 66cd523f333..569958a6aa9 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -86,6 +86,9 @@
#include <signaldata/CreateObj.hpp>
#include <SLList.hpp>
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
#define ZNOT_FOUND 626
#define ZALREADYEXIST 630
@@ -188,7 +191,7 @@ struct {
0, 0, 0, 0,
&Dbdict::drop_undofile_prepare_start, 0,
0,
- 0, 0,
+ 0, &Dbdict::drop_undofile_commit_complete,
0, 0, 0
}
};
@@ -695,6 +698,9 @@ void Dbdict::execFSCLOSECONF(Signal* signal)
case FsConnectRecord::OPEN_READ_SCHEMA2:
openSchemaFile(signal, 1, fsPtr.i, false, false);
break;
+ case FsConnectRecord::OPEN_READ_TAB_FILE2:
+ openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ break;
default:
jamLine((fsPtr.p->fsState & 0xFFF));
ndbrequire(false);
@@ -796,6 +802,15 @@ void Dbdict::execFSREADCONF(Signal* signal)
readSchemaConf(signal ,fsPtr);
break;
case FsConnectRecord::READ_TAB_FILE1:
+ if(ERROR_INSERTED(6007)){
+ jam();
+ FsRef * const fsRef = (FsRef *)&signal->theData[0];
+ fsRef->userPointer = fsConf->userPointer;
+ fsRef->setErrorCode(fsRef->errorCode, NDBD_EXIT_AFS_UNKNOWN);
+ fsRef->osErrorCode = ~0; // Indicate local error
+ execFSREADREF(signal);
+ return;
+ }//Testing how DICT behaves if read of file 1 fails (Bug#28770)
case FsConnectRecord::READ_TAB_FILE2:
jam();
readTableConf(signal ,fsPtr);
@@ -1070,8 +1085,11 @@ void Dbdict::readTableConf(Signal* signal,
void Dbdict::readTableRef(Signal* signal,
FsConnectRecordPtr fsPtr)
{
+ /**
+ * First close corrupt file
+ */
fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2;
- openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false);
+ closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
return;
}//Dbdict::readTableRef()
@@ -1371,18 +1389,36 @@ void Dbdict::readSchemaConf(Signal* signal,
for (Uint32 n = 0; n < xsf->noOfPages; n++) {
SchemaFile * sf = &xsf->schemaPage[n];
- bool ok =
- memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
- sf->FileSize != 0 &&
- sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
- sf->FileSize == sf0->FileSize &&
- sf->PageNumber == n &&
- computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
- ndbrequireErr(ok || !crashInd, NDBD_EXIT_SR_SCHEMAFILE);
- if (! ok) {
- jam();
+ bool ok = false;
+ const char *reason;
+ if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0)
+ { jam(); reason = "magic code"; }
+ else if (sf->FileSize == 0)
+ { jam(); reason = "file size == 0"; }
+ else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0)
+ { jam(); reason = "invalid size multiple"; }
+ else if (sf->FileSize != sf0->FileSize)
+ { jam(); reason = "invalid size"; }
+ else if (sf->PageNumber != n)
+ { jam(); reason = "invalid page number"; }
+ else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0)
+ { jam(); reason = "invalid checksum"; }
+ else
+ ok = true;
+
+ if (!ok)
+ {
+ char reason_msg[128];
+ snprintf(reason_msg, sizeof(reason_msg),
+ "schema file corrupt, page %u (%s, "
+ "sz=%u sz0=%u pn=%u)",
+ n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber);
+ if (crashInd)
+ progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg);
ndbrequireErr(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1,
NDBD_EXIT_SR_SCHEMAFILE);
+ jam();
+ infoEvent("primary %s, trying backup", reason_msg);
readSchemaRef(signal, fsPtr);
return;
}
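
The readSchemaConf() rewrite replaces one compound boolean with a chain of checks that each record a reason string, so the corrupt page is reported precisely before falling back to the backup schema file. A sketch of the same first-failure-names-the-reason pattern, with demo page fields and constants:

    struct Page { unsigned size, number, checksum; };

    // Validate one page; on failure, *why names the first failing check.
    static bool validate(const Page &p, unsigned expectIdx, const char **why) {
      if (p.size == 0)           { *why = "file size == 0";        return false; }
      if (p.size % 512 != 0)     { *why = "invalid size multiple"; return false; }
      if (p.number != expectIdx) { *why = "invalid page number";   return false; }
      if (p.checksum != 0)       { *why = "invalid checksum";      return false; }
      return true;
    }

    // The caller can then log "schema file corrupt, page N (<why>)" and try
    // the backup copy, exactly as readSchemaConf() does above.
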
@@ -3209,9 +3245,7 @@ Dbdict::restartDropTab(Signal* signal, Uint32 tableId,
case DictTabInfo::LogfileGroup:
case DictTabInfo::Datafile:
case DictTabInfo::Undofile:
- warningEvent("Dont drop object: %d", tableId);
- c_restartRecord.activeTable++;
- checkSchemaStatus(signal);
+ restartDropObj(signal, tableId, old_entry);
return;
}
@@ -3254,6 +3288,9 @@ Dbdict::restartDropTab_complete(Signal* signal,
checkSchemaStatus(signal);
}
+/**
+ * Create Obj during NR/SR
+ */
void
Dbdict::restartCreateObj(Signal* signal,
Uint32 tableId,
@@ -3482,6 +3519,170 @@ Dbdict::restartCreateObj_commit_complete_done(Signal* signal,
checkSchemaStatus(signal);
}
+/**
+ * Drop object during NR/SR
+ */
+void
+Dbdict::restartDropObj(Signal* signal,
+ Uint32 tableId,
+ const SchemaFile::TableEntry * entry)
+{
+ jam();
+
+ DropObjRecordPtr dropObjPtr;
+ ndbrequire(c_opDropObj.seize(dropObjPtr));
+
+ const Uint32 key = ++c_opRecordSequence;
+ dropObjPtr.p->key = key;
+ c_opDropObj.add(dropObjPtr);
+ dropObjPtr.p->m_errorCode = 0;
+ dropObjPtr.p->m_senderRef = reference();
+ dropObjPtr.p->m_senderData = tableId;
+ dropObjPtr.p->m_clientRef = reference();
+ dropObjPtr.p->m_clientData = tableId;
+
+ dropObjPtr.p->m_obj_id = tableId;
+ dropObjPtr.p->m_obj_type = entry->m_tableType;
+ dropObjPtr.p->m_obj_version = entry->m_tableVersion;
+
+ dropObjPtr.p->m_callback.m_callbackData = key;
+ dropObjPtr.p->m_callback.m_callbackFunction=
+ safe_cast(&Dbdict::restartDropObj_prepare_start_done);
+
+ ndbout_c("Dropping %d %d", tableId, entry->m_tableType);
+ switch(entry->m_tableType){
+ case DictTabInfo::Tablespace:
+ case DictTabInfo::LogfileGroup:{
+ jam();
+ Ptr<Filegroup> fg_ptr;
+ ndbrequire(c_filegroup_hash.find(fg_ptr, tableId));
+ dropObjPtr.p->m_obj_ptr_i = fg_ptr.i;
+ dropObjPtr.p->m_vt_index = 3;
+ break;
+ }
+ case DictTabInfo::Datafile:{
+ jam();
+ Ptr<File> file_ptr;
+ dropObjPtr.p->m_vt_index = 2;
+ ndbrequire(c_file_hash.find(file_ptr, tableId));
+ dropObjPtr.p->m_obj_ptr_i = file_ptr.i;
+ break;
+ }
+ case DictTabInfo::Undofile:{
+ jam();
+ Ptr<File> file_ptr;
+ dropObjPtr.p->m_vt_index = 4;
+ ndbrequire(c_file_hash.find(file_ptr, tableId));
+ dropObjPtr.p->m_obj_ptr_i = file_ptr.i;
+
+ /**
+ * Undofiles are only removed from the logfile group's file list,
+ * as dropping undofiles is currently not supported...
+ * the file will be dropped by lgman when the filegroup is dropped
+ */
+ dropObjPtr.p->m_callback.m_callbackFunction=
+ safe_cast(&Dbdict::restartDropObj_commit_complete_done);
+
+ if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
+ (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
+ (signal, dropObjPtr.p);
+ else
+ execute(signal, dropObjPtr.p->m_callback, 0);
+ return;
+ }
+ default:
+ jamLine(entry->m_tableType);
+ ndbrequire(false);
+ }
+
+ if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start)
+ (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start)
+ (signal, dropObjPtr.p);
+ else
+ execute(signal, dropObjPtr.p->m_callback, 0);
+}
+
+void
+Dbdict::restartDropObj_prepare_start_done(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
+ ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ ndbrequire(dropObjPtr.p->m_errorCode == 0);
+
+ dropObjPtr.p->m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartDropObj_prepare_complete_done);
+
+ if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete)
+ (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete)
+ (signal, dropObjPtr.p);
+ else
+ execute(signal, dropObjPtr.p->m_callback, 0);
+}
+
+void
+Dbdict::restartDropObj_prepare_complete_done(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
+ ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ ndbrequire(dropObjPtr.p->m_errorCode == 0);
+
+ dropObjPtr.p->m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartDropObj_commit_start_done);
+
+ if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start)
+ (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start)
+ (signal, dropObjPtr.p);
+ else
+ execute(signal, dropObjPtr.p->m_callback, 0);
+}
+
+void
+Dbdict::restartDropObj_commit_start_done(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
+ ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ ndbrequire(dropObjPtr.p->m_errorCode == 0);
+
+ dropObjPtr.p->m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::restartDropObj_commit_complete_done);
+
+ if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
+ (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
+ (signal, dropObjPtr.p);
+ else
+ execute(signal, dropObjPtr.p->m_callback, 0);
+}
+
+
+void
+Dbdict::restartDropObj_commit_complete_done(Signal* signal,
+ Uint32 callbackData,
+ Uint32 returnCode)
+{
+ jam();
+ ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
+ ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ ndbrequire(dropObjPtr.p->m_errorCode == 0);
+
+ c_opDropObj.release(dropObjPtr);
+
+ c_restartRecord.activeTable++;
+ checkSchemaStatus(signal);
+}
+
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* MODULE: NODE FAILURE HANDLING ------------------------- */
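
restartDropObj() and its *_done callbacks above form a four-phase chain (prepare start/complete, commit start/complete) driven through the f_dict_op vtable; every step either invokes the block-specific hook or fires the continuation directly so the chain never stalls. The recurring idiom, reduced to a sketch with simplified types:

    typedef void (*Hook)(void *op);
    typedef void (*Cont)(void *op);

    // Call the per-object-type hook if one is registered; otherwise fire the
    // continuation at once so the prepare/commit chain still advances.
    static void step(Hook hook, Cont cont, void *op) {
      if (hook)
        hook(op);   // the hook is expected to invoke the callback when done
      else
        cont(op);
    }
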
@@ -13968,7 +14169,8 @@ Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId)
//******************************************
void
-Dbdict::execCREATE_FILE_REQ(Signal* signal){
+Dbdict::execCREATE_FILE_REQ(Signal* signal)
+{
jamEntry();
if(!assembleFragments(signal)){
@@ -14013,13 +14215,14 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr)){
+ jam();
ref->errorCode = CreateFileRef::Busy;
ref->status = 0;
ref->errorKey = 0;
ref->errorLine = __LINE__;
break;
}
-
+ jam();
const Uint32 trans_key = ++c_opRecordSequence;
trans_ptr.p->key = trans_key;
trans_ptr.p->m_senderRef = senderRef;
@@ -14048,6 +14251,7 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
{
Uint32 objId = getFreeObjId(0);
if (objId == RNIL) {
+ jam();
ref->errorCode = CreateFileRef::NoMoreObjectRecords;
ref->status = 0;
ref->errorKey = 0;
@@ -14072,7 +14276,6 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
CreateObjReq::SignalLength, JBB);
c_blockState = BS_CREATE_TAB;
-
return;
} while(0);
@@ -14083,7 +14286,8 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
}
void
-Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
+Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal)
+{
jamEntry();
if(!assembleFragments(signal)){
@@ -14127,13 +14331,14 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr)){
+ jam();
ref->errorCode = CreateFilegroupRef::Busy;
ref->status = 0;
ref->errorKey = 0;
ref->errorLine = __LINE__;
break;
}
-
+ jam();
const Uint32 trans_key = ++c_opRecordSequence;
trans_ptr.p->key = trans_key;
trans_ptr.p->m_senderRef = senderRef;
@@ -14159,6 +14364,7 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
{
Uint32 objId = getFreeObjId(0);
if (objId == RNIL) {
+ jam();
ref->errorCode = CreateFilegroupRef::NoMoreObjectRecords;
ref->status = 0;
ref->errorKey = 0;
@@ -14183,7 +14389,6 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
CreateObjReq::SignalLength, JBB);
c_blockState = BS_CREATE_TAB;
-
return;
} while(0);
@@ -14219,7 +14424,8 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
break;
}
- if (c_blockState != BS_IDLE){
+ if (c_blockState != BS_IDLE)
+ {
jam();
ref->errorCode = DropFileRef::Busy;
ref->errorKey = 0;
@@ -14229,6 +14435,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
if (checkSingleUserMode(senderRef))
{
+ jam();
ref->errorCode = DropFileRef::SingleUser;
ref->errorKey = 0;
ref->errorLine = __LINE__;
@@ -14238,6 +14445,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
Ptr<File> file_ptr;
if (!c_file_hash.find(file_ptr, objId))
{
+ jam();
ref->errorCode = DropFileRef::NoSuchFile;
ref->errorLine = __LINE__;
break;
@@ -14245,6 +14453,7 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
if (file_ptr.p->m_version != version)
{
+ jam();
ref->errorCode = DropFileRef::InvalidSchemaObjectVersion;
ref->errorLine = __LINE__;
break;
@@ -14253,10 +14462,12 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr))
{
+ jam();
ref->errorCode = DropFileRef::Busy;
ref->errorLine = __LINE__;
break;
}
+ jam();
const Uint32 trans_key = ++c_opRecordSequence;
trans_ptr.p->key = trans_key;
@@ -14292,7 +14503,6 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
DropObjReq::SignalLength, JBB);
c_blockState = BS_CREATE_TAB;
-
return;
} while(0);
@@ -14320,7 +14530,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
Uint32 version = req->filegroup_version;
do {
- if(getOwnNodeId() != c_masterNodeId){
+ if(getOwnNodeId() != c_masterNodeId)
+ {
jam();
ref->errorCode = DropFilegroupRef::NotMaster;
ref->errorKey = 0;
@@ -14328,7 +14539,8 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
break;
}
- if (c_blockState != BS_IDLE){
+ if (c_blockState != BS_IDLE)
+ {
jam();
ref->errorCode = DropFilegroupRef::Busy;
ref->errorKey = 0;
@@ -14338,6 +14550,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
if (checkSingleUserMode(senderRef))
{
+ jam();
ref->errorCode = DropFilegroupRef::SingleUser;
ref->errorKey = 0;
ref->errorLine = __LINE__;
@@ -14347,6 +14560,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
Ptr<Filegroup> filegroup_ptr;
if (!c_filegroup_hash.find(filegroup_ptr, objId))
{
+ jam();
ref->errorCode = DropFilegroupRef::NoSuchFilegroup;
ref->errorLine = __LINE__;
break;
@@ -14354,6 +14568,7 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
if (filegroup_ptr.p->m_version != version)
{
+ jam();
ref->errorCode = DropFilegroupRef::InvalidSchemaObjectVersion;
ref->errorLine = __LINE__;
break;
@@ -14362,10 +14577,12 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr))
{
+ jam();
ref->errorCode = DropFilegroupRef::Busy;
ref->errorLine = __LINE__;
break;
}
+ jam();
const Uint32 trans_key = ++c_opRecordSequence;
trans_ptr.p->key = trans_key;
@@ -14401,7 +14618,6 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
DropObjReq::SignalLength, JBB);
c_blockState = BS_CREATE_TAB;
-
return;
} while(0);
@@ -14412,15 +14628,15 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
}
void
-Dbdict::execCREATE_OBJ_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execCREATE_OBJ_REF(Signal* signal)
+{
CreateObjRef * const ref = (CreateObjRef*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, ref->senderData));
-
if(ref->errorCode != CreateObjRef::NF_FakeErrorREF){
+ jam();
trans_ptr.p->setErrorCode(ref->errorCode);
}
Uint32 node = refToNode(ref->senderRef);
@@ -14428,12 +14644,12 @@ Dbdict::execCREATE_OBJ_REF(Signal* signal){
}
void
-Dbdict::execCREATE_OBJ_CONF(Signal* signal){
- jamEntry();
-
- CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr();
-
+Dbdict::execCREATE_OBJ_CONF(Signal* signal)
+{
Ptr<SchemaTransaction> trans_ptr;
+ CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr();
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, conf->senderData));
schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef));
}
@@ -14443,6 +14659,7 @@ Dbdict::schemaOp_reply(Signal* signal,
SchemaTransaction * trans_ptr_p,
Uint32 nodeId)
{
+ jam();
{
SafeCounter tmp(c_counterMgr, trans_ptr_p->m_counter);
if(!tmp.clearWaitingFor(nodeId)){
@@ -14453,10 +14670,8 @@ Dbdict::schemaOp_reply(Signal* signal,
switch(trans_ptr_p->m_op.m_state){
case DictObjOp::Preparing:{
-
if(trans_ptr_p->m_errorCode != 0)
{
- jam();
/**
* Failed to prepare on atleast one node -> abort on all
*/
@@ -14466,10 +14681,16 @@ Dbdict::schemaOp_reply(Signal* signal,
safe_cast(&Dbdict::trans_abort_start_done);
if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start)
+ {
+ jam();
(this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start)
(signal, trans_ptr_p);
+ }
else
+ {
+ jam();
execute(signal, trans_ptr_p->m_callback, 0);
+ }
return;
}
@@ -14479,14 +14700,19 @@ Dbdict::schemaOp_reply(Signal* signal,
safe_cast(&Dbdict::trans_commit_start_done);
if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start)
+ {
+ jam();
(this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start)
(signal, trans_ptr_p);
+ }
else
+ {
+ jam();
execute(signal, trans_ptr_p->m_callback, 0);
+ }
return;
}
case DictObjOp::Committing: {
- jam();
ndbrequire(trans_ptr_p->m_errorCode == 0);
trans_ptr_p->m_op.m_state = DictObjOp::Committed;
@@ -14495,31 +14721,42 @@ Dbdict::schemaOp_reply(Signal* signal,
safe_cast(&Dbdict::trans_commit_complete_done);
if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete)
+ {
+ jam();
(this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete)
(signal, trans_ptr_p);
+ }
else
- execute(signal, trans_ptr_p->m_callback, 0);
+ {
+ jam();
+ execute(signal, trans_ptr_p->m_callback, 0);
+ }
return;
}
case DictObjOp::Aborting:{
- jam();
-
trans_ptr_p->m_op.m_state = DictObjOp::Committed;
trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key;
trans_ptr_p->m_callback.m_callbackFunction=
safe_cast(&Dbdict::trans_abort_complete_done);
if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete)
+ {
+ jam();
(this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete)
(signal, trans_ptr_p);
+ }
else
- execute(signal, trans_ptr_p->m_callback, 0);
+ {
+ jam();
+ execute(signal, trans_ptr_p->m_callback, 0);
+ }
return;
}
case DictObjOp::Defined:
case DictObjOp::Prepared:
case DictObjOp::Committed:
case DictObjOp::Aborted:
+ jam();
break;
}
ndbrequire(false);
@@ -14528,14 +14765,13 @@ Dbdict::schemaOp_reply(Signal* signal,
void
Dbdict::trans_commit_start_done(Signal* signal,
Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
+ Uint32 retValue)
+{
Ptr<SchemaTransaction> trans_ptr;
+
+ jam();
+ ndbrequire(retValue == 0);
ndbrequire(c_Trans.find(trans_ptr, callbackData));
-
NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes);
SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter);
tmp.init<DictCommitRef>(rg, GSN_DICT_COMMIT_REF, trans_ptr.p->key);
@@ -14546,27 +14782,26 @@ Dbdict::trans_commit_start_done(Signal* signal,
req->op_key = trans_ptr.p->m_op.m_key;
sendSignal(rg, GSN_DICT_COMMIT_REQ, signal, DictCommitReq::SignalLength,
JBB);
-
trans_ptr.p->m_op.m_state = DictObjOp::Committing;
}
void
Dbdict::trans_commit_complete_done(Signal* signal,
Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
+ Uint32 retValue)
+{
Ptr<SchemaTransaction> trans_ptr;
+
+ jam();
+ ndbrequire(retValue == 0);
ndbrequire(c_Trans.find(trans_ptr, callbackData));
switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){
case GSN_CREATE_FILEGROUP_REQ:{
FilegroupPtr fg_ptr;
+ jam();
ndbrequire(c_filegroup_hash.find(fg_ptr, trans_ptr.p->m_op.m_obj_id));
- //
CreateFilegroupConf * conf = (CreateFilegroupConf*)signal->getDataPtr();
conf->senderRef = reference();
conf->senderData = trans_ptr.p->m_senderData;
@@ -14576,11 +14811,11 @@ Dbdict::trans_commit_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_CONF, signal,
CreateFilegroupConf::SignalLength, JBB);
-
break;
}
case GSN_CREATE_FILE_REQ:{
FilePtr f_ptr;
+ jam();
ndbrequire(c_file_hash.find(f_ptr, trans_ptr.p->m_op.m_obj_id));
CreateFileConf * conf = (CreateFileConf*)signal->getDataPtr();
conf->senderRef = reference();
@@ -14591,11 +14826,11 @@ Dbdict::trans_commit_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal,
CreateFileConf::SignalLength, JBB);
-
break;
}
case GSN_DROP_FILE_REQ:{
DropFileConf * conf = (DropFileConf*)signal->getDataPtr();
+ jam();
conf->senderRef = reference();
conf->senderData = trans_ptr.p->m_senderData;
conf->fileId = trans_ptr.p->m_op.m_obj_id;
@@ -14607,6 +14842,7 @@ Dbdict::trans_commit_complete_done(Signal* signal,
}
case GSN_DROP_FILEGROUP_REQ:{
DropFilegroupConf * conf = (DropFilegroupConf*)signal->getDataPtr();
+ jam();
conf->senderRef = reference();
conf->senderData = trans_ptr.p->m_senderData;
conf->filegroupId = trans_ptr.p->m_op.m_obj_id;
@@ -14629,12 +14865,12 @@ Dbdict::trans_commit_complete_done(Signal* signal,
void
Dbdict::trans_abort_start_done(Signal* signal,
Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
+ Uint32 retValue)
+{
Ptr<SchemaTransaction> trans_ptr;
+
+ jam();
+ ndbrequire(retValue == 0);
ndbrequire(c_Trans.find(trans_ptr, callbackData));
NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes);
@@ -14652,12 +14888,12 @@ Dbdict::trans_abort_start_done(Signal* signal,
void
Dbdict::trans_abort_complete_done(Signal* signal,
Uint32 callbackData,
- Uint32 retValue){
- jamEntry();
-
- ndbrequire(retValue == 0);
-
+ Uint32 retValue)
+{
Ptr<SchemaTransaction> trans_ptr;
+
+ jam();
+ ndbrequire(retValue == 0);
ndbrequire(c_Trans.find(trans_ptr, callbackData));
switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){
@@ -14665,6 +14901,7 @@ Dbdict::trans_abort_complete_done(Signal* signal,
{
//
CreateFilegroupRef * ref = (CreateFilegroupRef*)signal->getDataPtr();
+ jam();
ref->senderRef = reference();
ref->senderData = trans_ptr.p->m_senderData;
ref->masterNodeId = c_masterNodeId;
@@ -14676,12 +14913,12 @@ Dbdict::trans_abort_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_REF, signal,
CreateFilegroupRef::SignalLength, JBB);
-
break;
}
case GSN_CREATE_FILE_REQ:
{
CreateFileRef * ref = (CreateFileRef*)signal->getDataPtr();
+ jam();
ref->senderRef = reference();
ref->senderData = trans_ptr.p->m_senderData;
ref->masterNodeId = c_masterNodeId;
@@ -14693,12 +14930,12 @@ Dbdict::trans_abort_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_REF, signal,
CreateFileRef::SignalLength, JBB);
-
break;
}
case GSN_DROP_FILE_REQ:
{
DropFileRef * ref = (DropFileRef*)signal->getDataPtr();
+ jam();
ref->senderRef = reference();
ref->senderData = trans_ptr.p->m_senderData;
ref->masterNodeId = c_masterNodeId;
@@ -14709,13 +14946,13 @@ Dbdict::trans_abort_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILE_REF, signal,
DropFileRef::SignalLength, JBB);
-
break;
}
case GSN_DROP_FILEGROUP_REQ:
{
//
DropFilegroupRef * ref = (DropFilegroupRef*)signal->getDataPtr();
+ jam();
ref->senderRef = reference();
ref->senderData = trans_ptr.p->m_senderData;
ref->masterNodeId = c_masterNodeId;
@@ -14726,7 +14963,6 @@ Dbdict::trans_abort_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILEGROUP_REF, signal,
DropFilegroupRef::SignalLength, JBB);
-
break;
}
default:
@@ -14740,7 +14976,8 @@ Dbdict::trans_abort_complete_done(Signal* signal,
}
void
-Dbdict::execCREATE_OBJ_REQ(Signal* signal){
+Dbdict::execCREATE_OBJ_REQ(Signal* signal)
+{
jamEntry();
if(!assembleFragments(signal)){
@@ -14785,6 +15022,7 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){
switch(objType){
case DictTabInfo::Tablespace:
case DictTabInfo::LogfileGroup:
+ jam();
createObjPtr.p->m_vt_index = 0;
break;
case DictTabInfo::Datafile:
@@ -14793,7 +15031,11 @@ Dbdict::execCREATE_OBJ_REQ(Signal* signal){
* Use restart code to impl. ForceCreateFile
*/
if (requestInfo & CreateFileReq::ForceCreateFile)
- createObjPtr.p->m_restart= 2;
+ {
+ jam();
+ createObjPtr.p->m_restart= 2;
+ }
+ jam();
createObjPtr.p->m_vt_index = 1;
break;
default:
@@ -14809,10 +15051,10 @@ void
Dbdict::execDICT_COMMIT_REQ(Signal* signal)
{
DictCommitReq* req = (DictCommitReq*)signal->getDataPtr();
-
Ptr<SchemaOp> op;
- ndbrequire(c_schemaOp.find(op, req->op_key));
+ jamEntry();
+ ndbrequire(c_schemaOp.find(op, req->op_key));
(this->*f_dict_op[op.p->m_vt_index].m_commit)(signal, op.p);
}
@@ -14820,23 +15062,23 @@ void
Dbdict::execDICT_ABORT_REQ(Signal* signal)
{
DictAbortReq* req = (DictAbortReq*)signal->getDataPtr();
-
Ptr<SchemaOp> op;
- ndbrequire(c_schemaOp.find(op, req->op_key));
+ jamEntry();
+ ndbrequire(c_schemaOp.find(op, req->op_key));
(this->*f_dict_op[op.p->m_vt_index].m_abort)(signal, op.p);
}
void
-Dbdict::execDICT_COMMIT_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execDICT_COMMIT_REF(Signal* signal)
+{
DictCommitRef * const ref = (DictCommitRef*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, ref->senderData));
-
if(ref->errorCode != DictCommitRef::NF_FakeErrorREF){
+ jam();
trans_ptr.p->setErrorCode(ref->errorCode);
}
Uint32 node = refToNode(ref->senderRef);
@@ -14844,26 +15086,26 @@ Dbdict::execDICT_COMMIT_REF(Signal* signal){
}
void
-Dbdict::execDICT_COMMIT_CONF(Signal* signal){
- jamEntry();
-
- DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr();
-
+Dbdict::execDICT_COMMIT_CONF(Signal* signal)
+{
Ptr<SchemaTransaction> trans_ptr;
+ DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr();
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, conf->senderData));
schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef));
}
void
-Dbdict::execDICT_ABORT_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execDICT_ABORT_REF(Signal* signal)
+{
DictAbortRef * const ref = (DictAbortRef*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, ref->senderData));
-
if(ref->errorCode != DictAbortRef::NF_FakeErrorREF){
+ jam();
trans_ptr.p->setErrorCode(ref->errorCode);
}
Uint32 node = refToNode(ref->senderRef);
@@ -14871,31 +15113,28 @@ Dbdict::execDICT_ABORT_REF(Signal* signal){
}
void
-Dbdict::execDICT_ABORT_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execDICT_ABORT_CONF(Signal* signal)
+{
DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, conf->senderData));
schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef));
}
-
-
void
Dbdict::createObj_prepare_start_done(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+ SegmentedSectionPtr objInfoPtr;
ndbrequire(returnCode == 0);
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
-
- SegmentedSectionPtr objInfoPtr;
+ jam();
getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i);
-
if(createObjPtr.p->m_errorCode != 0){
jam();
createObjPtr.p->m_obj_info_ptr_i= RNIL;
@@ -14923,19 +15162,19 @@ Dbdict::createObj_prepare_start_done(Signal* signal,
void
Dbdict::createObj_writeSchemaConf1(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
- jam();
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+ Callback callback;
+ SegmentedSectionPtr objInfoPtr;
+ jam();
ndbrequire(returnCode == 0);
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
- Callback callback;
callback.m_callbackData = createObjPtr.p->key;
callback.m_callbackFunction = safe_cast(&Dbdict::createObj_writeObjConf);
- SegmentedSectionPtr objInfoPtr;
getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i);
writeTableFile(signal, createObjPtr.p->m_obj_id, objInfoPtr, &callback);
@@ -14947,14 +15186,13 @@ Dbdict::createObj_writeSchemaConf1(Signal* signal,
void
Dbdict::createObj_writeObjConf(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
- jam();
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+ jam();
ndbrequire(returnCode == 0);
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
-
createObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::createObj_prepare_complete_done);
(this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete)
@@ -14964,12 +15202,12 @@ Dbdict::createObj_writeObjConf(Signal* signal,
void
Dbdict::createObj_prepare_complete_done(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+
jam();
-
ndbrequire(returnCode == 0);
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
//@todo check for master failed
@@ -14998,28 +15236,33 @@ Dbdict::createObj_prepare_complete_done(Signal* signal,
}
void
-Dbdict::createObj_commit(Signal * signal, SchemaOp * op){
- jam();
-
+Dbdict::createObj_commit(Signal * signal, SchemaOp * op)
+{
OpCreateObj * createObj = (OpCreateObj*)op;
+
createObj->m_callback.m_callbackFunction =
safe_cast(&Dbdict::createObj_commit_start_done);
if (f_dict_op[createObj->m_vt_index].m_commit_start)
+ {
+ jam();
(this->*f_dict_op[createObj->m_vt_index].m_commit_start)(signal, createObj);
+ }
else
+ {
+ jam();
execute(signal, createObj->m_callback, 0);
+ }
}
void
Dbdict::createObj_commit_start_done(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
jam();
-
ndbrequire(returnCode == 0);
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
Uint32 objId = createObjPtr.p->m_obj_id;
@@ -15039,29 +15282,35 @@ Dbdict::createObj_commit_start_done(Signal* signal,
void
Dbdict::createObj_writeSchemaConf2(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
- jam();
-
- CreateObjRecordPtr createObjPtr;
- ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+ ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
createObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::createObj_commit_complete_done);
if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete)
+ {
+ jam();
(this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete)
(signal, createObjPtr.p);
+ }
else
+ {
+ jam();
execute(signal, createObjPtr.p->m_callback, 0);
+ }
}
void
Dbdict::createObj_commit_complete_done(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+
jam();
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
//@todo check error
@@ -15079,27 +15328,31 @@ Dbdict::createObj_commit_complete_done(Signal* signal,
void
Dbdict::createObj_abort(Signal* signal, SchemaOp* op)
{
- jam();
-
OpCreateObj * createObj = (OpCreateObj*)op;
createObj->m_callback.m_callbackFunction =
safe_cast(&Dbdict::createObj_abort_start_done);
if (f_dict_op[createObj->m_vt_index].m_abort_start)
+ {
+ jam();
(this->*f_dict_op[createObj->m_vt_index].m_abort_start)(signal, createObj);
+ }
else
+ {
+ jam();
execute(signal, createObj->m_callback, 0);
+ }
}
void
Dbdict::createObj_abort_start_done(Signal* signal,
Uint32 callbackData,
- Uint32 returnCode){
+ Uint32 returnCode)
+{
+ CreateObjRecordPtr createObjPtr;
+
jam();
-
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
-
XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
SchemaFile::TableEntry objEntry = * getTableEntry(xsf,
createObjPtr.p->m_obj_id);
@@ -15118,19 +15371,23 @@ Dbdict::createObj_abort_writeSchemaConf(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- jam();
+ CreateObjRecordPtr createObjPtr;
- CreateObjRecordPtr createObjPtr;
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
-
createObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::createObj_abort_complete_done);
if (f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete)
+ {
+ jam();
(this->*f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete)
(signal, createObjPtr.p);
+ }
else
+ {
+ jam();
execute(signal, createObjPtr.p->m_callback, 0);
+ }
}
void
@@ -15138,9 +15395,9 @@ Dbdict::createObj_abort_complete_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- jam();
+ CreateObjRecordPtr createObjPtr;
- CreateObjRecordPtr createObjPtr;
+ jam();
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData));
DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr();
@@ -15153,7 +15410,8 @@ Dbdict::createObj_abort_complete_done(Signal* signal,
}
void
-Dbdict::execDROP_OBJ_REQ(Signal* signal){
+Dbdict::execDROP_OBJ_REQ(Signal* signal)
+{
jamEntry();
if(!assembleFragments(signal)){
@@ -15191,8 +15449,9 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){
case DictTabInfo::Tablespace:
case DictTabInfo::LogfileGroup:
{
- dropObjPtr.p->m_vt_index = 3;
Ptr<Filegroup> fg_ptr;
+ jam();
+ dropObjPtr.p->m_vt_index = 3;
ndbrequire(c_filegroup_hash.find(fg_ptr, objId));
dropObjPtr.p->m_obj_ptr_i = fg_ptr.i;
break;
@@ -15200,15 +15459,19 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){
}
case DictTabInfo::Datafile:
{
- dropObjPtr.p->m_vt_index = 2;
Ptr<File> file_ptr;
+ jam();
+ dropObjPtr.p->m_vt_index = 2;
ndbrequire(c_file_hash.find(file_ptr, objId));
dropObjPtr.p->m_obj_ptr_i = file_ptr.i;
break;
}
case DictTabInfo::Undofile:
+ {
+ jam();
dropObjPtr.p->m_vt_index = 4;
return;
+ }
default:
ndbrequire(false);
}
@@ -15223,12 +15486,12 @@ Dbdict::dropObj_prepare_start_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
+ Callback cb;
- DropObjRecordPtr dropObjPtr;
+ ndbrequire(returnCode == 0);
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
- Callback cb;
cb.m_callbackData = callbackData;
cb.m_callbackFunction =
safe_cast(&Dbdict::dropObj_prepare_writeSchemaConf);
@@ -15239,7 +15502,7 @@ Dbdict::dropObj_prepare_start_done(Signal* signal,
dropObj_prepare_complete_done(signal, callbackData, 0);
return;
}
-
+ jam();
Uint32 objId = dropObjPtr.p->m_obj_id;
XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
SchemaFile::TableEntry objEntry = *getTableEntry(xsf, objId);
@@ -15252,19 +15515,23 @@ Dbdict::dropObj_prepare_writeSchemaConf(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- ndbrequire(returnCode == 0);
+ DropObjRecordPtr dropObjPtr;
- DropObjRecordPtr dropObjPtr;
+ ndbrequire(returnCode == 0);
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
-
dropObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropObj_prepare_complete_done);
-
if(f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete)
+ {
+ jam();
(this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete)
(signal, dropObjPtr.p);
+ }
else
+ {
+ jam();
execute(signal, dropObjPtr.p->m_callback, 0);
+ }
}
void
@@ -15272,10 +15539,11 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
+ DropObjRecordPtr dropObjPtr;
+
ndbrequire(returnCode == 0);
-
- DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ jam();
//@todo check for master failed
@@ -15301,16 +15569,22 @@ Dbdict::dropObj_prepare_complete_done(Signal* signal,
}
void
-Dbdict::dropObj_commit(Signal * signal, SchemaOp * op){
- jam();
-
+Dbdict::dropObj_commit(Signal * signal, SchemaOp * op)
+{
OpDropObj * dropObj = (OpDropObj*)op;
+
dropObj->m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropObj_commit_start_done);
if (f_dict_op[dropObj->m_vt_index].m_commit_start)
+ {
+ jam();
(this->*f_dict_op[dropObj->m_vt_index].m_commit_start)(signal, dropObj);
+ }
else
+ {
+ jam();
execute(signal, dropObj->m_callback, 0);
+ }
}
void
@@ -15318,10 +15592,10 @@ Dbdict::dropObj_commit_start_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
+ DropObjRecordPtr dropObjPtr;
+
jam();
ndbrequire(returnCode == 0);
-
- DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
Uint32 objId = dropObjPtr.p->m_obj_id;
@@ -15342,20 +15616,25 @@ Dbdict::dropObj_commit_writeSchemaConf(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
+ DropObjRecordPtr dropObjPtr;
+
jam();
ndbrequire(returnCode == 0);
-
- DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
-
dropObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropObj_commit_complete_done);
if(f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
+ {
+ jam();
(this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete)
(signal, dropObjPtr.p);
+ }
else
+ {
+ jam();
execute(signal, dropObjPtr.p->m_callback, 0);
+ }
}
void
@@ -15363,7 +15642,9 @@ Dbdict::dropObj_commit_complete_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- DropObjRecordPtr dropObjPtr;
+ DropObjRecordPtr dropObjPtr;
+
+ jam();
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
//@todo check error
@@ -15374,22 +15655,26 @@ Dbdict::dropObj_commit_complete_done(Signal* signal,
conf->senderData = dropObjPtr.p->m_senderData;
sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_COMMIT_CONF,
signal, DictCommitConf::SignalLength, JBB);
-
c_opDropObj.release(dropObjPtr);
}
void
-Dbdict::dropObj_abort(Signal * signal, SchemaOp * op){
- jam();
-
+Dbdict::dropObj_abort(Signal * signal, SchemaOp * op)
+{
OpDropObj * dropObj = (OpDropObj*)op;
+
dropObj->m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropObj_abort_start_done);
-
if (f_dict_op[dropObj->m_vt_index].m_abort_start)
+ {
+ jam();
(this->*f_dict_op[dropObj->m_vt_index].m_abort_start)(signal, dropObj);
+ }
else
+ {
+ jam();
execute(signal, dropObj->m_callback, 0);
+ }
}
void
@@ -15397,10 +15682,10 @@ Dbdict::dropObj_abort_start_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
+ DropObjRecordPtr dropObjPtr;
+
jam();
ndbrequire(returnCode == 0);
-
- DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
@@ -15421,6 +15706,7 @@ Dbdict::dropObj_abort_start_done(Signal* signal,
}
else
{
+ jam();
execute(signal, callback, 0);
}
}
@@ -15430,20 +15716,24 @@ Dbdict::dropObj_abort_writeSchemaConf(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- jam();
+ DropObjRecordPtr dropObjPtr;
+
ndbrequire(returnCode == 0);
-
- DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
-
dropObjPtr.p->m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropObj_abort_complete_done);
if(f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete)
+ {
+ jam();
(this->*f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete)
(signal, dropObjPtr.p);
+ }
else
+ {
+ jam();
execute(signal, dropObjPtr.p->m_callback, 0);
+ }
}
void
@@ -15451,24 +15741,26 @@ Dbdict::dropObj_abort_complete_done(Signal* signal,
Uint32 callbackData,
Uint32 returnCode)
{
- DropObjRecordPtr dropObjPtr;
- ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
-
+ DropObjRecordPtr dropObjPtr;
DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr();
+
+ ndbrequire(c_opDropObj.find(dropObjPtr, callbackData));
+ jam();
conf->senderRef = reference();
conf->senderData = dropObjPtr.p->m_senderData;
sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_ABORT_CONF,
signal, DictAbortConf::SignalLength, JBB);
-
c_opDropObj.release(dropObjPtr);
}
void
-Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
+Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op)
+{
/**
* Put data into table record
*/
SegmentedSectionPtr objInfoPtr;
+ jam();
getSection(objInfoPtr, ((OpCreateObj*)op)->m_obj_info_ptr_i);
SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool());
@@ -15485,6 +15777,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
if(status != SimpleProperties::Eof)
{
+ jam();
op->m_errorCode = CreateTableRef::InvalidFormat;
break;
}
@@ -15493,6 +15786,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
{
if(!fg.TS_ExtentSize)
{
+ jam();
op->m_errorCode = CreateFilegroupRef::InvalidExtentSize;
break;
}
@@ -15504,6 +15798,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
*/
if(fg.LF_UndoBufferSize < 3 * File_formats::NDB_PAGE_SIZE)
{
+ jam();
op->m_errorCode = CreateFilegroupRef::InvalidUndoBufferSize;
break;
}
@@ -15512,16 +15807,19 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
Uint32 len = strlen(fg.FilegroupName) + 1;
Uint32 hash = Rope::hash(fg.FilegroupName, len);
if(get_object(fg.FilegroupName, len, hash) != 0){
+ jam();
op->m_errorCode = CreateTableRef::TableAlreadyExist;
break;
}
if(!c_obj_pool.seize(obj_ptr)){
+ jam();
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
break;
}
if(!c_filegroup_pool.seize(fg_ptr)){
+ jam();
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
break;
}
@@ -15531,6 +15829,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
{
Rope name(c_rope_pool, obj_ptr.p->m_name);
if(!name.assign(fg.FilegroupName, len, hash)){
+ jam();
op->m_errorCode = CreateTableRef::OutOfStringBuffer;
break;
}
@@ -15544,6 +15843,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
switch(fg.FilegroupType){
case DictTabInfo::Tablespace:
+ {
//fg.TS_DataGrow = group.m_grow_spec;
fg_ptr.p->m_tablespace.m_extent_size = fg.TS_ExtentSize;
fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId;
@@ -15551,22 +15851,28 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){
Ptr<Filegroup> lg_ptr;
if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId))
{
+ jam();
op->m_errorCode = CreateFilegroupRef::NoSuchLogfileGroup;
goto error;
}
if (lg_ptr.p->m_version != fg.TS_LogfileGroupVersion)
{
+ jam();
op->m_errorCode = CreateFilegroupRef::InvalidFilegroupVersion;
goto error;
}
increase_ref_count(lg_ptr.p->m_obj_ptr_i);
break;
+ }
case DictTabInfo::LogfileGroup:
+ {
+ jam();
fg_ptr.p->m_logfilegroup.m_undo_buffer_size = fg.LF_UndoBufferSize;
fg_ptr.p->m_logfilegroup.m_files.init();
//fg.LF_UndoGrow = ;
break;
+ }
default:
ndbrequire(false);
}
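
The same hunks wrap switch cases in braces wherever a case now declares a local or needs its own jam(); C++ does not allow jumping past an initialization into a later case label, so the braces scope the local to its case. Condensed from the Tablespace case above:

    switch (fg.FilegroupType) {
    case DictTabInfo::Tablespace:
    {                                          // scope for lg_ptr
      Ptr<Filegroup> lg_ptr;
      jam();                                   // per-case trace point
      ndbrequire(c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId));
      break;
    }
    default:
      ndbrequire(false);
    }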
@@ -15601,13 +15907,14 @@ error:
}
void
-Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){
+Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op)
+{
/**
* CONTACT TSMAN LGMAN PGMAN
*/
CreateFilegroupImplReq* req =
(CreateFilegroupImplReq*)signal->getDataPtrSend();
-
+ jam();
req->senderData = op->key;
req->senderRef = reference();
req->filegroup_id = op->m_obj_id;
@@ -15620,18 +15927,24 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){
Uint32 len= 0;
switch(op->m_obj_type){
case DictTabInfo::Tablespace:
+ {
+ jam();
ref = TSMAN_REF;
len = CreateFilegroupImplReq::TablespaceLength;
req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size;
req->tablespace.logfile_group_id =
fg_ptr.p->m_tablespace.m_default_logfile_group_id;
break;
+ }
case DictTabInfo::LogfileGroup:
+ {
+ jam();
ref = LGMAN_REF;
len = CreateFilegroupImplReq::LogfileGroupLength;
req->logfile_group.buffer_size =
fg_ptr.p->m_logfilegroup.m_undo_buffer_size;
break;
+ }
default:
ndbrequire(false);
}
@@ -15640,12 +15953,11 @@ Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op){
}
void
-Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execCREATE_FILEGROUP_REF(Signal* signal)
+{
CreateFilegroupImplRef * ref = (CreateFilegroupImplRef*)signal->getDataPtr();
-
CreateObjRecordPtr op_ptr;
+ jamEntry();
ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData));
op_ptr.p->m_errorCode = ref->errorCode;
@@ -15653,13 +15965,12 @@ Dbdict::execCREATE_FILEGROUP_REF(Signal* signal){
}
void
-Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal)
+{
CreateFilegroupImplConf * rep =
(CreateFilegroupImplConf*)signal->getDataPtr();
-
CreateObjRecordPtr op_ptr;
+ jamEntry();
ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData));
execute(signal, op_ptr.p->m_callback, 0);
@@ -15675,13 +15986,13 @@ Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){
send_drop_fg(signal, op, DropFilegroupImplReq::Commit);
return;
}
-
+ jam();
execute(signal, op->m_callback, 0);
}
void
-Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){
-
+Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op)
+{
if (op->m_obj_ptr_i != RNIL)
{
jam();
@@ -15691,12 +16002,13 @@ Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op){
release_object(fg_ptr.p->m_obj_ptr_i);
c_filegroup_hash.release(fg_ptr);
}
-
+ jam();
execute(signal, op->m_callback, 0);
}
void
-Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
+Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op)
+{
/**
* Put data into table record
*/
@@ -15716,6 +16028,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
do {
if(status != SimpleProperties::Eof){
+ jam();
op->m_errorCode = CreateFileRef::InvalidFormat;
break;
}
@@ -15723,34 +16036,53 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
// Get Filegroup
FilegroupPtr fg_ptr;
if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId)){
+ jam();
op->m_errorCode = CreateFileRef::NoSuchFilegroup;
break;
}
if(fg_ptr.p->m_version != f.FilegroupVersion){
+ jam();
op->m_errorCode = CreateFileRef::InvalidFilegroupVersion;
break;
}
switch(f.FileType){
case DictTabInfo::Datafile:
+ {
if(fg_ptr.p->m_type != DictTabInfo::Tablespace)
+ {
+ jam();
op->m_errorCode = CreateFileRef::InvalidFileType;
+ }
+ jam();
break;
+ }
case DictTabInfo::Undofile:
+ {
if(fg_ptr.p->m_type != DictTabInfo::LogfileGroup)
+ {
+ jam();
op->m_errorCode = CreateFileRef::InvalidFileType;
+ }
+ jam();
break;
+ }
default:
+ jam();
op->m_errorCode = CreateFileRef::InvalidFileType;
}
if(op->m_errorCode)
+ {
+ jam();
break;
+ }
Uint32 len = strlen(f.FileName) + 1;
Uint32 hash = Rope::hash(f.FileName, len);
if(get_object(f.FileName, len, hash) != 0){
+ jam();
op->m_errorCode = CreateFileRef::FilenameAlreadyExists;
break;
}
@@ -15761,6 +16093,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
m_ctx.m_config.getOwnConfigIterator();
if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
{
+ jam();
op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless;
break;
}
@@ -15768,11 +16101,13 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
// Loop through all filenames...
if(!c_obj_pool.seize(obj_ptr)){
+ jam();
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
break;
}
if (! c_file_pool.seize(filePtr)){
+ jam();
op->m_errorCode = CreateFileRef::OutOfFileRecords;
break;
}
@@ -15782,6 +16117,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
{
Rope name(c_rope_pool, obj_ptr.p->m_name);
if(!name.assign(f.FileName, len, hash)){
+ jam();
op->m_errorCode = CreateTableRef::OutOfStringBuffer;
break;
}
@@ -15789,10 +16125,14 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
switch(fg_ptr.p->m_type){
case DictTabInfo::Tablespace:
+ {
+ jam();
increase_ref_count(fg_ptr.p->m_obj_ptr_i);
break;
+ }
case DictTabInfo::LogfileGroup:
{
+ jam();
Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
list.add(filePtr);
break;
@@ -15836,37 +16176,46 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
c_obj_pool.release(obj_ptr);
}
}
-
execute(signal, op->m_callback, 0);
}
void
-Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){
+Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op)
+{
/**
* CONTACT TSMAN LGMAN PGMAN
*/
CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend();
-
FilePtr f_ptr;
- c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
-
FilegroupPtr fg_ptr;
+
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
req->senderData = op->key;
req->senderRef = reference();
switch(((OpCreateObj*)op)->m_restart){
case 0:
+ {
+ jam();
req->requestInfo = CreateFileImplReq::Create;
break;
+ }
case 1:
+ {
+ jam();
req->requestInfo = CreateFileImplReq::Open;
break;
+ }
case 2:
+ {
+ jam();
req->requestInfo = CreateFileImplReq::CreateForce;
break;
}
+ }
req->file_id = f_ptr.p->key;
req->filegroup_id = f_ptr.p->m_filegroup_id;
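
For reference, the m_restart values dispatched in this hunk map to the implementation request types as follows; the interpretations on the left are a reading of the surrounding restart code, not comments from the source:

    // m_restart == 0 : fresh DDL create       -> CreateFileImplReq::Create
    // m_restart == 1 : restart, file on disk  -> CreateFileImplReq::Open
    // m_restart == 2 : restart, re-create     -> CreateFileImplReq::CreateForce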
@@ -15878,14 +16227,20 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){
Uint32 len= 0;
switch(op->m_obj_type){
case DictTabInfo::Datafile:
+ {
+ jam();
ref = TSMAN_REF;
len = CreateFileImplReq::DatafileLength;
req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size;
break;
+ }
case DictTabInfo::Undofile:
+ {
+ jam();
ref = LGMAN_REF;
len = CreateFileImplReq::UndofileLength;
break;
+ }
default:
ndbrequire(false);
}
@@ -15900,42 +16255,41 @@ Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op){
}
void
-Dbdict::execCREATE_FILE_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execCREATE_FILE_REF(Signal* signal)
+{
CreateFileImplRef * ref = (CreateFileImplRef*)signal->getDataPtr();
-
CreateObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData));
op_ptr.p->m_errorCode = ref->errorCode;
-
execute(signal, op_ptr.p->m_callback, 0);
}
void
-Dbdict::execCREATE_FILE_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execCREATE_FILE_CONF(Signal* signal)
+{
CreateFileImplConf * rep =
(CreateFileImplConf*)signal->getDataPtr();
-
CreateObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData));
-
execute(signal, op_ptr.p->m_callback, 0);
}
void
-Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){
+Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op)
+{
/**
* CONTACT TSMAN LGMAN PGMAN
*/
CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend();
-
FilePtr f_ptr;
- c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
-
FilegroupPtr fg_ptr;
+
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
req->senderData = op->key;
@@ -15949,15 +16303,20 @@ Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op){
Uint32 ref= 0;
switch(op->m_obj_type){
case DictTabInfo::Datafile:
+ {
+ jam();
ref = TSMAN_REF;
break;
+ }
case DictTabInfo::Undofile:
+ {
+ jam();
ref = LGMAN_REF;
break;
+ }
default:
ndbrequire(false);
}
-
sendSignal(ref, GSN_CREATE_FILE_REQ, signal,
CreateFileImplReq::CommitLength, JBB);
}
@@ -15970,9 +16329,11 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op)
if (op->m_obj_ptr_i != RNIL)
{
FilePtr f_ptr;
+ FilegroupPtr fg_ptr;
+
+ jam();
c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
- FilegroupPtr fg_ptr;
ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
req->senderData = op->key;
@@ -15986,20 +16347,24 @@ Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op)
Uint32 ref= 0;
switch(op->m_obj_type){
case DictTabInfo::Datafile:
+ {
+ jam();
ref = TSMAN_REF;
break;
+ }
case DictTabInfo::Undofile:
+ {
+ jam();
ref = LGMAN_REF;
break;
+ }
default:
ndbrequire(false);
}
-
sendSignal(ref, GSN_CREATE_FILE_REQ, signal,
CreateFileImplReq::AbortLength, JBB);
return;
}
-
execute(signal, op->m_callback, 0);
}
@@ -16009,17 +16374,21 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op)
if (op->m_obj_ptr_i != RNIL)
{
FilePtr f_ptr;
- c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
-
FilegroupPtr fg_ptr;
+
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
-
switch(fg_ptr.p->m_type){
case DictTabInfo::Tablespace:
+ {
+ jam();
decrease_ref_count(fg_ptr.p->m_obj_ptr_i);
break;
+ }
case DictTabInfo::LogfileGroup:
{
+ jam();
Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
list.remove(f_ptr);
break;
@@ -16031,19 +16400,20 @@ Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op)
release_object(f_ptr.p->m_obj_ptr_i);
c_file_hash.release(f_ptr);
}
-
execute(signal, op->m_callback, 0);
}
void
Dbdict::drop_file_prepare_start(Signal* signal, SchemaOp* op)
{
+ jam();
send_drop_file(signal, op, DropFileImplReq::Prepare);
}
void
Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op)
{
+ jam();
op->m_errorCode = DropFileRef::DropUndoFileNotSupported;
execute(signal, op->m_callback, 0);
}
@@ -16051,6 +16421,7 @@ Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op)
void
Dbdict::drop_file_commit_start(Signal* signal, SchemaOp* op)
{
+ jam();
send_drop_file(signal, op, DropFileImplReq::Commit);
}
@@ -16058,21 +16429,37 @@ void
Dbdict::drop_file_commit_complete(Signal* signal, SchemaOp* op)
{
FilePtr f_ptr;
- c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
-
FilegroupPtr fg_ptr;
- ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
+ ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
decrease_ref_count(fg_ptr.p->m_obj_ptr_i);
release_object(f_ptr.p->m_obj_ptr_i);
c_file_hash.release(f_ptr);
+ execute(signal, op->m_callback, 0);
+}
+void
+Dbdict::drop_undofile_commit_complete(Signal* signal, SchemaOp* op)
+{
+ FilePtr f_ptr;
+ FilegroupPtr fg_ptr;
+
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
+ ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
+ Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files);
+ list.remove(f_ptr);
+ release_object(f_ptr.p->m_obj_ptr_i);
+ c_file_hash.release(f_ptr);
execute(signal, op->m_callback, 0);
}
void
Dbdict::drop_file_abort_start(Signal* signal, SchemaOp* op)
{
+ jam();
send_drop_file(signal, op, DropFileImplReq::Abort);
}
@@ -16081,11 +16468,11 @@ Dbdict::send_drop_file(Signal* signal, SchemaOp* op,
DropFileImplReq::RequestInfo type)
{
DropFileImplReq* req = (DropFileImplReq*)signal->getDataPtrSend();
-
FilePtr f_ptr;
- c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
-
FilegroupPtr fg_ptr;
+
+ jam();
+ c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i);
ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id));
req->senderData = op->key;
@@ -16099,29 +16486,34 @@ Dbdict::send_drop_file(Signal* signal, SchemaOp* op,
Uint32 ref= 0;
switch(op->m_obj_type){
case DictTabInfo::Datafile:
+ {
+ jam();
ref = TSMAN_REF;
break;
+ }
case DictTabInfo::Undofile:
+ {
+ jam();
ref = LGMAN_REF;
break;
+ }
default:
ndbrequire(false);
}
-
sendSignal(ref, GSN_DROP_FILE_REQ, signal,
DropFileImplReq::SignalLength, JBB);
}
void
-Dbdict::execDROP_OBJ_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_OBJ_REF(Signal* signal)
+{
DropObjRef * const ref = (DropObjRef*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, ref->senderData));
-
if(ref->errorCode != DropObjRef::NF_FakeErrorREF){
+ jam();
trans_ptr.p->setErrorCode(ref->errorCode);
}
Uint32 node = refToNode(ref->senderRef);
@@ -16129,65 +16521,61 @@ Dbdict::execDROP_OBJ_REF(Signal* signal){
}
void
-Dbdict::execDROP_OBJ_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_OBJ_CONF(Signal* signal)
+{
DropObjConf * const conf = (DropObjConf*)signal->getDataPtr();
-
Ptr<SchemaTransaction> trans_ptr;
+
+ jamEntry();
ndbrequire(c_Trans.find(trans_ptr, conf->senderData));
schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef));
}
void
-Dbdict::execDROP_FILE_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_FILE_REF(Signal* signal)
+{
DropFileImplRef * ref = (DropFileImplRef*)signal->getDataPtr();
-
DropObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opDropObj.find(op_ptr, ref->senderData));
op_ptr.p->m_errorCode = ref->errorCode;
-
execute(signal, op_ptr.p->m_callback, 0);
}
void
-Dbdict::execDROP_FILE_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_FILE_CONF(Signal* signal)
+{
DropFileImplConf * rep =
(DropFileImplConf*)signal->getDataPtr();
-
DropObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opDropObj.find(op_ptr, rep->senderData));
-
execute(signal, op_ptr.p->m_callback, 0);
}
void
-Dbdict::execDROP_FILEGROUP_REF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_FILEGROUP_REF(Signal* signal)
+{
DropFilegroupImplRef * ref = (DropFilegroupImplRef*)signal->getDataPtr();
-
DropObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opDropObj.find(op_ptr, ref->senderData));
op_ptr.p->m_errorCode = ref->errorCode;
-
execute(signal, op_ptr.p->m_callback, 0);
}
void
-Dbdict::execDROP_FILEGROUP_CONF(Signal* signal){
- jamEntry();
-
+Dbdict::execDROP_FILEGROUP_CONF(Signal* signal)
+{
DropFilegroupImplConf * rep =
(DropFilegroupImplConf*)signal->getDataPtr();
-
DropObjRecordPtr op_ptr;
+
+ jamEntry();
ndbrequire(c_opDropObj.find(op_ptr, rep->senderData));
-
execute(signal, op_ptr.p->m_callback, 0);
}
@@ -16200,11 +16588,13 @@ Dbdict::drop_fg_prepare_start(Signal* signal, SchemaOp* op)
DictObject * obj = c_obj_pool.getPtr(fg_ptr.p->m_obj_ptr_i);
if (obj->m_ref_count)
{
+ jam();
op->m_errorCode = DropFilegroupRef::FilegroupInUse;
execute(signal, op->m_callback, 0);
}
else
{
+ jam();
send_drop_fg(signal, op, DropFilegroupImplReq::Prepare);
}
}
@@ -16216,7 +16606,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op)
c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i);
if (op->m_obj_type == DictTabInfo::LogfileGroup)
{
-
+ jam();
/**
* Mark all undofiles as dropped
*/
@@ -16225,6 +16615,7 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op)
XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr))
{
+ jam();
Uint32 objId = filePtr.p->key;
SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, objId);
tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
@@ -16237,13 +16628,14 @@ Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op)
else if(op->m_obj_type == DictTabInfo::Tablespace)
{
FilegroupPtr lg_ptr;
+ jam();
ndbrequire(c_filegroup_hash.
find(lg_ptr,
fg_ptr.p->m_tablespace.m_default_logfile_group_id));
decrease_ref_count(lg_ptr.p->m_obj_ptr_i);
}
-
+ jam();
send_drop_fg(signal, op, DropFilegroupImplReq::Commit);
}
@@ -16252,16 +16644,17 @@ Dbdict::drop_fg_commit_complete(Signal* signal, SchemaOp* op)
{
FilegroupPtr fg_ptr;
c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i);
-
+
+ jam();
release_object(fg_ptr.p->m_obj_ptr_i);
c_filegroup_hash.release(fg_ptr);
-
execute(signal, op->m_callback, 0);
}
void
Dbdict::drop_fg_abort_start(Signal* signal, SchemaOp* op)
{
+ jam();
send_drop_fg(signal, op, DropFilegroupImplReq::Abort);
}
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index e5b918ca270..3fff330d699 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -2565,6 +2565,12 @@ private:
const SchemaFile::TableEntry *,
const SchemaFile::TableEntry *);
void restartDropTab_complete(Signal*, Uint32 callback, Uint32);
+
+ void restartDropObj(Signal*, Uint32, const SchemaFile::TableEntry *);
+ void restartDropObj_prepare_start_done(Signal*, Uint32, Uint32);
+ void restartDropObj_prepare_complete_done(Signal*, Uint32, Uint32);
+ void restartDropObj_commit_start_done(Signal*, Uint32, Uint32);
+ void restartDropObj_commit_complete_done(Signal*, Uint32, Uint32);
void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32);
void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32);
@@ -2657,7 +2663,8 @@ public:
void send_drop_fg(Signal*, SchemaOp*, DropFilegroupImplReq::RequestInfo);
void drop_undofile_prepare_start(Signal* signal, SchemaOp*);
-
+ void drop_undofile_commit_complete(Signal* signal, SchemaOp*);
+
int checkSingleUserMode(Uint32 senderRef);
};
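
The new restartDropObj_* declarations form another prepare/commit callback chain of the kind used throughout Dbdict.cpp above, and drop_undofile_commit_complete is the commit-side hook added for undo files. A sketch of how one link of such a chain is wired up, assuming an operation record opPtr with the usual key field:

    Callback cb;
    cb.m_callbackData = opPtr.p->key;          // used to find the op record again
    cb.m_callbackFunction =
      safe_cast(&Dbdict::restartDropObj_prepare_start_done);
    execute(signal, cb, 0);                    // invokes
                                               // (this->*fn)(signal, callbackData, 0)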
diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
index 602881095c3..44326e213d0 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
@@ -15,7 +15,7 @@
#include <ndb_global.h>
-#include <ndb_version.h>
+#include <util/version.h>
#include <NdbMain.h>
#include <NdbOut.hpp>
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index b4dc445e94d..5bef13cd0b9 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -898,7 +898,7 @@ private:
void ndbsttorry10Lab(Signal *, Uint32 _line);
void createMutexes(Signal* signal, Uint32 no);
void createMutex_done(Signal* signal, Uint32 no, Uint32 retVal);
- void crashSystemAtGcpStop(Signal *);
+ void crashSystemAtGcpStop(Signal *, bool);
void sendFirstDictfragsreq(Signal *, TabRecordPtr regTabPtr);
void addtabrefuseLab(Signal *, ConnectRecordPtr regConnectPtr, Uint32 errorCode);
void GCP_SAVEhandling(Signal *, Uint32 nodeId);
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 1fe932aaae8..762d4ea5141 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -747,6 +747,14 @@ done:
}
ndbrequire(ok);
+ CRASH_INSERTION(7183);
+
+ if (ERROR_INSERTED(7185) && reason==CopyGCIReq::GLOBAL_CHECKPOINT)
+ {
+ jam();
+ return;
+ }
+
/* ----------------------------------------------------------------------- */
/* WE START BY TRYING TO OPEN THE FIRST RESTORABLE GCI FILE. */
/* ----------------------------------------------------------------------- */
@@ -1230,6 +1238,17 @@ void Dbdih::execDIH_RESTARTREQ(Signal* signal)
Uint32 ng = Sysfile::getNodeGroup(i, SYSFILE->nodeGroups);
ndbrequire(ng < MAX_NDB_NODES);
Uint32 gci = node_gcis[i];
+ if (gci < SYSFILE->lastCompletedGCI[i])
+ {
+ jam();
+ /**
+       * Handle the case where *I* know that the node completed this GCI
+       * but the node itself does not...bug#29167
+       * i.e. the node died before it wrote its own sysfile
+ */
+ gci = SYSFILE->lastCompletedGCI[i];
+ }
+
if (gci > node_group_gcis[ng])
{
jam();
@@ -4139,6 +4158,11 @@ void Dbdih::execNODE_FAILREP(Signal* signal)
CLEAR_ERROR_INSERT_VALUE;
}
+ if (ERROR_INSERTED(7184))
+ {
+ SET_ERROR_INSERT_VALUE(7000);
+ }
+
/*-------------------------------------------------------------------------*/
// The first step is to convert from a bit mask to an array of failed nodes.
/*-------------------------------------------------------------------------*/
@@ -4741,12 +4765,18 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
jam();
const Uint32 nodeId = failedNodePtr.i;
- if (c_lcpState.m_participatingLQH.get(failedNodePtr.i)){
+ if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i))
+ {
/*----------------------------------------------------*/
/* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. WE */
/* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */
/* THE NODE HAVE MISSED A LOCAL CHECKPOINT. */
/*----------------------------------------------------*/
+
+ /**
+ * Bug#28717, Only master should do this, as this status is copied
+ * to other nodes
+ */
switch (failedNodePtr.p->activeStatus) {
case Sysfile::NS_Active:
jam();
@@ -7807,7 +7837,7 @@ void Dbdih::checkGcpStopLab(Signal* signal)
g_eventLogger.error("System crash due to GCP Stop in state = %u",
(Uint32) cgcpStatus);
#endif
- crashSystemAtGcpStop(signal);
+ crashSystemAtGcpStop(signal, false);
return;
}//if
} else {
@@ -7821,7 +7851,7 @@ void Dbdih::checkGcpStopLab(Signal* signal)
g_eventLogger.error("System crash due to GCP Stop in state = %u",
(Uint32) cgcpStatus);
#endif
- crashSystemAtGcpStop(signal);
+ crashSystemAtGcpStop(signal, false);
return;
}//if
} else {
@@ -11177,41 +11207,132 @@ void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr)
* GCP stop detected,
* send SYSTEM_ERROR to all other alive nodes
*/
-void Dbdih::crashSystemAtGcpStop(Signal* signal)
+void Dbdih::crashSystemAtGcpStop(Signal* signal, bool local)
{
+ if (local)
+ goto dolocal;
+
switch(cgcpStatus){
+ case GCP_PREPARE_SENT:
+ {
+ jam();
+ /**
+ * We're waiting for a GCP PREPARE CONF
+ */
+ infoEvent("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_PREPARE_Counter.getText());
+ ndbout_c("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_PREPARE_Counter.getText());
+
+ {
+ NodeReceiverGroup rg(DBDIH, c_GCP_PREPARE_Counter);
+ signal->theData[0] = 7022;
+ sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA);
+ }
+
+ {
+ NodeReceiverGroup rg(NDBCNTR, c_GCP_PREPARE_Counter);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(rg, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }
+ ndbrequire(!c_GCP_PREPARE_Counter.done());
+ return;
+ }
+ case GCP_COMMIT_SENT:
+ {
+ jam();
+ /**
+ * We're waiting for a GCP_NODEFINISH
+ */
+ infoEvent("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_COMMIT_Counter.getText());
+ ndbout_c("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_COMMIT_Counter.getText());
+
+ {
+ NodeReceiverGroup rg(DBDIH, c_GCP_COMMIT_Counter);
+ signal->theData[0] = 7022;
+ sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA);
+ }
+
+ {
+ NodeReceiverGroup rg(NDBCNTR, c_GCP_COMMIT_Counter);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(rg, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }
+ ndbrequire(!c_GCP_COMMIT_Counter.done());
+ return;
+ }
case GCP_NODE_FINISHED:
{
+ jam();
/**
* We're waiting for a GCP save conf
*/
- ndbrequire(!c_GCP_SAVEREQ_Counter.done());
NodeReceiverGroup rg(DBLQH, c_GCP_SAVEREQ_Counter);
signal->theData[0] = 2305;
sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBB);
- infoEvent("Detected GCP stop...sending kill to %s",
- c_GCP_SAVEREQ_Counter.getText());
- g_eventLogger.error("Detected GCP stop...sending kill to %s",
- c_GCP_SAVEREQ_Counter.getText());
+ infoEvent("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_SAVEREQ_Counter.getText());
+ ndbout_c("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_GCP_SAVEREQ_Counter.getText());
+ ndbrequire(!c_GCP_SAVEREQ_Counter.done());
return;
}
case GCP_SAVE_LQH_FINISHED:
- g_eventLogger.error("m_copyReason: %d m_waiting: %d",
- c_copyGCIMaster.m_copyReason,
- c_copyGCIMaster.m_waiting);
- break;
- case GCP_READY: // shut up lint
- case GCP_PREPARE_SENT:
- case GCP_COMMIT_SENT:
- break;
+ {
+ jam();
+ /**
+ * We're waiting for a COPY_GCICONF
+ */
+ infoEvent("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_COPY_GCIREQ_Counter.getText());
+ ndbout_c("Detected GCP stop(%d)...sending kill to %s",
+ cgcpStatus, c_COPY_GCIREQ_Counter.getText());
+
+ {
+ NodeReceiverGroup rg(DBDIH, c_COPY_GCIREQ_Counter);
+ signal->theData[0] = 7022;
+ sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA);
+ }
+
+ {
+ NodeReceiverGroup rg(NDBCNTR, c_COPY_GCIREQ_Counter);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(rg, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }
+ ndbrequire(!c_COPY_GCIREQ_Counter.done());
+ return;
+ }
+ case GCP_READY: (void)1;
}
+
+dolocal:
+ ndbout_c("m_copyReason: %d m_waiting: %d",
+ c_copyGCIMaster.m_copyReason,
+ c_copyGCIMaster.m_waiting);
- g_eventLogger.error("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
- c_copyGCISlave.m_senderData,
- c_copyGCISlave.m_senderRef,
- c_copyGCISlave.m_copyReason,
- c_copyGCISlave.m_expectedNextWord);
+ ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
+ c_copyGCISlave.m_senderData,
+ c_copyGCISlave.m_senderRef,
+ c_copyGCISlave.m_copyReason,
+ c_copyGCISlave.m_expectedNextWord);
FileRecordPtr file0Ptr;
file0Ptr.i = crestartInfoFile[0];
@@ -11262,23 +11383,39 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
c_TCGETOPSIZEREQ_Counter.getText());
ndbout_c("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText());
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
+ if (local == false)
+ {
jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ NodeRecordPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
jam();
- const BlockReference ref =
- numberToRef(refToBlock(cntrlblockref), nodePtr.i);
- SystemError * const sysErr = (SystemError*)&signal->theData[0];
- sysErr->errorCode = SystemError::GCPStopDetected;
- sysErr->errorRef = reference();
- sysErr->data1 = cgcpStatus;
- sysErr->data2 = cgcpOrderBlocked;
- sendSignal(ref, GSN_SYSTEM_ERROR, signal,
- SystemError::SignalLength, JBA);
- }//if
- }//for
+ ptrAss(nodePtr, nodeRecord);
+ if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
+ jam();
+ const BlockReference ref =
+ numberToRef(refToBlock(cntrlblockref), nodePtr.i);
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ sendSignal(ref, GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBA);
+ }//if
+ }//for
+ }
+ else
+ {
+ jam();
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::GCPStopDetected;
+ sysErr->errorRef = reference();
+ sysErr->data1 = cgcpStatus;
+ sysErr->data2 = cgcpOrderBlocked;
+ EXECUTE_DIRECT(NDBCNTR, GSN_SYSTEM_ERROR,
+ signal, SystemError::SignalLength);
+ ndbrequire(false);
+ }
return;
}//Dbdih::crashSystemAtGcpStop()
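
The three new cases above (GCP_PREPARE_SENT, GCP_COMMIT_SENT and GCP_SAVE_LQH_FINISHED) instantiate one and the same kill sequence against whichever signal counter the master is still waiting on: first DUMP_STATE_ORD 7022 to the pending nodes' DBDIH, which per the execDUMP_STATE_ORD hunk below makes each of them call crashSystemAtGcpStop(signal, true) and self-terminate via EXECUTE_DIRECT, then SYSTEM_ERROR to their NDBCNTR. Reduced to its skeleton, with the counter name as a placeholder:

    NodeReceiverGroup rg(NDBCNTR, c_PENDING_Counter); // nodes not yet replied
    SystemError * const sysErr = (SystemError*)&signal->theData[0];
    sysErr->errorCode = SystemError::GCPStopDetected;
    sysErr->errorRef  = reference();
    sysErr->data1 = cgcpStatus;                       // which GCP phase stalled
    sysErr->data2 = cgcpOrderBlocked;
    sendSignal(rg, GSN_SYSTEM_ERROR, signal,
               SystemError::SignalLength, JBA);
    ndbrequire(!c_PENDING_Counter.done());            // someone must be pending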
@@ -14373,6 +14510,12 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
infoEvent(buf);
}
}
+
+ if (arg == 7022)
+ {
+ jam();
+ crashSystemAtGcpStop(signal, true);
+ }
}//Dbdih::execDUMP_STATE_ORD()
void
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index ba146fce005..6f8e5569831 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -71,7 +71,6 @@ class Dbtup;
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
-#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
@@ -115,9 +114,6 @@ class Dbtup;
/* ------------------------------------------------------------------------- */
/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. */
/* ------------------------------------------------------------------------- */
-#define ZOPEN_READ 0
-#define ZOPEN_WRITE 1
-#define ZOPEN_READ_WRITE 2
#define ZVAR_NO_LOG_PAGE_WORD 1
#define ZLIST_OF_PAIRS 0
#define ZLIST_OF_PAIRS_SYNCH 16
@@ -142,7 +138,7 @@ class Dbtup;
/* IN THE MBYTE. */
/* ------------------------------------------------------------------------- */
#define ZFD_HEADER_SIZE 3
-#define ZFD_PART_SIZE 48
+#define ZFD_MBYTE_SIZE 3
#define ZLOG_HEAD_SIZE 8
#define ZNEXT_LOG_SIZE 2
#define ZABORT_LOG_SIZE 3
@@ -169,7 +165,6 @@ class Dbtup;
#define ZPOS_LOG_TYPE 0
#define ZPOS_NO_FD 1
#define ZPOS_FILE_NO 2
-#define ZMAX_LOG_FILES_IN_PAGE_ZERO 40
/* ------------------------------------------------------------------------- */
/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */
/* LOG RECORD. */
@@ -1437,17 +1432,17 @@ public:
* header of each log file. That information is used during
* system restart to find the tail of the log.
*/
- UintR logLastPrepRef[16];
+ UintR *logLastPrepRef;
/**
* The max global checkpoint completed before the mbyte in the
* log file was started. One variable per mbyte.
*/
- UintR logMaxGciCompleted[16];
+ UintR *logMaxGciCompleted;
/**
* The max global checkpoint started before the mbyte in the log
* file was started. One variable per mbyte.
*/
- UintR logMaxGciStarted[16];
+ UintR *logMaxGciStarted;
/**
* This variable contains the file name as needed by the file
* system when opening the file.
@@ -1591,7 +1586,8 @@ public:
ACTIVE_WRITE_LOG = 17, ///< A write operation during
///< writing of log
READ_SR_INVALIDATE_PAGES = 18,
- WRITE_SR_INVALIDATE_PAGES = 19
+ WRITE_SR_INVALIDATE_PAGES = 19,
+ WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0 = 20
};
/**
* We have to remember the log pages read.
@@ -2163,6 +2159,7 @@ private:
void execSTART_RECREF(Signal* signal);
void execGCP_SAVEREQ(Signal* signal);
+ void execFSOPENREF(Signal* signal);
void execFSOPENCONF(Signal* signal);
void execFSCLOSECONF(Signal* signal);
void execFSWRITECONF(Signal* signal);
@@ -2385,7 +2382,7 @@ private:
void errorReport(Signal* signal, int place);
void warningReport(Signal* signal, int place);
void invalidateLogAfterLastGCI(Signal *signal);
- void readFileInInvalidate(Signal *signal);
+ void readFileInInvalidate(Signal *signal, bool stepNext);
void exitFromInvalidate(Signal* signal);
Uint32 calcPageCheckSum(LogPageRecordPtr logP);
Uint32 handleLongTupKey(Signal* signal, Uint32* dataPtr, Uint32 len);
@@ -2671,6 +2668,8 @@ private:
LogPartRecord *logPartRecord;
LogPartRecordPtr logPartPtr;
UintR clogPartFileSize;
+ Uint32 clogFileSize; // In MBYTE
+  Uint32 cmaxLogFilesInPageZero; // Max FDs that fit in log page zero
// Configurable
LogFileRecord *logFileRecord;
@@ -2678,13 +2677,15 @@ private:
UintR cfirstfreeLogFile;
UintR clogFileFileSize;
-#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
+#define ZLFO_MIN_FILE_SIZE 256
+// Log file operations: RedoBuffer/32K, but at least ZLFO_MIN_FILE_SIZE
LogFileOperationRecord *logFileOperationRecord;
LogFileOperationRecordPtr lfoPtr;
UintR cfirstfreeLfo;
UintR clfoFileSize;
LogPageRecord *logPageRecord;
+ void *logPageRecordUnaligned;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
UintR clogPageFileSize;
@@ -2695,7 +2696,7 @@ private:
UintR cfirstfreePageRef;
UintR cpageRefFileSize;
-#define ZSCANREC_FILE_SIZE 100
+// Configurable
ArrayPool<ScanRecord> c_scanRecordPool;
ScanRecordPtr scanptr;
UintR cscanNoFreeRec;
@@ -2888,6 +2889,7 @@ private:
UintR ctransidHash[1024];
Uint32 c_diskless;
+ Uint32 c_o_direct;
Uint32 c_error_insert_table_id;
public:
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index c054c227c8e..d6411ee1cb9 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -30,11 +30,11 @@ void Dblqh::initData()
cgcprecFileSize = ZGCPREC_FILE_SIZE;
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
- clfoFileSize = ZLFO_FILE_SIZE;
+ clfoFileSize = 0;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
- cscanrecFileSize = ZSCANREC_FILE_SIZE;
+ cscanrecFileSize = 0;
ctabrecFileSize = 0;
ctcConnectrecFileSize = 0;
ctcNodeFailrecFileSize = MAX_NDB_NODES;
@@ -49,6 +49,7 @@ void Dblqh::initData()
logFileRecord = 0;
logFileOperationRecord = 0;
logPageRecord = 0;
+ logPageRecordUnaligned= 0;
pageRefRecord = 0;
tablerec = 0;
tcConnectionrec = 0;
@@ -60,6 +61,8 @@ void Dblqh::initData()
cLqhTimeOutCheckCount = 0;
cbookedAccOps = 0;
m_backup_ptr = RNIL;
+ clogFileSize = 16;
+ cmaxLogFilesInPageZero = 40;
}//Dblqh::initData()
void Dblqh::initRecords()
@@ -105,10 +108,13 @@ void Dblqh::initRecords()
sizeof(LogFileOperationRecord),
clfoFileSize);
- logPageRecord = (LogPageRecord*)allocRecord("LogPageRecord",
- sizeof(LogPageRecord),
- clogPageFileSize,
- false);
+ logPageRecord =
+ (LogPageRecord*)allocRecordAligned("LogPageRecord",
+ sizeof(LogPageRecord),
+ clogPageFileSize,
+ &logPageRecordUnaligned,
+ NDB_O_DIRECT_WRITE_ALIGNMENT,
+ false);
pageRefRecord = (PageRefRecord*)allocRecord("PageRefRecord",
sizeof(PageRefRecord),
@@ -260,6 +266,7 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_START_FRAGREQ, &Dblqh::execSTART_FRAGREQ);
addRecSignal(GSN_START_RECREF, &Dblqh::execSTART_RECREF);
addRecSignal(GSN_GCP_SAVEREQ, &Dblqh::execGCP_SAVEREQ);
+ addRecSignal(GSN_FSOPENREF, &Dblqh::execFSOPENREF, true);
addRecSignal(GSN_FSOPENCONF, &Dblqh::execFSOPENCONF);
addRecSignal(GSN_FSCLOSECONF, &Dblqh::execFSCLOSECONF);
addRecSignal(GSN_FSWRITECONF, &Dblqh::execFSWRITECONF);
@@ -377,7 +384,7 @@ Dblqh::~Dblqh()
sizeof(LogFileOperationRecord),
clfoFileSize);
- deallocRecord((void**)&logPageRecord,
+ deallocRecord((void**)&logPageRecordUnaligned,
"LogPageRecord",
sizeof(LogPageRecord),
clogPageFileSize);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index ff4c5b2648b..efb88bfccd2 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -139,6 +139,10 @@ operator<<(NdbOut& out, Operation_t op)
//#define MARKER_TRACE 1
//#define TRACE_SCAN_TAKEOVER 1
+#ifndef DEBUG_REDO
+#define DEBUG_REDO 0
+#endif
+
const Uint32 NR_ScanNo = 0;
#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR
@@ -1023,6 +1027,11 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
clogPageFileSize+= (16 - mega_byte_part);
}
+ /* maximum number of log file operations */
+ clfoFileSize = clogPageFileSize;
+ if (clfoFileSize < ZLFO_MIN_FILE_SIZE)
+ clfoFileSize = ZLFO_MIN_FILE_SIZE;
+
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT,
&ctcConnectrecFileSize));
@@ -1031,14 +1040,44 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
+ c_o_direct = true;
+ ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_o_direct);
Uint32 tmp= 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp));
c_fragment_pool.setSize(tmp);
+ if (!ndb_mgm_get_int_parameter(p, CFG_DB_REDOLOG_FILE_SIZE,
+ &clogFileSize))
+ {
+ // convert to mbyte
+ clogFileSize = (clogFileSize + 1024*1024 - 1) / (1024 * 1024);
+ ndbrequire(clogFileSize >= 4 && clogFileSize <= 1024);
+ }
+
+ cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) /
+ (ZFD_MBYTE_SIZE * clogFileSize);
+
+ /**
+ * "Old" cmaxLogFilesInPageZero was 40
+   * Each FD needs 3 words per MB; require that they fit into 1 page
+   * (at least 1 FD)
+   * This is also checked in ConfigInfo.cpp (max FragmentLogFileSize = 1Gb)
+ * 1Gb = 1024Mb => 3(ZFD_MBYTE_SIZE) * 1024 < 8192 (ZPAGE_SIZE)
+ */
+ if (cmaxLogFilesInPageZero > 40)
+ {
+ jam();
+ cmaxLogFilesInPageZero = 40;
+ }
+ else
+ {
+ ndbrequire(cmaxLogFilesInPageZero);
+ }
+
initRecords();
initialiseRecordsLab(signal, 0, ref, senderData);
-
+
return;
}//Dblqh::execSIZEALT_REP()
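
To make the capacity comment in this hunk concrete, here is the computation worked through for the two interesting sizes (ZPAGE_SIZE is 8192 32-bit words, ZPAGE_HEADER_SIZE is 32 and ZFD_MBYTE_SIZE is 3, all from hunks in this patch; 128 words are held back as slack):

    // cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128)
    //                        / (ZFD_MBYTE_SIZE * clogFileSize)
    //
    // default 16 MB files: (8192 - 32 - 128) / (3 * 16)   = 8032 / 48   = 167 -> capped to 40
    // maximum  1 GB files: (8192 - 32 - 128) / (3 * 1024) = 8032 / 3072 = 2   -> at least 1 FD fits
    //
    // and the byte -> MB rounding a few lines earlier, e.g. for 70,000,000 bytes:
    //   (70000000 + 1024*1024 - 1) / (1024*1024) = 71048575 / 1048576 = 67 MB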
@@ -8507,9 +8546,32 @@ void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
- AccScanReq::setNoDiskScanFlag(req->requestInfo,
- !tcConnectptr.p->m_disk_table);
- AccScanReq::setLcpScanFlag(req->requestInfo, scanptr.p->lcpScan);
+
+ if (refToBlock(tcConnectptr.p->clientBlockref) == BACKUP)
+ {
+ if (scanptr.p->lcpScan)
+ {
+ AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);
+ AccScanReq::setLcpScanFlag(req->requestInfo, 1);
+ }
+ else
+ {
+ /* If backup scan disktables in disk order */
+ AccScanReq::setNoDiskScanFlag(req->requestInfo,
+ !tcConnectptr.p->m_disk_table);
+ AccScanReq::setLcpScanFlag(req->requestInfo, 0);
+ }
+ }
+ else
+ {
+#if BUG_27776_FIXED
+ AccScanReq::setNoDiskScanFlag(req->requestInfo,
+ !tcConnectptr.p->m_disk_table);
+#else
+ AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);
+#endif
+ AccScanReq::setLcpScanFlag(req->requestInfo, 0);
+ }
req->transId1 = tcConnectptr.p->transid[0];
req->transId2 = tcConnectptr.p->transid[1];
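
The branching added above amounts to this decision table (a summary of the hunk; the last row is the explicit Bug#27776 workaround that keeps disk-order scans off for everything except backups):

    // caller block   lcpScan   NoDiskScanFlag          LcpScanFlag
    // BACKUP         true      1                       1
    // BACKUP         false     !m_disk_table           0
    // anything else  (any)     1 (until Bug#27776)     0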
@@ -11732,6 +11794,13 @@ void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId)
jam();
sendEMPTY_LCP_CONF(signal, true);
}
+
+ if (getNodeState().getNodeRestartInProgress() && cstartRecReq != 3)
+ {
+ jam();
+ ndbrequire(cstartRecReq == 2);
+ cstartRecReq = 3;
+ }
return;
}//Dblqh::sendCOMP_LCP_ROUND()
@@ -11765,9 +11834,9 @@ void Dblqh::sendStartLcp(Signal* signal)
Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr,
const LogPartRecordPtr &sltLogPartPtr)
{
- Uint32 hf = sltCurrLogFilePtr.p->fileNo*ZNO_MBYTES_IN_FILE+sltCurrLogFilePtr.p->currentMbyte;
- Uint32 tf = sltLogPartPtr.p->logTailFileNo*ZNO_MBYTES_IN_FILE+sltLogPartPtr.p->logTailMbyte;
- Uint32 sz = sltLogPartPtr.p->noLogFiles*ZNO_MBYTES_IN_FILE;
+ Uint32 hf = sltCurrLogFilePtr.p->fileNo*clogFileSize+sltCurrLogFilePtr.p->currentMbyte;
+ Uint32 tf = sltLogPartPtr.p->logTailFileNo*clogFileSize+sltLogPartPtr.p->logTailMbyte;
+ Uint32 sz = sltLogPartPtr.p->noLogFiles*clogFileSize;
if (tf > hf) hf += sz;
return sz-(hf-tf);
}
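
remainingLogSize treats the redo log as a ring of noLogFiles * clogFileSize megabytes and returns the free distance from the head back around to the tail. A worked example with assumed values:

    // noLogFiles = 4, clogFileSize = 16   -> sz = 64 (ring size in MB)
    // head: file 1, mbyte 5               -> hf = 1*16 + 5  = 21
    // tail: file 3, mbyte 10              -> tf = 3*16 + 10 = 58
    // tf > hf, so wrap: hf += sz          -> hf = 85
    // remaining = sz - (hf - tf) = 64 - 27 = 37 MB still free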
@@ -11825,7 +11894,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
/* ------------------------------------------------------------------------- */
SLT_LOOP:
for (tsltIndex = tsltStartMbyte;
- tsltIndex <= ZNO_MBYTES_IN_FILE - 1;
+ tsltIndex <= clogFileSize - 1;
tsltIndex++) {
if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) {
/* ------------------------------------------------------------------------- */
@@ -11841,7 +11910,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
/* ------------------------------------------------------------------------- */
/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */
/* ------------------------------------------------------------------------- */
- tsltMbyte = ZNO_MBYTES_IN_FILE - 1;
+ tsltMbyte = clogFileSize - 1;
sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile;
ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord);
}//if
@@ -11879,7 +11948,7 @@ void Dblqh::setLogTail(Signal* signal, Uint32 keepGci)
UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo;
UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte;
- arrGuard(tsltMbyte, 16);
+ arrGuard(tsltMbyte, clogFileSize);
sltLogPartPtr.p->logTailFileNo =
sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16;
/* ------------------------------------------------------------------------- */
@@ -12002,15 +12071,27 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
}//if
ndbrequire(ccurrentGcprec == RNIL);
- ccurrentGcprec = 0;
- gcpPtr.i = ccurrentGcprec;
- ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
-
cnewestCompletedGci = gci;
if (gci > cnewestGci) {
jam();
cnewestGci = gci;
}//if
+
+ if(getNodeState().getNodeRestartInProgress() && cstartRecReq < 3)
+ {
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeRestartInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ ccurrentGcprec = 0;
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
gcpPtr.p->gcpBlockref = dihBlockRef;
gcpPtr.p->gcpUserptr = dihPtr;
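
Together with the sendLCP_COMPLETE_REP hunk above, this guard gives cstartRecReq a third phase during a node restart; the phase meanings below are inferred from these two hunks, not stated in the source:

    // cstartRecReq == 2 : restart under way, redo log not yet safe for GCP saves
    // cstartRecReq == 3 : first local checkpoint after the restart has completed
    //
    // Until phase 3 is reached, GCP_SAVEREQ is refused with
    // GCPSaveRef::NodeRestartInProgress instead of being executed.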
@@ -12264,9 +12345,6 @@ void Dblqh::execFSCLOSECONF(Signal* signal)
case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES:
jam();
logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- // Set the prev file to check if we shall close it.
- logFilePtr.i = logFilePtr.p->prevLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
logPartPtr.i = logFilePtr.p->logPartRec;
ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
@@ -12316,7 +12394,7 @@ void Dblqh::execFSOPENCONF(Signal* signal)
case LogFileRecord::OPEN_SR_INVALIDATE_PAGES:
jam();
logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- readFileInInvalidate(signal);
+ readFileInInvalidate(signal, false);
return;
case LogFileRecord::OPENING_INIT:
jam();
@@ -12379,6 +12457,26 @@ void Dblqh::execFSOPENCONF(Signal* signal)
}//switch
}//Dblqh::execFSOPENCONF()
+void
+Dblqh::execFSOPENREF(Signal* signal)
+{
+ jamEntry();
+ FsRef* ref = (FsRef*)signal->getDataPtr();
+ Uint32 err = ref->errorCode;
+ if (err == FsRef::fsErrInvalidFileSize)
+ {
+ char buf[256];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Invalid file size for redo logfile, "
+ " size only changable with --initial");
+ progError(__LINE__,
+ NDBD_EXIT_INVALID_CONFIG,
+ buf);
+ return;
+ }
+
+ SimulatedBlock::execFSOPENREF(signal);
+}
/* ************>> */
/* FSREADCONF > */
@@ -12496,6 +12594,7 @@ void Dblqh::execFSWRITECONF(Signal* signal)
case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
jam();
invalidateLogAfterLastGCI(signal);
+ CRASH_INSERTION(5047);
return;
case LogFileOperationRecord::WRITE_PAGE_ZERO:
jam();
@@ -12533,6 +12632,14 @@ void Dblqh::execFSWRITECONF(Signal* signal)
jam();
firstPageWriteLab(signal);
return;
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0:
+ jam();
+ // We are done...send completed signal and exit this phase.
+ releaseLfo(signal);
+ signal->theData[0] = ZSR_FOURTH_COMP;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
default:
jam();
systemErrorLab(signal, __LINE__);
@@ -13024,7 +13131,7 @@ void Dblqh::openFileInitLab(Signal* signal)
{
logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT;
seizeLogpage(signal);
- writeSinglePage(signal, (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE) - 1,
+ writeSinglePage(signal, (clogFileSize * ZPAGES_IN_MBYTE) - 1,
ZPAGE_SIZE - 1, __LINE__);
lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END;
return;
@@ -13087,7 +13194,7 @@ void Dblqh::writeInitMbyteLab(Signal* signal)
{
releaseLfo(signal);
logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1;
- if (logFilePtr.p->currentMbyte == ZNO_MBYTES_IN_FILE) {
+ if (logFilePtr.p->currentMbyte == clogFileSize) {
jam();
releaseLogpage(signal);
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT;
@@ -13207,7 +13314,7 @@ void Dblqh::initLogfile(Signal* signal, Uint32 fileNo)
logFilePtr.p->lastPageWritten = 0;
logFilePtr.p->logPageZero = RNIL;
logFilePtr.p->currentMbyte = 0;
- for (tilIndex = 0; tilIndex <= 15; tilIndex++) {
+ for (tilIndex = 0; tilIndex < clogFileSize; tilIndex++) {
logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1;
logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1;
logFilePtr.p->logLastPrepRef[tilIndex] = 0;
@@ -13258,8 +13365,14 @@ void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr)
signal->theData[3] = olfLogFilePtr.p->fileName[1];
signal->theData[4] = olfLogFilePtr.p->fileName[2];
signal->theData[5] = olfLogFilePtr.p->fileName[3];
- signal->theData[6] = ZOPEN_READ_WRITE | FsOpenReq::OM_AUTOSYNC;
+ signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE;
+ if (c_o_direct)
+ signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
+ Uint64 sz = clogFileSize;
+ sz *= 1024; sz *= 1024;
+ req->file_size_hi = sz >> 32;
+ req->file_size_lo = sz & 0xFFFFFFFF;
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//Dblqh::openFileRw()
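openFileRw now hands the expected redo file size to NDBFS as two 32-bit signal words. A standalone sketch of the hi/lo split and its reassembly on the receiving side (the 16 MB value for clogFileSize is an assumption for the example):

#include <cstdint>
#include <cassert>

int main()
{
  uint64_t sz = 16;                      // clogFileSize in MB (assumed)
  sz *= 1024; sz *= 1024;                // bytes, as in openFileRw above
  uint32_t file_size_hi = uint32_t(sz >> 32);
  uint32_t file_size_lo = uint32_t(sz & 0xFFFFFFFF);
  // The receiver reassembles the 64-bit size from the two words:
  uint64_t rejoined = (uint64_t(file_size_hi) << 32) | file_size_lo;
  assert(rejoined == sz);
  return 0;
}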
@@ -13278,7 +13391,9 @@ void Dblqh::openLogfileInit(Signal* signal)
signal->theData[3] = logFilePtr.p->fileName[1];
signal->theData[4] = logFilePtr.p->fileName[2];
signal->theData[5] = logFilePtr.p->fileName[3];
- signal->theData[6] = 0x302 | FsOpenReq::OM_AUTOSYNC;
+ signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE | FsOpenReq::OM_AUTOSYNC;
+ if (c_o_direct)
+ signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//Dblqh::openLogfileInit()
@@ -13314,8 +13429,14 @@ void Dblqh::openNextLogfile(Signal* signal)
signal->theData[3] = onlLogFilePtr.p->fileName[1];
signal->theData[4] = onlLogFilePtr.p->fileName[2];
signal->theData[5] = onlLogFilePtr.p->fileName[3];
- signal->theData[6] = 2 | FsOpenReq::OM_AUTOSYNC;
+ signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE;
+ if (c_o_direct)
+ signal->theData[6] |= FsOpenReq::OM_DIRECT;
req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord);
+ Uint64 sz = clogFileSize;
+ sz *= 1024; sz *= 1024;
+ req->file_size_hi = sz >> 32;
+ req->file_size_lo = sz & 0xFFFFFFFF;
sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA);
}//if
}//Dblqh::openNextLogfile()
@@ -13446,7 +13567,7 @@ void Dblqh::writeFileDescriptor(Signal* signal)
/* -------------------------------------------------- */
/* START BY WRITING TO LOG FILE RECORD */
/* -------------------------------------------------- */
- arrGuard(logFilePtr.p->currentMbyte, 16);
+ arrGuard(logFilePtr.p->currentMbyte, clogFileSize);
logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
logPartPtr.p->logPartNewestCompletedGCI;
logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci;
@@ -13472,10 +13593,7 @@ void Dblqh::writeFileDescriptor(Signal* signal)
/* ------------------------------------------------------------------------- */
void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
{
- LogFileRecordPtr wmoLogFilePtr;
UintR twmoNoLogDescriptors;
- UintR twmoLoop;
- UintR twmoIndex;
/* -------------------------------------------------- */
/* WRITE HEADER INFORMATION IN THE NEW FILE. */
@@ -13483,52 +13601,44 @@ void Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType)
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE;
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
logFilePtr.p->fileNo;
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
jam();
- twmoNoLogDescriptors = ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ twmoNoLogDescriptors = cmaxLogFilesInPageZero;
} else {
jam();
twmoNoLogDescriptors = logPartPtr.p->noLogFiles;
}//if
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] =
twmoNoLogDescriptors;
- wmoLogFilePtr.i = logFilePtr.i;
- twmoLoop = 0;
-WMO_LOOP:
- jam();
- if (twmoLoop < twmoNoLogDescriptors) {
- jam();
- ptrCheckGuard(wmoLogFilePtr, clogFileFileSize, logFileRecord);
- for (twmoIndex = 0; twmoIndex <= ZNO_MBYTES_IN_FILE - 1; twmoIndex++) {
- jam();
- arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + twmoIndex] =
- wmoLogFilePtr.p->logMaxGciCompleted[twmoIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) +
- twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + ZNO_MBYTES_IN_FILE) + twmoIndex] =
- wmoLogFilePtr.p->logMaxGciStarted[twmoIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) +
- twmoIndex, ZPAGE_SIZE);
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (twmoLoop * ZFD_PART_SIZE)) + (2 * ZNO_MBYTES_IN_FILE)) + twmoIndex] =
- wmoLogFilePtr.p->logLastPrepRef[twmoIndex];
- }//for
- wmoLogFilePtr.i = wmoLogFilePtr.p->prevLogFile;
- twmoLoop = twmoLoop + 1;
- goto WMO_LOOP;
- }//if
- logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
- (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (ZFD_PART_SIZE * twmoNoLogDescriptors);
- arrGuard(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX], ZPAGE_SIZE);
- logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
- ZNEXT_LOG_RECORD_TYPE;
+
+ {
+ Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
+ LogFileRecordPtr filePtr = logFilePtr;
+ for (Uint32 fd = 0; fd < twmoNoLogDescriptors; fd++)
+ {
+ jam();
+ ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord);
+ for (Uint32 mb = 0; mb < clogFileSize; mb ++)
+ {
+ jam();
+ Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
+ Uint32 pos1 = pos0 + clogFileSize;
+ Uint32 pos2 = pos1 + clogFileSize;
+ arrGuard(pos0, ZPAGE_SIZE);
+ arrGuard(pos1, ZPAGE_SIZE);
+ arrGuard(pos2, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[pos0] = filePtr.p->logMaxGciCompleted[mb];
+ logPagePtr.p->logPageWord[pos1] = filePtr.p->logMaxGciStarted[mb];
+ logPagePtr.p->logPageWord[pos2] = filePtr.p->logLastPrepRef[mb];
+ }
+ filePtr.i = filePtr.p->prevLogFile;
+ }
+ pos += (twmoNoLogDescriptors * ZFD_MBYTE_SIZE * clogFileSize);
+ arrGuard(pos, ZPAGE_SIZE);
+ logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = pos;
+ logPagePtr.p->logPageWord[pos] = ZNEXT_LOG_RECORD_TYPE;
+ }
+
/* ------------------------------------------------------- */
/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */
 /*       LOG FILE. THIS HAS SPECIAL SIGNIFICANCE TO FIND       */
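The rewritten loop above stores three parallel per-megabyte arrays per file descriptor in page zero, replacing the fixed ZNO_MBYTES_IN_FILE layout with one scaled by clogFileSize. A standalone sketch of the offset arithmetic (header constants are assumed placeholders, not the real Dblqh values):

#include <cstdint>
#include <cassert>

int main()
{
  const uint32_t ZPAGE_HEADER_SIZE = 32;  // assumed placeholder
  const uint32_t ZFD_HEADER_SIZE   = 3;   // assumed placeholder
  const uint32_t ZFD_MBYTE_SIZE    = 3;   // three words kept per mbyte
  const uint32_t clogFileSize      = 64;  // mbytes per file, configurable
  const uint32_t noFdDescriptors   = 4;

  const uint32_t base = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
  for (uint32_t fd = 0; fd < noFdDescriptors; fd++)
    for (uint32_t mb = 0; mb < clogFileSize; mb++)
    {
      uint32_t pos0 = base + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
      uint32_t pos1 = pos0 + clogFileSize;  // logMaxGciStarted
      uint32_t pos2 = pos1 + clogFileSize;  // logLastPrepRef
      // Each descriptor spans exactly ZFD_MBYTE_SIZE * clogFileSize words:
      assert(pos2 < base + (fd + 1) * ZFD_MBYTE_SIZE * clogFileSize);
    }
  // First free word after the descriptors (ZCURR_PAGE_INDEX above):
  uint32_t pos = base + noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize;
  (void)pos;
  return 0;
}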
@@ -13608,6 +13718,12 @@ void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo,
signal->theData[6] = logPagePtr.i;
signal->theData[7] = pageNo;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writeSingle 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ pageNo);
}//Dblqh::writeSinglePage()
/* ##########################################################################
@@ -13673,9 +13789,15 @@ void Dblqh::openSrLastFileLab(Signal* signal)
void Dblqh::readSrLastFileLab(Signal* signal)
{
logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
- jam();
- initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ if (DEBUG_REDO)
+ ndbout_c("readSrLastFileLab part: %u logExecState: %u logPartState: %u logLap: %u",
+ logPartPtr.i,
+ logPartPtr.p->logExecState,
+ logPartPtr.p->logPartState,
+ logPartPtr.p->logLap);
+ if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
+ jam();
+ initGciInLogFileRec(signal, cmaxLogFilesInPageZero);
} else {
jam();
initGciInLogFileRec(signal, logPartPtr.p->noLogFiles);
@@ -13700,7 +13822,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
}//if
}//if
- arrGuard(logFilePtr.p->currentMbyte, 16);
+ arrGuard(logFilePtr.p->currentMbyte, clogFileSize);
logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] =
logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED];
logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] =
@@ -13708,7 +13830,7 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] =
logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF];
releaseLogpage(signal);
- if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) {
jam();
logFilePtr.p->currentMbyte++;
readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte);
@@ -13722,21 +13844,21 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
* ---------------------------------------------------------------------- */
if (logPartPtr.p->lastMbyte == ZNIL) {
jam();
- logPartPtr.p->lastMbyte = ZNO_MBYTES_IN_FILE - 1;
+ logPartPtr.p->lastMbyte = clogFileSize - 1;
}//if
}//if
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
closeFile(signal, logFilePtr, __LINE__);
- if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) {
Uint32 fileNo;
- if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) {
jam();
- fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero;
} else {
jam();
fileNo =
(logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
- ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ cmaxLogFilesInPageZero;
}//if
if (fileNo == 0) {
jam();
@@ -13746,11 +13868,11 @@ void Dblqh::readSrLastMbyteLab(Signal* signal)
* -------------------------------------------------------------------- */
fileNo = 1;
logPartPtr.p->srRemainingFiles =
- logPartPtr.p->noLogFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ logPartPtr.p->noLogFiles - (cmaxLogFilesInPageZero - 1);
} else {
jam();
logPartPtr.p->srRemainingFiles =
- logPartPtr.p->noLogFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ logPartPtr.p->noLogFiles - cmaxLogFilesInPageZero;
}//if
LogFileRecordPtr locLogFilePtr;
findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
@@ -13775,9 +13897,9 @@ void Dblqh::openSrNextFileLab(Signal* signal)
void Dblqh::readSrNextFileLab(Signal* signal)
{
- if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) {
jam();
- initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
+ initGciInLogFileRec(signal, cmaxLogFilesInPageZero);
} else {
jam();
initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles);
@@ -13785,16 +13907,16 @@ void Dblqh::readSrNextFileLab(Signal* signal)
releaseLogpage(signal);
logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR;
closeFile(signal, logFilePtr, __LINE__);
- if (logPartPtr.p->srRemainingFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) {
Uint32 fileNo;
- if (logFilePtr.p->fileNo >= ZMAX_LOG_FILES_IN_PAGE_ZERO) {
+ if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) {
jam();
- fileNo = logFilePtr.p->fileNo - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero;
} else {
jam();
fileNo =
(logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) -
- ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ cmaxLogFilesInPageZero;
}//if
if (fileNo == 0) {
jam();
@@ -13803,11 +13925,11 @@ void Dblqh::readSrNextFileLab(Signal* signal)
* -------------------------------------------------------------------- */
fileNo = 1;
logPartPtr.p->srRemainingFiles =
- logPartPtr.p->srRemainingFiles - (ZMAX_LOG_FILES_IN_PAGE_ZERO - 1);
+ logPartPtr.p->srRemainingFiles - (cmaxLogFilesInPageZero - 1);
} else {
jam();
logPartPtr.p->srRemainingFiles =
- logPartPtr.p->srRemainingFiles - ZMAX_LOG_FILES_IN_PAGE_ZERO;
+ logPartPtr.p->srRemainingFiles - cmaxLogFilesInPageZero;
}//if
LogFileRecordPtr locLogFilePtr;
findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr);
@@ -14093,15 +14215,6 @@ void Dblqh::execSTART_RECREQ(Signal* signal)
* WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED
* WITH A PROPER GCI.
*------------------------------------------------------------------------ */
- if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){
- jam();
- cstartRecReq = 2;
- StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
- conf->startingNodeId = getOwnNodeId();
- sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
- StartRecConf::SignalLength, JBB);
- return;
- }//if
if (c_lcp_restoring_fragments.isEmpty())
{
@@ -14154,6 +14267,19 @@ void Dblqh::execSTART_RECCONF(Signal* signal)
jam();
csrExecUndoLogState = EULS_COMPLETED;
+
+ if(cstartType == NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ jam();
+ cstartRecReq = 2;
+
+ StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+ conf->startingNodeId = getOwnNodeId();
+ sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+ StartRecConf::SignalLength, JBB);
+ return;
+ }
+
c_lcp_complete_fragments.first(fragptr);
build_acc(signal, fragptr.i);
return;
@@ -14678,7 +14804,7 @@ void Dblqh::srLogLimits(Signal* signal)
* EXECUTED.
* ----------------------------------------------------------------------- */
while(true) {
- ndbrequire(tmbyte < 16);
+ ndbrequire(tmbyte < clogFileSize);
if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
if (logFilePtr.p->logMaxGciCompleted[tmbyte] < logPartPtr.p->logLastGci) {
jam();
@@ -14719,7 +14845,7 @@ void Dblqh::srLogLimits(Signal* signal)
if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
if (tmbyte == 0) {
jam();
- tmbyte = ZNO_MBYTES_IN_FILE - 1;
+ tmbyte = clogFileSize - 1;
logFilePtr.i = logFilePtr.p->prevLogFile;
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
} else {
@@ -14744,6 +14870,20 @@ void Dblqh::srLogLimits(Signal* signal)
break;
}//if
}//while
+
+ if (DEBUG_REDO)
+ {
+ LogFileRecordPtr tmp;
+ tmp.i = logPartPtr.p->stopLogfile;
+ ptrCheckGuard(tmp, clogFileFileSize, logFileRecord);
+ ndbout_c("srLogLimits part: %u start file: %u mb: %u stop file: %u mb: %u",
+ logPartPtr.i,
+ tlastPrepRef >> 16,
+ tlastPrepRef & 65535,
+ tmp.p->fileNo,
+ logPartPtr.p->stopMbyte);
+ }
+
/* ------------------------------------------------------------------------
* WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START
* EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO
@@ -15113,7 +15253,7 @@ void Dblqh::execSr(Signal* signal)
logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD];
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] =
(ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (noFdDescriptors * ZFD_PART_SIZE);
+ (noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize);
}
break;
/* ========================================================================= */
@@ -15153,11 +15293,11 @@ void Dblqh::execSr(Signal* signal)
/*---------------------------------------------------------------------------*/
/* START EXECUTION OF A NEW MBYTE IN THE LOG. */
/*---------------------------------------------------------------------------*/
- if (logFilePtr.p->currentMbyte < (ZNO_MBYTES_IN_FILE - 1)) {
+ if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) {
jam();
logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE;
} else {
- ndbrequire(logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1));
+ ndbrequire(logFilePtr.p->currentMbyte == (clogFileSize - 1));
jam();
/*---------------------------------------------------------------------------*/
/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */
@@ -15170,6 +15310,12 @@ void Dblqh::execSr(Signal* signal)
case ZCOMPLETED_GCI_TYPE:
jam();
logWord = readLogword(signal);
+ if (DEBUG_REDO)
+ ndbout_c("found gci: %u part: %u file: %u page: %u",
+ logWord,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->currentFilepage);
if (logWord == logPartPtr.p->logLastGci) {
jam();
/*---------------------------------------------------------------------------*/
@@ -15186,6 +15332,10 @@ void Dblqh::execSr(Signal* signal)
logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage;
logPartPtr.p->headPageIndex =
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ if (DEBUG_REDO)
+ ndbout_c("execSr part: %u logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
}//if
/*---------------------------------------------------------------------------*/
/* THERE IS NO NEED OF EXECUTING PAST THIS LINE SINCE THERE WILL ONLY BE LOG */
@@ -15348,67 +15498,140 @@ void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
}
switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
- jam();
- releaseLfo(signal);
- releaseLogpage(signal);
- if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
- // We continue in this file.
- logPartPtr.p->invalidatePageNo++;
- } else {
- // We continue in the next file.
- logFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
- // Page 0 is used for file descriptors.
- logPartPtr.p->invalidatePageNo = 1;
- if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) {
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
- openFileRw(signal, logFilePtr);
- return;
- break;
- }
- }
- // Read a page from the log file.
- readFileInInvalidate(signal);
- return;
- break;
-
case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
jam();
- releaseLfo(signal);
// Check if this page must be invalidated.
// If the log lap number on a page after the head of the tail is the same
// as the actual log lap number we must invalidate this page. Otherwise it
// could be impossible to find the end of the log in a later system/node
// restart.
- if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) {
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap)
+ {
// This page must be invalidated.
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 0;
- // Contact NDBFS. Real time break.
- writeSinglePage(signal, logPartPtr.p->invalidatePageNo,
- ZPAGE_SIZE - 1, __LINE__);
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
- } else {
- // We are done with invalidating. Finish start phase 3.4.
+      // Still searching for the end of the log...
+      // read the next page
+ releaseLfo(signal);
+ releaseLogpage(signal);
+ readFileInInvalidate(signal, true);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
+ return;
+ }
+
+ /**
+ * We found the "last" page to invalidate...
+ * Invalidate backwards until head...
+ */
+
+ // Fall through...
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+
+ releaseLfo(signal);
+ releaseLogpage(signal);
+
+ // Step backwards...
+ logPartPtr.p->invalidatePageNo--;
+
+ if (logPartPtr.p->invalidatePageNo == 0)
+ {
+ jam();
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ /**
+ * We're wrapping in the log...
+ * update logLap
+ */
+ logPartPtr.p->logLap--;
+ ndbrequire(logPartPtr.p->logLap); // Should always be > 0
+ if (DEBUG_REDO)
+ ndbout_c("invalidateLogAfterLastGCI part: %u wrap from file 0 -> logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
+ }
+
+ /**
+ * Move to prev file
+ */
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->invalidatePageNo = clogFileSize * ZPAGES_IN_MBYTE - 1;
+ }
+
+ if (logPartPtr.p->invalidateFileNo == logPartPtr.p->headFileNo &&
+ logPartPtr.p->invalidatePageNo == logPartPtr.p->headPageNo)
+ {
+ /**
+ * Done...
+ */
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ // Close files if necessary. Current file and the next file should be
+ // left open.
exitFromInvalidate(signal);
+ return;
}
- return;
- break;
+ seizeLogpage(signal);
+
+ /**
+ * Make page really empty
+ */
+ bzero(logPagePtr.p, sizeof(LogPageRecord));
+ writeSinglePage(signal, logPartPtr.p->invalidatePageNo,
+ ZPAGE_SIZE - 1, __LINE__);
+
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+ return;
default:
jam();
systemError(signal, __LINE__);
return;
break;
}
-
- return;
}//Dblqh::invalidateLogAfterLastGCI
-void Dblqh::readFileInInvalidate(Signal* signal) {
+void Dblqh::readFileInInvalidate(Signal* signal, bool stepNext)
+{
jam();
+
+ if (stepNext)
+ {
+ logPartPtr.p->invalidatePageNo++;
+ if (logPartPtr.p->invalidatePageNo == (clogFileSize * ZPAGES_IN_MBYTE))
+ {
+ // We continue in the next file.
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ // Page 0 is used for file descriptors.
+ logPartPtr.p->invalidatePageNo = 1;
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ /**
+ * We're wrapping in the log...
+ * update logLap
+ */
+ logPartPtr.p->logLap++;
+ if (DEBUG_REDO)
+ ndbout_c("readFileInInvalidate part: %u wrap to file 0 -> logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
+ }
+ if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN)
+ {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
+ openFileRw(signal, logFilePtr);
+ return;
+ }
+ }
+ }
+
// Contact NDBFS. Real time break.
readSinglePage(signal, logPartPtr.p->invalidatePageNo);
lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
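readFileInInvalidate with stepNext set walks the redo files as a ring: past the last page of a file it moves to the next file, skips the descriptor page 0, and bumps the log lap when it lands back on file 0. A standalone sketch of that stepping (all constants are assumed, not the real Dblqh values):

#include <cstdio>
#include <cstdint>

int main()
{
  const uint32_t ZPAGES_IN_MBYTE = 32, clogFileSize = 16, noFiles = 4;
  const uint32_t pagesPerFile = clogFileSize * ZPAGES_IN_MBYTE;
  uint32_t fileNo = noFiles - 1, pageNo = pagesPerFile - 2, logLap = 2;
  for (int step = 0; step < 3; step++)
  {
    pageNo++;
    if (pageNo == pagesPerFile)
    {
      fileNo = (fileNo + 1) % noFiles;  // next file in the ring
      pageNo = 1;                       // page 0 holds file descriptors
      if (fileNo == 0)
        logLap++;                       // wrapped around the whole log
    }
    printf("file %u page %u lap %u\n", fileNo, pageNo, logLap);
  }
  return 0;
}

The backward stepping in invalidateLogAfterLastGCI above is the mirror image: decrement the page, and at page 0 move to the previous file's last page, decrementing the lap when leaving file 0.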
@@ -15416,34 +15639,57 @@ void Dblqh::readFileInInvalidate(Signal* signal) {
void Dblqh::exitFromInvalidate(Signal* signal) {
jam();
- // Close files if necessary. Current file and the next file should be
- // left open.
- if (logFilePtr.i != logPartPtr.p->currentLogfile) {
- LogFileRecordPtr currentLogFilePtr;
- LogFileRecordPtr nextAfterCurrentLogFilePtr;
-
- currentLogFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(currentLogFilePtr, clogFileFileSize, logFileRecord);
-
- nextAfterCurrentLogFilePtr.i = currentLogFilePtr.p->nextLogFile;
-
- if (logFilePtr.i != nextAfterCurrentLogFilePtr.i) {
- // This file should be closed.
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
- closeFile(signal, logFilePtr, __LINE__);
- // Return from this function and wait for close confirm. Then come back
- // and test the previous file for closing.
- return;
- }
- }
- // We are done with closing files, send completed signal and exit this phase.
- signal->theData[0] = ZSR_FOURTH_COMP;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+loop:
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ if (logFilePtr.i == logPartPtr.p->currentLogfile)
+ {
+ jam();
+ goto done;
+ }
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ jam();
+ /**
+     * Logfile 0 should *not* be closed
+ */
+ goto loop;
+ }
+
+ if (logFilePtr.p->logFileStatus == LogFileRecord::CLOSED)
+ {
+ jam();
+ goto done;
+ }
+
+ jam();
+ ndbrequire(logFilePtr.p->logFileStatus == LogFileRecord::OPEN);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
+ closeFile(signal, logFilePtr, __LINE__);
return;
-}
+done:
+ if (DEBUG_REDO)
+ ndbout_c("exitFromInvalidate part: %u head file: %u page: %u",
+ logPartPtr.i,
+ logPartPtr.p->headFileNo,
+ logPartPtr.p->headPageNo);
+
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
+ logPartPtr.p->headFileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__);
+
+ lfoPtr.p->logFileRec = logFilePtr.i;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0;
+ return;
+}
/*---------------------------------------------------------------------------*/
/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */
@@ -15835,20 +16081,10 @@ void Dblqh::readSrFourthZeroLab(Signal* signal)
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo;
logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
-
logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE;
- seizeLfo(signal);
- initLfo(signal);
- // The state here is a little confusing, but simulates that we return
- // to invalidateLogAfterLastGCI() from an invalidate write and are ready
- // to read a page from file.
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
-
- /**
- * Make sure we dont release zero page
- */
- seizeLogpage(signal);
- invalidateLogAfterLastGCI(signal);
+
+ readFileInInvalidate(signal, true);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
return;
}//Dblqh::readSrFourthZeroLab()
@@ -16361,6 +16597,14 @@ void Dblqh::completedLogPage(Signal* signal, Uint32 clpType, Uint32 place)
signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
signal->theData[5] = twlpNoPages;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writing %d pages at part: %u file: %u pos: %u",
+ twlpNoPages,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->filePosition);
+
if (twlpType == ZNORMAL) {
jam();
lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
@@ -16693,6 +16937,22 @@ void Dblqh::initialiseLogFile(Signal* signal)
ptrAss(logFilePtr, logFileRecord);
logFilePtr.p->nextLogFile = logFilePtr.i + 1;
logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE;
+
+ logFilePtr.p->logLastPrepRef = new Uint32[clogFileSize];
+ logFilePtr.p->logMaxGciCompleted = new Uint32[clogFileSize];
+ logFilePtr.p->logMaxGciStarted = new Uint32[clogFileSize];
+
+ if (logFilePtr.p->logLastPrepRef == 0 ||
+ logFilePtr.p->logMaxGciCompleted == 0 ||
+ logFilePtr.p->logMaxGciStarted == 0)
+ {
+ char buf[256];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Failed to alloc mbyte(%u) arrays for logfile %u",
+ clogFileSize, logFilePtr.i);
+ progError(__LINE__, NDBD_EXIT_MEMALLOC, buf);
+ }
+
}//for
logFilePtr.i = clogFileFileSize - 1;
ptrAss(logFilePtr, logFileRecord);
@@ -17021,41 +17281,31 @@ void Dblqh::initFragrec(Signal* signal,
* ========================================================================= */
void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors)
{
- LogFileRecordPtr iglLogFilePtr;
- UintR tiglLoop;
- UintR tiglIndex;
-
- tiglLoop = 0;
- iglLogFilePtr.i = logFilePtr.i;
- iglLogFilePtr.p = logFilePtr.p;
-IGL_LOOP:
- for (tiglIndex = 0; tiglIndex <= ZNO_MBYTES_IN_FILE - 1; tiglIndex++) {
- arrGuard(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logMaxGciCompleted[tiglIndex] =
- logPagePtr.p->logPageWord[((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + ZNO_MBYTES_IN_FILE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logMaxGciStarted[tiglIndex] =
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- ZNO_MBYTES_IN_FILE) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- arrGuard((((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (2 * ZNO_MBYTES_IN_FILE)) + (tiglLoop * ZFD_PART_SIZE)) +
- tiglIndex, ZPAGE_SIZE);
- iglLogFilePtr.p->logLastPrepRef[tiglIndex] =
- logPagePtr.p->logPageWord[(((ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) +
- (2 * ZNO_MBYTES_IN_FILE)) +
- (tiglLoop * ZFD_PART_SIZE)) + tiglIndex];
- }//for
- tiglLoop = tiglLoop + 1;
- if (tiglLoop < noFdDescriptors) {
+ LogFileRecordPtr filePtr = logFilePtr;
+ Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE;
+ for (Uint32 fd = 0; fd < noFdDescriptors; fd++)
+ {
jam();
- iglLogFilePtr.i = iglLogFilePtr.p->prevLogFile;
- ptrCheckGuard(iglLogFilePtr, clogFileFileSize, logFileRecord);
- goto IGL_LOOP;
- }//if
+ for (Uint32 mb = 0; mb < clogFileSize; mb++)
+ {
+ jam();
+ Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb;
+ Uint32 pos1 = pos0 + clogFileSize;
+ Uint32 pos2 = pos1 + clogFileSize;
+ arrGuard(pos0, ZPAGE_SIZE);
+ arrGuard(pos1, ZPAGE_SIZE);
+ arrGuard(pos2, ZPAGE_SIZE);
+ filePtr.p->logMaxGciCompleted[mb] = logPagePtr.p->logPageWord[pos0];
+ filePtr.p->logMaxGciStarted[mb] = logPagePtr.p->logPageWord[pos1];
+ filePtr.p->logLastPrepRef[mb] = logPagePtr.p->logPageWord[pos2];
+ }
+ if (fd + 1 < noFdDescriptors)
+ {
+ jam();
+ filePtr.i = filePtr.p->prevLogFile;
+ ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord);
+ }
+ }
}//Dblqh::initGciInLogFileRec()
/* ==========================================================================
@@ -17514,6 +17764,14 @@ void Dblqh::readExecLog(Signal* signal)
signal->theData[14] = lfoPtr.p->logPageArray[8];
signal->theData[15] = lfoPtr.p->logPageArray[9];
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readExecLog %u page at part: %u file: %u pos: %u",
+ lfoPtr.p->noPagesRw,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logPartPtr.p->execSrStartPageNo);
+
}//Dblqh::readExecLog()
/* ------------------------------------------------------------------------- */
@@ -17576,6 +17834,14 @@ void Dblqh::readExecSr(Signal* signal)
signal->theData[13] = lfoPtr.p->logPageArray[7];
signal->theData[14] = tresPageid;
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readExecSr %u page at part: %u file: %u pos: %u",
+ 8,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ tresPageid);
+
}//Dblqh::readExecSr()
/* ------------------------------------------------------------------------- */
@@ -17731,6 +17997,13 @@ void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo)
signal->theData[6] = logPagePtr.i;
signal->theData[7] = pageNo;
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readSinglePage 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ pageNo);
+
}//Dblqh::readSinglePage()
/* --------------------------------------------------------------------------
@@ -18214,8 +18487,17 @@ void Dblqh::writeCompletedGciLog(Signal* signal)
jam();
changeMbyte(signal);
}//if
+
logFilePtr.p->remainingWordsInMbyte =
logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE;
+
+ if (DEBUG_REDO)
+ ndbout_c("writeCompletedGciLog gci: %u part: %u file: %u page: %u",
+ cnewestCompletedGci,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->currentFilepage);
+
writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
writeLogWord(signal, cnewestCompletedGci);
logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
@@ -18252,6 +18534,13 @@ void Dblqh::writeDirty(Signal* signal, Uint32 place)
signal->theData[6] = logPagePtr.i;
signal->theData[7] = logPartPtr.p->prevFilepage;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writeDirty 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logPartPtr.p->prevFilepage);
+
}//Dblqh::writeDirty()
/* --------------------------------------------------------------------------
@@ -18308,7 +18597,7 @@ void Dblqh::writeNextLog(Signal* signal)
ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE);
logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] =
ZNEXT_MBYTE_TYPE;
- if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) {
jam();
/* -------------------------------------------------- */
/* CALCULATE THE NEW REMAINING WORDS WHEN */
@@ -18397,7 +18686,7 @@ void Dblqh::writeNextLog(Signal* signal)
systemError(signal, __LINE__);
}//if
}//if
- if (logFilePtr.p->currentMbyte == (ZNO_MBYTES_IN_FILE - 1)) {
+ if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) {
jam();
twnlNextMbyte = 0;
if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) {
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
index c7c477a512c..b545096dc83 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
+++ b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am
@@ -16,7 +16,7 @@
EXTRA_PROGRAMS = ndbd_redo_log_reader
ndbd_redo_log_reader_SOURCES = redoLogReader/records.cpp \
- redoLogReader/redoLogFileReader.cpp
+ redoLogReader/reader.cpp
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp
index e5df14aea9a..e5df14aea9a 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 42f4033dd4a..75d79ba737f 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1499,12 +1499,12 @@ private:
void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
TcConnectRecord * const regTcPtr);
// Trigger and index handling
- bool saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len);
+ int saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
- bool saveINDXATTRINFO(Signal* signal,
+ int saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 024d7bdb00c..887e6f848b1 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -1800,9 +1800,18 @@ start_failure:
}//switch
}
+static
+inline
+bool
+compare_transid(Uint32* val0, Uint32* val1)
+{
+ Uint32 tmp0 = val0[0] ^ val1[0];
+ Uint32 tmp1 = val0[1] ^ val1[1];
+ return (tmp0 | tmp1) == 0;
+}
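compare_transid folds the two-word transaction id comparison into XOR plus OR, leaving a single branch instead of two. A standalone demonstration mirroring the helper above:

#include <cstdint>
#include <cassert>

// XOR each 32-bit half, OR the results; the ids are equal exactly when
// the combined value is zero.
static inline bool compare_transid(const uint32_t* a, const uint32_t* b)
{
  uint32_t tmp0 = a[0] ^ b[0];
  uint32_t tmp1 = a[1] ^ b[1];
  return (tmp0 | tmp1) == 0;
}

int main()
{
  uint32_t x[2] = { 0xdeadbeefu, 42 };
  uint32_t y[2] = { 0xdeadbeefu, 42 };
  uint32_t z[2] = { 0xdeadbeefu, 43 };
  assert(compare_transid(x, y));
  assert(!compare_transid(x, z));
  return 0;
}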
+
void Dbtc::execKEYINFO(Signal* signal)
{
- UintR compare_transid1, compare_transid2;
jamEntry();
apiConnectptr.i = signal->theData[0];
tmaxData = 20;
@@ -1812,10 +1821,8 @@ void Dbtc::execKEYINFO(Signal* signal)
}//if
ptrAss(apiConnectptr, apiConnectRecord);
ttransid_ptr = 1;
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
+ if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false)
+ {
TCKEY_abort(signal, 19);
return;
}//if
@@ -2116,7 +2123,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
void Dbtc::execATTRINFO(Signal* signal)
{
- UintR compare_transid1, compare_transid2;
UintR Tdata1 = signal->theData[0];
UintR Tlength = signal->length();
UintR TapiConnectFilesize = capiConnectFilesize;
@@ -2131,17 +2137,13 @@ void Dbtc::execATTRINFO(Signal* signal)
return;
}//if
- UintR Tdata2 = signal->theData[1];
- UintR Tdata3 = signal->theData[2];
ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
- compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
- compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
apiConnectptr.p = regApiPtr;
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
+ if (compare_transid(regApiPtr->transid, signal->theData+1) == false)
+ {
DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
- << " transid("<<hex<<Tdata2<<", "<<Tdata3);
+ << " transid("<<hex<<signal->theData[1]<<", "<<signal->theData[2]);
TCKEY_abort(signal, 19);
return;
}//if
@@ -5456,11 +5458,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
}
}//Dbtc::execTC_COMMITREQ()
+/**
+ * TCROLLBACKREQ
+ *
+ * Format is:
+ *
+ * thedata[0] = apiconnectptr
+ * thedata[1] = transid[0]
+ * thedata[2] = transid[1]
+ * OPTIONAL thedata[3] = flags
+ *
+ * Flags:
+ * 0x1 = potentiallyBad data from API (try not to assert)
+ */
void Dbtc::execTCROLLBACKREQ(Signal* signal)
{
+ bool potentiallyBad= false;
UintR compare_transid1, compare_transid2;
jamEntry();
+
+ if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
+ {
+ ndbout_c("Trying to roll back potentially bad txn\n");
+ potentiallyBad= true;
+ }
+
apiConnectptr.i = signal->theData[0];
if (apiConnectptr.i >= capiConnectFilesize) {
goto TC_ROLL_warning;
@@ -5547,12 +5570,14 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
TC_ROLL_warning:
jam();
- warningHandlerLab(signal, __LINE__);
+ if(likely(potentiallyBad==false))
+ warningHandlerLab(signal, __LINE__);
return;
TC_ROLL_system_error:
jam();
- systemErrorLab(signal, __LINE__);
+ if(likely(potentiallyBad==false))
+ systemErrorLab(signal, __LINE__);
return;
}//Dbtc::execTCROLLBACKREQ()
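The optional fourth signal word carries the flags documented above; bit 0x1 downgrades the sanity checks so a rollback of a known-bad transaction does not assert. A hypothetical sender-side sketch (struct and names are illustrative, not the NDB API):

#include <cstdint>
#include <cstdio>

struct SignalData { uint32_t theData[25]; uint32_t len; };

void buildRollbackReq(SignalData& s, uint32_t apiConnectPtr,
                      uint32_t transid0, uint32_t transid1,
                      bool potentiallyBad)
{
  s.theData[0] = apiConnectPtr;
  s.theData[1] = transid0;
  s.theData[2] = transid1;
  s.len = 3;                 // classic three-word TCROLLBACKREQ
  if (potentiallyBad)
  {
    s.theData[3] = 0x1;      // flags word, appended only when needed
    s.len = 4;
  }
}

int main()
{
  SignalData s;
  buildRollbackReq(s, 7, 0x1234, 0x5678, true);
  printf("signal length %u flags 0x%x\n", s.len, s.theData[3]);
  return 0;
}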
@@ -10235,6 +10260,7 @@ void Dbtc::inithost(Signal* signal)
hostptr.p->noOfWordsTCINDXCONF = 0;
hostptr.p->noOfPackedWordsLqh = 0;
hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
+ hostptr.p->m_nf_bits = 0;
}//for
c_alive_nodes.clear();
}//Dbtc::inithost()
@@ -11565,6 +11591,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
+ regApiPtr->apiConnectstate = CS_STARTED;
regApiPtr->transid[0] = tcIndxReq->transId1;
regApiPtr->transid[1] = tcIndxReq->transId2;
}//if
@@ -11605,20 +11632,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
indexOp->expectedAttrInfo = attrLength;
Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
- if (saveINDXKEYINFO(signal,
- indexOp,
- dataPtr,
- includedIndexLength)) {
+
+ int ret;
+ if ((ret = saveINDXKEYINFO(signal,
+ indexOp,
+ dataPtr,
+ includedIndexLength)) == 0)
+ {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
+ else if (ret == -1)
+ {
+ jam();
+ return;
+ }
+
dataPtr += includedIndexLength;
if (saveINDXATTRINFO(signal,
indexOp,
dataPtr,
- includedAttrLength)) {
+ includedAttrLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11721,13 +11757,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
+ if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
+ {
+ TCKEY_abort(signal, 19);
+ return;
+ }
+
+ if (regApiPtr->apiConnectstate == CS_ABORTING)
+ {
+ jam();
+ return;
+ }
+
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXKEYINFO(signal,
indexOp,
src,
- keyInfoLength)) {
+ keyInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11754,17 +11802,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
+ if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
+ {
+ TCKEY_abort(signal, 19);
+ return;
+ }
+
+ if (regApiPtr->apiConnectstate == CS_ABORTING)
+ {
+ jam();
+ return;
+ }
+
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXATTRINFO(signal,
indexOp,
src,
- attrInfoLength)) {
+ attrInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
+ return;
}
+ return;
}
}
@@ -11772,12 +11834,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
* Save signal INDXKEYINFO
 * Return 0 when all needed data has been received, 1 if more is expected, -1 on abort
*/
-bool Dbtc::saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
+int
+Dbtc::saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
{
- if (!indexOp->keyInfo.append(src, len)) {
+ if (ERROR_INSERTED(8052) || !indexOp->keyInfo.append(src, len)) {
jam();
// Failed to seize keyInfo, abort transaction
#ifdef VM_TRACE
@@ -11787,15 +11850,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
+ terrorCode = 289;
+ if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+ apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
- return false;
+ return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
- return true;
+ return 0;
}
- return false;
+ return 1;
}
bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
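saveINDXKEYINFO and saveINDXATTRINFO now return an int so the caller can distinguish "wait for more signals" from "transaction already aborted", which the old bool could not express. A standalone sketch of the convention (names illustrative, not the Dbtc API):

#include <cassert>

//    0 -> all KEYINFO/ATTRINFO received, the read can proceed
//    1 -> data saved, but more signals are expected
//   -1 -> append failed, transaction was aborted; the caller must stop
int save_chunk(bool append_ok, bool all_received)
{
  if (!append_ok)
    return -1;               // abortErrorLab() is taken in the real code
  return all_received ? 0 : 1;
}

int main()
{
  assert(save_chunk(false, false) == -1);
  assert(save_chunk(true, false) == 1);
  assert(save_chunk(true, true) == 0);
  return 0;
}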
@@ -11807,12 +11872,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
* Save signal INDXATTRINFO
 * Return 0 when all needed data has been received, 1 if more is expected, -1 on abort
*/
-bool Dbtc::saveINDXATTRINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
+int
+Dbtc::saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
{
- if (!indexOp->attrInfo.append(src, len)) {
+ if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
jam();
#ifdef VM_TRACE
ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
@@ -11820,15 +11886,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
+ terrorCode = 289;
+ if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+ apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
- return false;
+ return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
- return true;
+ return 0;
}
- return false;
+ return 1;
}
bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
@@ -12012,6 +12080,9 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
+
+ releaseIndexOperation(regApiPtr, indexOp);
+
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
return;
@@ -12556,7 +12627,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
TcIndexOperationPtr& indexOpPtr)
{
- return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
+ if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
+ {
+ ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
+ ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
+ ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
+ ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
+ ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
+ ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
+ return true;
+ }
+
+ return false;
}
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 4577671202c..f28687dca0d 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -1053,6 +1053,8 @@ ArrayPool<TupTriggerData> c_triggerPool;
struct {
Uint32 tabUserPtr;
Uint32 tabUserRef;
+ Uint32 m_lcpno;
+ Uint32 m_fragPtrI;
} m_dropTable;
State tableStatus;
};
@@ -1614,6 +1616,11 @@ private:
void execACCKEYREF(Signal* signal);
void execACC_ABORTCONF(Signal* signal);
+
+ // Drop table
+ void execFSREMOVEREF(Signal*);
+ void execFSREMOVECONF(Signal*);
+
//------------------------------------------------------------------
//------------------------------------------------------------------
// Methods to handle execution of TUPKEYREQ + ATTRINFO.
@@ -2505,7 +2512,9 @@ private:
void drop_fragment_free_extent_log_buffer_callback(Signal*, Uint32, Uint32);
void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32);
void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32);
-
+ void drop_fragment_fsremove(Signal*, TablerecPtr, FragrecordPtr);
+ void drop_fragment_fsremove_done(Signal*, TablerecPtr, FragrecordPtr);
+
// Initialisation
void initData();
void initRecords();
@@ -2691,6 +2700,10 @@ private:
ArrayPool<Page> c_page_pool;
Uint32 cnoOfAllocatedPages;
+ Uint32 m_max_allocate_pages;
+
+ /* read ahead in pages during disk order scan */
+ Uint32 m_max_page_read_ahead;
Tablerec *tablerec;
Uint32 cnoOfTablerec;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index ee0f194211b..812f071e037 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -179,7 +179,8 @@ Dbtup::dealloc_tuple(Signal* signal,
&disk, tmpptr, gci);
}
- if (! (bits & Tuple_header::LCP_SKIP) && lcpScan_ptr_i != RNIL)
+ if (! (bits & (Tuple_header::LCP_SKIP | Tuple_header::ALLOC)) &&
+ lcpScan_ptr_i != RNIL)
{
ScanOpPtr scanOp;
c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
index 1182ac4ee7d..8e532ae97b5 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -74,6 +74,10 @@ Dbtup::reportMemoryUsage(Signal* signal, int incDec){
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
}
+#ifdef VM_TRACE
+extern Uint32 fc_left, fc_right, fc_remove;
+#endif
+
void
Dbtup::execDUMP_STATE_ORD(Signal* signal)
{
@@ -155,12 +159,20 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
return;
}//if
#endif
-#if defined VM_TRACE && 0
- if (type == 1211){
- ndbout_c("Startar modul test av Page Manager");
+#if defined VM_TRACE
+ if (type == 1211 || type == 1212 || type == 1213){
+ Uint32 seed = time(0);
+ if (signal->getLength() > 1)
+ seed = signal->theData[1];
+    ndbout_c("Starting module test of Page Manager (seed: 0x%x)", seed);
+ srand(seed);
Vector<Chunk> chunks;
const Uint32 LOOPS = 1000;
+ Uint32 sum_req = 0;
+ Uint32 sum_conf = 0;
+ Uint32 sum_loop = 0;
+ Uint32 max_loop = 0;
for(Uint32 i = 0; i<LOOPS; i++){
// Case
@@ -177,8 +189,15 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
if(chunks.size() == 0 && c == 0){
c = 1 + rand() % 2;
}
+
+ if (type == 1211)
+ ndbout_c("loop=%d case=%d free=%d alloc=%d", i, c, free, alloc);
- ndbout_c("loop=%d case=%d free=%d alloc=%d", i, c, free, alloc);
+ if (type == 1213)
+ {
+ c = 1;
+ alloc = 2 + (sum_conf >> 3) + (sum_conf >> 4);
+ }
switch(c){
case 0:{ // Release
const int ch = rand() % chunks.size();
@@ -190,23 +209,33 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
case 2: { // Seize(n) - fail
alloc += free;
// Fall through
+ sum_req += free;
+ goto doalloc;
}
case 1: { // Seize(n) (success)
-
+ sum_req += alloc;
+ doalloc:
Chunk chunk;
allocConsPages(alloc, chunk.pageCount, chunk.pageId);
ndbrequire(chunk.pageCount <= alloc);
if(chunk.pageCount != 0){
chunks.push_back(chunk);
if(chunk.pageCount != alloc) {
- ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
- alloc, chunk.pageCount, free);
+ if (type == 1211)
+ ndbout_c(" Tried to allocate %d - only allocated %d - free: %d",
+ alloc, chunk.pageCount, free);
}
} else {
ndbout_c(" Failed to alloc %d pages with %d pages free",
alloc, free);
}
+ sum_conf += chunk.pageCount;
+ Uint32 tot = fc_left + fc_right + fc_remove;
+ sum_loop += tot;
+ if (tot > max_loop)
+ max_loop = tot;
+
for(Uint32 i = 0; i<chunk.pageCount; i++){
PagePtr pagePtr;
pagePtr.i = chunk.pageId + i;
@@ -225,6 +254,10 @@ Dbtup::execDUMP_STATE_ORD(Signal* signal)
returnCommonArea(chunk.pageId, chunk.pageCount);
chunks.erase(chunks.size() - 1);
}
+
+ ndbout_c("Got %u%% of requested allocs, loops : %u 100*avg: %u max: %u",
+ (100 * sum_conf) / sum_req, sum_loop, 100*sum_loop / LOOPS,
+ max_loop);
}
#endif
}//Dbtup::execDUMP_STATE_ORD()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
index 1c3986afbbd..87705232de2 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -317,6 +317,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
unsigned uncommitted, committed;
uncommitted = committed = ~(unsigned)0;
(void) tsman.get_page_free_bits(&page, &uncommitted, &committed);
+ jamEntry();
idx = alloc.calc_page_free_bits(real_free);
ddassert(idx == committed);
@@ -427,12 +428,12 @@ Dbtup::disk_page_prealloc(Signal* signal,
c_extent_pool.getPtr(ext);
if ((pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits)) >= 0)
{
- jam();
+ jamEntry();
found= true;
}
else
{
- jam();
+ jamEntry();
/**
* The current extent is not in a free list
       * and since it couldn't accommodate the request
@@ -489,7 +490,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0)
{
- jam();
+ jamEntry();
#if NOT_YET_UNDO_ALLOC_EXTENT
c_lgman->free_log_space(logfile_group_id,
sizeof(Disk_undo::AllocExtent)>>2);
@@ -541,6 +542,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
alloc.m_curr_extent_info_ptr_i= ext.i;
ext.p->m_free_matrix_pos= RNIL;
pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits);
+ jamEntry();
ddassert(pageBits >= 0);
}
@@ -600,6 +602,7 @@ Dbtup::disk_page_prealloc(Signal* signal,
}
int res= m_pgman.get_page(signal, preq, flags);
+ jamEntry();
switch(res)
{
case 0:
@@ -896,6 +899,7 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
// Make sure no one will allocate it...
tsman.unmap_page(&key, MAX_FREE_LIST - 1);
+ jamEntry();
}
void
@@ -947,6 +951,7 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
fragPtr.p->m_tablespace_id);
tsman.unmap_page(&key, idx);
+ jamEntry();
pagePtr.p->list_index = idx | 0x8000;
}
@@ -995,6 +1000,7 @@ Dbtup::disk_page_alloc(Signal* signal,
fragPtrP->m_tablespace_id);
tsman.update_page_free_bits(key, new_bits, lsn);
+ jamEntry();
}
}
@@ -1047,6 +1053,7 @@ Dbtup::disk_page_free(Signal *signal,
fragPtrP->m_tablespace_id);
tsman.update_page_free_bits(key, new_bits, lsn);
+ jamEntry();
}
Uint32 ext = pagePtr.p->m_extent_info_ptr;
@@ -1100,6 +1107,7 @@ Dbtup::disk_page_abort_prealloc(Signal *signal, Fragrecord* fragPtrP,
memcpy(&req.m_page, key, sizeof(Local_key));
int res= m_pgman.get_page(signal, req, flags);
+ jamEntry();
switch(res)
{
case 0:
@@ -1228,6 +1236,7 @@ Dbtup::disk_page_alloc_extent_log_buffer_callback(Signal* signal,
Uint64 lsn= lgman.add_entry(c, 1);
tsman.update_lsn(&key, lsn);
+ jamEntry();
}
#endif
@@ -1246,6 +1255,7 @@ Dbtup::disk_page_undo_alloc(Page* page, const Local_key* key,
Uint64 lsn= lgman.add_entry(c, 1);
m_pgman.update_lsn(* key, lsn);
+ jamEntry();
return lsn;
}
@@ -1275,6 +1285,7 @@ Dbtup::disk_page_undo_update(Page* page, const Local_key* key,
Uint64 lsn= lgman.add_entry(c, 3);
m_pgman.update_lsn(* key, lsn);
+ jamEntry();
return lsn;
}
@@ -1304,6 +1315,7 @@ Dbtup::disk_page_undo_free(Page* page, const Local_key* key,
Uint64 lsn= lgman.add_entry(c, 3);
m_pgman.update_lsn(* key, lsn);
+ jamEntry();
return lsn;
}
@@ -1398,6 +1410,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
int flags = 0;
int res= m_pgman.get_page(signal, preq, flags);
+ jamEntry();
switch(res)
{
case 0:
@@ -1541,6 +1554,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
lsn = undo->m_lsn - 1; // make sure undo isn't run again...
m_pgman.update_lsn(undo->m_key, lsn);
+ jamEntry();
}
else if (DBG_UNDO)
{
@@ -1633,6 +1647,7 @@ Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo)
fragPtrP->m_tablespace_id);
tsman.restart_undo_page_free_bits(&undo->m_key, new_bits, undo->m_lsn, lsn);
+ jamEntry();
}
int
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 69b2d6d116e..45766e5e9c4 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -1201,9 +1201,19 @@ int Dbtup::handleInsertReq(Signal* signal,
if(!prevOp->is_first_operation())
org= (Tuple_header*)c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location);
if (regTabPtr->need_expand())
+ {
expand_tuple(req_struct, sizes, org, regTabPtr, !disk_insert);
+ memset(req_struct->m_disk_ptr->m_null_bits+
+ regTabPtr->m_offsets[DD].m_null_offset, 0xFF,
+ 4*regTabPtr->m_offsets[DD].m_null_words);
+ }
else
+ {
memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
+ }
+ memset(tuple_ptr->m_null_bits+
+ regTabPtr->m_offsets[MM].m_null_offset, 0xFF,
+ 4*regTabPtr->m_offsets[MM].m_null_words);
}
if (disk_insert)
@@ -1491,6 +1501,7 @@ int Dbtup::handleDeleteReq(Signal* signal,
goto error;
}
memcpy(dst, org, regTabPtr->total_rec_size << 2);
+ req_struct->m_tuple_ptr = (Tuple_header*)dst;
}
else
{
@@ -1528,7 +1539,9 @@ int Dbtup::handleDeleteReq(Signal* signal,
return 0;
}
- if (setup_read(req_struct, regOperPtr, regFragPtr, regTabPtr, disk))
+ if (regTabPtr->need_expand(disk))
+ prepare_read(req_struct, regTabPtr, disk);
+
{
Uint32 RlogSize;
int ret= handleReadReq(signal, regOperPtr, regTabPtr, req_struct);
@@ -1842,7 +1855,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 RstackPtr= 0;
union {
Uint32 TregMemBuffer[32];
- Uint64 Tdummy[16];
+ Uint64 align[16];
};
Uint32 TstackMemBuffer[32];
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index ef242da2a43..74c7d38bd64 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -99,6 +99,10 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman)
addRecSignal(GSN_ACCKEYREF, &Dbtup::execACCKEYREF);
addRecSignal(GSN_ACC_ABORTCONF, &Dbtup::execACC_ABORTCONF);
+ // Drop table
+ addRecSignal(GSN_FSREMOVEREF, &Dbtup::execFSREMOVEREF, true);
+ addRecSignal(GSN_FSREMOVECONF, &Dbtup::execFSREMOVECONF, true);
+
attrbufrec = 0;
fragoperrec = 0;
fragrecord = 0;
@@ -301,6 +305,12 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
Uint32 noOfTriggers= 0;
Uint32 tmp= 0;
+
+ if (ndb_mgm_get_int_parameter(p, CFG_DB_MAX_ALLOCATE, &tmp))
+ tmp = 32 * 1024 * 1024;
+ m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE;
+
+ tmp = 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
initPageRangeSize(tmp);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec));
@@ -334,6 +344,18 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch));
c_scanLockPool.setSize(nScanOp * nScanBatch);
+
+ /* read ahead for disk scan can not be more that disk page buffer */
+ {
+  /* read ahead for disk scan cannot be more than the disk page buffer */
+ ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY, &tmp);
+ m_max_page_read_ahead = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; // in pages
+ // never read ahead more than 32 pages
+ if (m_max_page_read_ahead > 32)
+ m_max_page_read_ahead = 32;
+ }
+
+
ScanOpPtr lcp;
ndbrequire(c_scanOpPool.seize(lcp));
new (lcp.p) ScanOp();
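The read-ahead limit above is derived from DiskPageBufferMemory rounded up to whole pages and then hard-capped at 32 pages. A standalone sketch of the sizing (the 32 KB GLOBAL_PAGE_SIZE is an assumption for the example):

#include <cstdint>
#include <cstdio>

int main()
{
  const uint64_t GLOBAL_PAGE_SIZE = 32 * 1024;
  uint64_t buffer_bytes = 64ULL * 1024 * 1024;   // DiskPageBufferMemory default
  uint64_t pages = (buffer_bytes + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE;
  if (pages > 32)
    pages = 32;                        // never read ahead more than 32 pages
  printf("max read-ahead: %llu pages\n", (unsigned long long)pages);
  return 0;
}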
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 81e678d0f6f..6866236f15e 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -21,6 +21,7 @@
#include <ndb_limits.h>
#include <pc.hpp>
#include <signaldata/TupFrag.hpp>
+#include <signaldata/FsRef.hpp>
#include <signaldata/FsConf.hpp>
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/DropTab.hpp>
@@ -1283,6 +1284,24 @@ Dbtup::drop_fragment_free_var_pages(Signal* signal)
sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
return;
}
+
+ /**
+ * Remove LCP's for fragment
+ */
+ tabPtr.p->m_dropTable.m_lcpno = 0;
+ tabPtr.p->m_dropTable.m_fragPtrI = fragPtr.i;
+ drop_fragment_fsremove(signal, tabPtr, fragPtr);
+}
+
+void
+Dbtup::drop_fragment_fsremove_done(Signal* signal,
+ TablerecPtr tabPtr,
+ FragrecordPtr fragPtr)
+{
+ /**
+ * LCP's removed...
+ * now continue with "next"
+ */
Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id ;
releaseFragPages(fragPtr.p);
Uint32 i;
@@ -1302,6 +1321,74 @@ Dbtup::drop_fragment_free_var_pages(Signal* signal)
return;
}
+// Remove LCP
+
+void
+Dbtup::drop_fragment_fsremove(Signal* signal,
+ TablerecPtr tabPtr,
+ FragrecordPtr fragPtr)
+{
+ FsRemoveReq* req = (FsRemoveReq*)signal->getDataPtrSend();
+ req->userReference = reference();
+ req->userPointer = tabPtr.i;
+ req->directory = 0;
+ req->ownDirectory = 0;
+
+ Uint32 lcpno = tabPtr.p->m_dropTable.m_lcpno;
+ Uint32 fragId = fragPtr.p->fragmentId;
+ Uint32 tableId = fragPtr.p->fragTableId;
+
+ FsOpenReq::setVersion(req->fileNumber, 5);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
+ FsOpenReq::v5_setLcpNo(req->fileNumber, lcpno);
+ FsOpenReq::v5_setTableId(req->fileNumber, tableId);
+ FsOpenReq::v5_setFragmentId(req->fileNumber, fragId);
+ sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
+ FsRemoveReq::SignalLength, JBB);
+}
+
+void
+Dbtup::execFSREMOVEREF(Signal* signal)
+{
+ jamEntry();
+ FsRef* ref = (FsRef*)signal->getDataPtr();
+ Uint32 userPointer = ref->userPointer;
+ FsConf* conf = (FsConf*)signal->getDataPtrSend();
+ conf->userPointer = userPointer;
+ execFSREMOVECONF(signal);
+}
+
+void
+Dbtup::execFSREMOVECONF(Signal* signal)
+{
+ jamEntry();
+ FsConf* conf = (FsConf*)signal->getDataPtrSend();
+
+ TablerecPtr tabPtr;
+ FragrecordPtr fragPtr;
+
+ tabPtr.i = conf->userPointer;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ ndbrequire(tabPtr.p->tableStatus == DROPPING);
+
+ fragPtr.i = tabPtr.p->m_dropTable.m_fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+
+ tabPtr.p->m_dropTable.m_lcpno++;
+ if (tabPtr.p->m_dropTable.m_lcpno < 3)
+ {
+ jam();
+ drop_fragment_fsremove(signal, tabPtr, fragPtr);
+ }
+ else
+ {
+ jam();
+ drop_fragment_fsremove_done(signal, tabPtr, fragPtr);
+ }
+}
+// End remove LCP
+
void
Dbtup::start_restore_lcp(Uint32 tableId, Uint32 fragId)
{
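The new drop path above removes one LCP data file per round trip: drop_fragment_fsremove issues an FSREMOVEREQ and each FSREMOVECONF advances m_lcpno until files 0, 1 and 2 are gone, at which point drop_fragment_fsremove_done resumes the fragment teardown. A hedged sketch of that confirm-driven loop, with the signal machinery reduced to plain calls:

#include <cstdio>

struct DropTable { unsigned m_lcpno; };

static void drop_fragment_fsremove(DropTable& dt)    // stands in for FSREMOVEREQ
{ std::printf("remove LCP file %u\n", dt.m_lcpno); }

static void drop_fragment_fsremove_done(DropTable&)  // continues the fragment drop
{ std::printf("all LCP files removed\n"); }

static void execFSREMOVECONF(DropTable& dt)
{
  if (++dt.m_lcpno < 3)          // LCP files 0, 1 and 2, one request at a time
    drop_fragment_fsremove(dt);
  else
    drop_fragment_fsremove_done(dt);
}

int main()
{
  DropTable dt = { 0 };
  drop_fragment_fsremove(dt);    // kick off, as drop_fragment_free_var_pages does
  execFSREMOVECONF(dt);          // each confirm advances the loop
  execFSREMOVECONF(dt);
  execFSREMOVECONF(dt);
  return 0;
}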
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
index d10fabf42da..24806062fcf 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
@@ -146,10 +146,17 @@ void Dbtup::initializePage()
cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea
}//Dbtup::initializePage()
+#ifdef VM_TRACE
+Uint32 fc_left, fc_right, fc_remove;
+#endif
+
void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
Uint32& noOfPagesAllocated,
Uint32& allocPageRef)
{
+#ifdef VM_TRACE
+ fc_left = fc_right = fc_remove = 0;
+#endif
if (noOfPagesToAllocate == 0){
jam();
noOfPagesAllocated = 0;
@@ -228,7 +235,10 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
{
PagePtr pageFirstPtr, pageLastPtr;
Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
- while (allocPageRef > 0) {
+ Uint32 loop = 0;
+ while (allocPageRef > 0 &&
+ ++loop < 16)
+ {
jam();
pageLastPtr.i = allocPageRef - 1;
c_page_pool.getPtr(pageLastPtr);
@@ -256,6 +266,9 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
remainAllocate -= listSize;
}//if
}//if
+#ifdef VM_TRACE
+ fc_left++;
+#endif
}//while
}//Dbtup::findFreeLeftNeighbours()
@@ -269,7 +282,10 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
jam();
return;
}//if
- while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) {
+ Uint32 loop = 0;
+ while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize() &&
+ ++loop < 16)
+ {
jam();
pageFirstPtr.i = allocPageRef + noPagesAllocated;
c_page_pool.getPtr(pageFirstPtr);
@@ -296,24 +312,37 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
remainAllocate -= listSize;
}//if
}//if
+#ifdef VM_TRACE
+ fc_right++;
+#endif
}//while
}//Dbtup::findFreeRightNeighbours()
void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList)
{
cnoOfAllocatedPages -= (1 << insList);
- PagePtr pageLastPtr, pageInsPtr;
+ PagePtr pageLastPtr, pageInsPtr, pageHeadPtr;
+ pageHeadPtr.i = cfreepageList[insList];
c_page_pool.getPtr(pageInsPtr, insPageRef);
ndbrequire(insList < 16);
pageLastPtr.i = (pageInsPtr.i + (1 << insList)) - 1;
- pageInsPtr.p->next_cluster_page = cfreepageList[insList];
+ pageInsPtr.p->page_state = ZFREE_COMMON;
+ pageInsPtr.p->next_cluster_page = pageHeadPtr.i;
pageInsPtr.p->prev_cluster_page = RNIL;
pageInsPtr.p->last_cluster_page = pageLastPtr.i;
cfreepageList[insList] = pageInsPtr.i;
+ if (pageHeadPtr.i != RNIL)
+ {
+ jam();
+ c_page_pool.getPtr(pageHeadPtr);
+ pageHeadPtr.p->prev_cluster_page = pageInsPtr.i;
+ }
+
c_page_pool.getPtr(pageLastPtr);
+ pageLastPtr.p->page_state = ZFREE_COMMON;
pageLastPtr.p->first_cluster_page = pageInsPtr.i;
pageLastPtr.p->next_page = RNIL;
}//Dbtup::insertCommonArea()
@@ -321,12 +350,13 @@ void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList)
void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
{
cnoOfAllocatedPages += (1 << list);
- PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, pageSearchPtr, remPagePtr;
+ PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, remPagePtr;
c_page_pool.getPtr(remPagePtr, remPageRef);
ndbrequire(list < 16);
if (cfreepageList[list] == remPagePtr.i) {
jam();
+ ndbassert(remPagePtr.p->prev_cluster_page == RNIL);
cfreepageList[list] = remPagePtr.p->next_cluster_page;
pageNextPtr.i = cfreepageList[list];
if (pageNextPtr.i != RNIL) {
@@ -335,30 +365,25 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
pageNextPtr.p->prev_cluster_page = RNIL;
}//if
} else {
- pageSearchPtr.i = cfreepageList[list];
- while (true) {
- jam();
- c_page_pool.getPtr(pageSearchPtr);
- pagePrevPtr = pageSearchPtr;
- pageSearchPtr.i = pageSearchPtr.p->next_cluster_page;
- if (pageSearchPtr.i == remPagePtr.i) {
- jam();
- break;
- }//if
- }//while
+ pagePrevPtr.i = remPagePtr.p->prev_cluster_page;
pageNextPtr.i = remPagePtr.p->next_cluster_page;
+ c_page_pool.getPtr(pagePrevPtr);
pagePrevPtr.p->next_cluster_page = pageNextPtr.i;
- if (pageNextPtr.i != RNIL) {
+ if (pageNextPtr.i != RNIL)
+ {
jam();
c_page_pool.getPtr(pageNextPtr);
pageNextPtr.p->prev_cluster_page = pagePrevPtr.i;
- }//if
+ }
}//if
remPagePtr.p->next_cluster_page= RNIL;
remPagePtr.p->last_cluster_page= RNIL;
remPagePtr.p->prev_cluster_page= RNIL;
+ remPagePtr.p->page_state = ~ZFREE_COMMON;
pageLastPtr.i = (remPagePtr.i + (1 << list)) - 1;
c_page_pool.getPtr(pageLastPtr);
pageLastPtr.p->first_cluster_page= RNIL;
+ pageLastPtr.p->page_state = ~ZFREE_COMMON;
+
}//Dbtup::removeCommonArea()
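With insertCommonArea now maintaining prev_cluster_page for the list head, removeCommonArea can unlink a cluster in constant time instead of walking the free list to find the predecessor. The same O(1) unlink on a generic doubly linked list, as a sketch:

#include <cstdio>

struct Node { Node* prev; Node* next; int id; };

static void unlink_node(Node*& head, Node* n)
{
  // Head case clears the list head; otherwise the predecessor is reached
  // via prev (no search), exactly as the patched removeCommonArea does.
  if (head == n)
    head = n->next;
  else
    n->prev->next = n->next;
  if (n->next)
    n->next->prev = n->prev;
  n->prev = n->next = 0;
}

int main()
{
  Node a = {0, 0, 1}, b = {0, 0, 2}, c = {0, 0, 3};
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  Node* head = &a;
  unlink_node(head, &b);                 // no list walk needed
  for (Node* p = head; p; p = p->next)
    std::printf("%d\n", p->id);          // prints 1 then 3
  return 0;
}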
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 6ef8d3585e9..cde63091cfb 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -432,6 +432,11 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
/* -----------------------------------------------------------------*/
+
+ if (noAllocPages > m_max_allocate_pages)
+ {
+ noAllocPages = m_max_allocate_pages;
+ }
Uint32 allocated = allocFragPages(regFragPtr, noAllocPages);
regFragPtr->noOfPagesToGrow += allocated;
}//Dbtup::allocMoreFragPages()
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 56bac8868b8..6e53531e118 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -63,13 +63,11 @@ Dbtup::execACC_SCANREQ(Signal* signal)
break;
}
-#if BUG_27776_FIXED
if (!AccScanReq::getNoDiskScanFlag(req->requestInfo)
&& tablePtr.p->m_no_of_disk_attributes)
{
bits |= ScanOp::SCAN_DD;
}
-#endif
bool mm = (bits & ScanOp::SCAN_DD);
if (tablePtr.p->m_attributes[mm].m_no_of_varsize > 0) {
@@ -689,13 +687,74 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
// move to next extent
jam();
pos.m_extent_info_ptr_i = ext_ptr.i;
- Extent_info* ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
+ ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
key.m_file_no = ext->m_key.m_file_no;
key.m_page_no = ext->m_first_page_no;
}
}
key.m_page_idx = 0;
pos.m_get = ScanPos::Get_page_dd;
+ /*
+ read ahead for scan in disk order
+      do read ahead on every 8th page
+ */
+ if ((bits & ScanOp::SCAN_DD) &&
+ (((key.m_page_no - ext->m_first_page_no) & 7) == 0))
+ {
+ jam();
+ // initialize PGMAN request
+ Page_cache_client::Request preq;
+ preq.m_page = pos.m_key;
+ preq.m_callback = TheNULLCallback;
+
+ // set maximum read ahead
+ Uint32 read_ahead = m_max_page_read_ahead;
+
+ while (true)
+ {
+ // prepare page read ahead in current extent
+ Uint32 page_no = preq.m_page.m_page_no;
+ Uint32 page_no_limit = page_no + read_ahead;
+ Uint32 limit = ext->m_first_page_no + alloc.m_extent_size;
+ if (page_no_limit > limit)
+ {
+ jam();
+ // read ahead crosses extent, set limit for this extent
+ read_ahead = page_no_limit - limit;
+ page_no_limit = limit;
+ // and make sure we only read one extra extent next time around
+ if (read_ahead > alloc.m_extent_size)
+ read_ahead = alloc.m_extent_size;
+ }
+ else
+ {
+ jam();
+ read_ahead = 0; // no more to read ahead after this
+ }
+ // do read ahead pages for this extent
+ while (page_no < page_no_limit)
+ {
+ // page request to PGMAN
+ jam();
+ preq.m_page.m_page_no = page_no;
+ int flags = 0;
+ // ignore result
+ m_pgman.get_page(signal, preq, flags);
+ jamEntry();
+ page_no++;
+ }
+ if (!read_ahead || !list.next(ext_ptr))
+ {
+ // no more extents after this or read ahead done
+ jam();
+ break;
+ }
+ // move to next extent and initialize PGMAN request accordingly
+ Extent_info* ext = c_extent_pool.getPtr(ext_ptr.i);
+ preq.m_page.m_file_no = ext->m_key.m_file_no;
+ preq.m_page.m_page_no = ext->m_first_page_no;
+ }
+ } // if ScanOp::SCAN_DD read ahead
}
/*FALLTHRU*/
case ScanPos::Get_page_dd:
@@ -728,6 +787,7 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
safe_cast(&Dbtup::disk_page_tup_scan_callback);
int flags = 0;
int res = m_pgman.get_page(signal, preq, flags);
+ jamEntry();
if (res == 0) {
jam();
// request queued
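The read-ahead loop above clips the request window at the current extent boundary, carries the remainder into the next extent, and caps the carry at one extent. A simplified model of that arithmetic (real extents come from a list with their own first page numbers; adjacency here is an assumption for brevity):

#include <cstdio>

static void plan_read_ahead(unsigned page_no, unsigned first_page_no,
                            unsigned extent_size, unsigned read_ahead)
{
  while (true)
  {
    unsigned limit = first_page_no + extent_size;  // end of current extent
    unsigned page_no_limit = page_no + read_ahead;
    if (page_no_limit > limit)
    {
      read_ahead = page_no_limit - limit;          // remainder spills over
      page_no_limit = limit;
      if (read_ahead > extent_size)
        read_ahead = extent_size;                  // at most one extra extent
    }
    else
      read_ahead = 0;                              // window fits; last round
    for (; page_no < page_no_limit; page_no++)
      std::printf("request page %u\n", page_no);   // get_page() in the patch
    if (read_ahead == 0)
      break;
    first_page_no += extent_size;                  // simplification: next extent
    page_no = first_page_no;
  }
}

int main()
{
  plan_read_ahead(28, 0, 32, 8);   // pages 28..31, then 32..35 of next extent
  return 0;
}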
diff --git a/storage/ndb/src/kernel/blocks/diskpage.cpp b/storage/ndb/src/kernel/blocks/diskpage.cpp
index 3f98e078746..50e9b6e53cb 100644
--- a/storage/ndb/src/kernel/blocks/diskpage.cpp
+++ b/storage/ndb/src/kernel/blocks/diskpage.cpp
@@ -49,7 +49,7 @@ operator<<(NdbOut& out, const File_formats::Zero_page_header& obj)
char buf[256];
out << "page size: " << obj.m_page_size << endl;
out << "ndb version: " << obj.m_ndb_version << ", " <<
- getVersionString(obj.m_ndb_version, 0, buf, sizeof(buf)) << endl;
+ ndbGetVersionString(obj.m_ndb_version, 0, buf, sizeof(buf)) << endl;
out << "ndb node id: " << obj.m_node_id << endl;
out << "file type: " << obj.m_file_type << endl;
out << "time: " << obj.m_time << ", "
diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp
index 82fed94f62e..23738717580 100644
--- a/storage/ndb/src/kernel/blocks/lgman.cpp
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp
@@ -346,6 +346,12 @@ Lgman::execCREATE_FILEGROUP_REQ(Signal* signal){
m_logfile_group_hash.add(ptr);
m_logfile_group_list.add(ptr);
+
+ if (getNodeState().getNodeRestartInProgress() ||
+ getNodeState().getSystemRestartInProgress())
+ {
+ ptr.p->m_state = Logfile_group::LG_STARTING;
+ }
CreateFilegroupImplConf* conf=
(CreateFilegroupImplConf*)signal->getDataPtr();
@@ -370,8 +376,6 @@ Lgman::execDROP_FILEGROUP_REQ(Signal* signal)
{
jamEntry();
- jamEntry();
-
Uint32 errorCode = 0;
DropFilegroupImplReq req = *(DropFilegroupImplReq*)signal->getDataPtr();
do
@@ -436,7 +440,6 @@ Lgman::drop_filegroup_drop_files(Signal* signal,
{
jam();
ndbrequire(! (ptr.p->m_state & Logfile_group::LG_THREAD_MASK));
- ndbrequire(ptr.p->m_meta_files.isEmpty());
ndbrequire(ptr.p->m_outstanding_fs == 0);
Local_undofile_list list(m_file_pool, ptr.p->m_files);
@@ -452,6 +455,18 @@ Lgman::drop_filegroup_drop_files(Signal* signal,
return;
}
+ Local_undofile_list metalist(m_file_pool, ptr.p->m_meta_files);
+ if (metalist.first(file_ptr))
+ {
+ jam();
+ metalist.remove(file_ptr);
+ list.add(file_ptr);
+ file_ptr.p->m_create.m_senderRef = ref;
+ file_ptr.p->m_create.m_senderData = data;
+ create_file_abort(signal, ptr, file_ptr);
+ return;
+ }
+
free_logbuffer_memory(ptr);
m_logfile_group_hash.release(ptr);
DropFilegroupImplConf *conf = (DropFilegroupImplConf*)signal->getDataPtr();
@@ -462,7 +477,8 @@ Lgman::drop_filegroup_drop_files(Signal* signal,
}
void
-Lgman::execCREATE_FILE_REQ(Signal* signal){
+Lgman::execCREATE_FILE_REQ(Signal* signal)
+{
jamEntry();
CreateFileImplReq* req= (CreateFileImplReq*)signal->getDataPtr();
@@ -491,6 +507,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){
switch(requestInfo){
case CreateFileImplReq::Commit:
{
+ jam();
ndbrequire(find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id));
file_ptr.p->m_create.m_senderRef = req->senderRef;
file_ptr.p->m_create.m_senderData = req->senderData;
@@ -503,6 +520,7 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){
Uint32 senderData = req->senderData;
if (find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id))
{
+ jam();
file_ptr.p->m_create.m_senderRef = senderRef;
file_ptr.p->m_create.m_senderData = senderData;
create_file_abort(signal, ptr, file_ptr);
@@ -510,11 +528,11 @@ Lgman::execCREATE_FILE_REQ(Signal* signal){
else
{
CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr();
+ jam();
conf->senderData = senderData;
conf->senderRef = reference();
sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal,
CreateFileImplConf::SignalLength, JBB);
- return;
}
return;
}
@@ -703,7 +721,8 @@ Lgman::create_file_commit(Signal* signal,
Uint32 senderData = ptr.p->m_create.m_senderData;
bool first= false;
- if(ptr.p->m_state == Undofile::FS_CREATING)
+ if(ptr.p->m_state == Undofile::FS_CREATING &&
+ (lg_ptr.p->m_state & Logfile_group::LG_ONLINE))
{
jam();
Local_undofile_list free(m_file_pool, lg_ptr.p->m_files);
@@ -2068,13 +2087,17 @@ Lgman::execSTART_RECREQ(Signal* signal)
void
Lgman::find_log_head(Signal* signal, Ptr<Logfile_group> ptr)
{
+ ndbrequire(ptr.p->m_state &
+ (Logfile_group::LG_STARTING | Logfile_group::LG_SORTING));
+
if(ptr.p->m_meta_files.isEmpty() && ptr.p->m_files.isEmpty())
{
jam();
/**
* Logfile_group wo/ any files
*/
-
+ ptr.p->m_state &= ~(Uint32)Logfile_group::LG_STARTING;
+ ptr.p->m_state |= Logfile_group::LG_ONLINE;
m_logfile_group_list.next(ptr);
signal->theData[0] = LgmanContinueB::FIND_LOG_HEAD;
signal->theData[1] = ptr.i;
diff --git a/storage/ndb/src/kernel/blocks/lgman.hpp b/storage/ndb/src/kernel/blocks/lgman.hpp
index b26c3219088..d2706818144 100644
--- a/storage/ndb/src/kernel/blocks/lgman.hpp
+++ b/storage/ndb/src/kernel/blocks/lgman.hpp
@@ -175,13 +175,14 @@ public:
,LG_SORTING = 0x002 // Sorting files
,LG_SEARCHING = 0x004 // Searching in last file
,LG_EXEC_THREAD = 0x008 // Execute thread is running
- ,LG_READ_THREAD = 0x010 // Read thread is running
+ ,LG_READ_THREAD = 0x010 // Read thread is running
,LG_FORCE_SYNC_THREAD = 0x020
,LG_SYNC_WAITERS_THREAD = 0x040
,LG_CUT_LOG_THREAD = 0x080
,LG_WAITERS_THREAD = 0x100
,LG_FLUSH_THREAD = 0x200
,LG_DROPPING = 0x400
+ ,LG_STARTING = 0x800
};
static const Uint32 LG_THREAD_MASK = Logfile_group::LG_FORCE_SYNC_THREAD |
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index adc6d1e3ed4..56ecc8ddc39 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -277,6 +277,14 @@ void Ndbcntr::execSTTOR(Signal* signal)
break;
case ZSTART_PHASE_1:
jam();
+ {
+ Uint32 db_watchdog_interval = 0;
+ const ndb_mgm_configuration_iterator * p =
+ m_ctx.m_config.getOwnConfigIterator();
+ ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval);
+ ndbrequire(db_watchdog_interval);
+ update_watch_dog_timer(db_watchdog_interval);
+ }
startPhase1Lab(signal);
break;
case ZSTART_PHASE_2:
@@ -1410,6 +1418,13 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
{
jamEntry();
+ if (ERROR_INSERTED(1001))
+ {
+ sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100,
+ signal->getLength());
+ return;
+ }
+
const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0];
NdbNodeBitmask allFailed;
allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes);
@@ -2734,16 +2749,34 @@ void Ndbcntr::execSTART_ORD(Signal* signal){
c_missra.execSTART_ORD(signal);
}
+#define CLEAR_DX 13
+#define CLEAR_LCP 3
+
void
-Ndbcntr::clearFilesystem(Signal* signal){
+Ndbcntr::clearFilesystem(Signal* signal)
+{
+ const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX;
+
FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend();
req->userReference = reference();
req->userPointer = 0;
req->directory = 1;
req->ownDirectory = 1;
- FsOpenReq::setVersion(req->fileNumber, 3);
- FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any...
- FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
+
+ if (lcp == 0)
+ {
+ FsOpenReq::setVersion(req->fileNumber, 3);
+    FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can be any...
+ FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount);
+ }
+ else
+ {
+ FsOpenReq::setVersion(req->fileNumber, 5);
+ FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA);
+ FsOpenReq::v5_setLcpNo(req->fileNumber, c_fsRemoveCount - CLEAR_DX);
+ FsOpenReq::v5_setTableId(req->fileNumber, 0);
+ FsOpenReq::v5_setFragmentId(req->fileNumber, 0);
+ }
sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal,
FsRemoveReq::SignalLength, JBA);
c_fsRemoveCount++;
@@ -2752,12 +2785,12 @@ Ndbcntr::clearFilesystem(Signal* signal){
void
Ndbcntr::execFSREMOVECONF(Signal* signal){
jamEntry();
- if(c_fsRemoveCount == 13){
+ if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){
jam();
sendSttorry(signal);
} else {
jam();
- ndbrequire(c_fsRemoveCount < 13);
+ ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP);
clearFilesystem(signal);
}//if
}
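clearFilesystem above is a counter-driven state machine: confirms 0 through 12 remove the version-3 DX disks and 13 through 15 the version-5 LCP files, until CLEAR_DX + CLEAR_LCP removals have completed. A compact model of the phase selection:

#include <cstdio>

#define CLEAR_DX  13
#define CLEAR_LCP 3

static void clear_one(unsigned count)
{
  if (count < CLEAR_DX)
    std::printf("remove DX disk %u (v3 file name)\n", count);
  else
    std::printf("remove LCP no %u (v5 file name)\n", count - CLEAR_DX);
}

int main()
{
  // Driven by FSREMOVECONF in the real code; a plain loop here.
  for (unsigned c = 0; c < CLEAR_DX + CLEAR_LCP; c++)
    clear_one(c);
  return 0;
}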
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index 5f93ee31bc7..5300d5bbfd9 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -99,6 +99,7 @@ AsyncFile::AsyncFile(SimulatedBlock& fs) :
{
m_page_ptr.setNull();
m_current_request= m_last_request= 0;
+ m_open_flags = 0;
}
void
@@ -163,7 +164,12 @@ AsyncFile::run()
theStartFlag = true;
// Create write buffer for bigger writes
theWriteBufferSize = WRITEBUFFERSIZE;
- theWriteBuffer = (char *) ndbd_malloc(theWriteBufferSize);
+ theWriteBufferUnaligned = (char *) ndbd_malloc(theWriteBufferSize +
+ NDB_O_DIRECT_WRITE_ALIGNMENT-1);
+ theWriteBuffer = (char *)
+ (((UintPtr)theWriteBufferUnaligned + NDB_O_DIRECT_WRITE_ALIGNMENT - 1) &
+ ~(UintPtr)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1));
+
NdbMutex_Unlock(theStartMutexPtr);
NdbCondition_Signal(theStartConditionPtr);
@@ -247,10 +253,83 @@ AsyncFile::run()
static char g_odirect_readbuf[2*GLOBAL_PAGE_SIZE -1];
#endif
+int
+AsyncFile::check_odirect_write(Uint32 flags, int& new_flags, int mode)
+{
+ assert(new_flags & (O_CREAT | O_TRUNC));
+#ifdef O_DIRECT
+ int ret;
+ char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
+ while (((ret = ::write(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) &&
+ (errno == EINTR));
+ if (ret == -1)
+ {
+ new_flags &= ~O_DIRECT;
+ ndbout_c("%s Failed to write using O_DIRECT, disabling",
+ theFileName.c_str());
+ }
+
+ close(theFd);
+ theFd = ::open(theFileName.c_str(), new_flags, mode);
+ if (theFd == -1)
+ return errno;
+#endif
+
+ return 0;
+}
+
+int
+AsyncFile::check_odirect_read(Uint32 flags, int &new_flags, int mode)
+{
+#ifdef O_DIRECT
+ int ret;
+ char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
+ while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) &&
+ (errno == EINTR));
+ if (ret == -1)
+ {
+ ndbout_c("%s Failed to read using O_DIRECT, disabling",
+ theFileName.c_str());
+ goto reopen;
+ }
+
+ if(lseek(theFd, 0, SEEK_SET) != 0)
+ {
+ return errno;
+ }
+
+ if ((flags & FsOpenReq::OM_CHECK_SIZE) == 0)
+ {
+ struct stat buf;
+ if ((fstat(theFd, &buf) == -1))
+ {
+ return errno;
+ }
+ else if ((buf.st_size % GLOBAL_PAGE_SIZE) != 0)
+ {
+ ndbout_c("%s filesize not a multiple of %d, disabling O_DIRECT",
+ theFileName.c_str(), GLOBAL_PAGE_SIZE);
+ goto reopen;
+ }
+ }
+
+ return 0;
+
+reopen:
+ close(theFd);
+ new_flags &= ~O_DIRECT;
+ theFd = ::open(theFileName.c_str(), new_flags, mode);
+ if (theFd == -1)
+ return errno;
+#endif
+ return 0;
+}
+
void AsyncFile::openReq(Request* request)
{
m_auto_sync_freq = 0;
m_write_wo_sync = 0;
+ m_open_flags = request->par.open.flags;
// for open.flags, see signal FSOPENREQ
#ifdef NDB_WIN32
@@ -312,7 +391,7 @@ void AsyncFile::openReq(Request* request)
}
#else
Uint32 flags = request->par.open.flags;
- Uint32 new_flags = 0;
+ int new_flags = 0;
// Convert file open flags from Solaris to Linux
if (flags & FsOpenReq::OM_CREATE)
@@ -343,10 +422,6 @@ void AsyncFile::openReq(Request* request)
{
new_flags |= O_DIRECT;
}
-#elif defined O_SYNC
- {
- flags |= FsOpenReq::OM_SYNC;
- }
#endif
if ((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT))
@@ -355,15 +430,19 @@ void AsyncFile::openReq(Request* request)
new_flags |= O_SYNC;
#endif
}
-
+
+ const char * rw = "";
switch(flags & 0x3){
case FsOpenReq::OM_READONLY:
+ rw = "r";
new_flags |= O_RDONLY;
break;
case FsOpenReq::OM_WRITEONLY:
+ rw = "w";
new_flags |= O_WRONLY;
break;
case FsOpenReq::OM_READWRITE:
+ rw = "rw";
new_flags |= O_RDWR;
break;
default:
@@ -404,11 +483,6 @@ no_odirect:
if (new_flags & O_DIRECT)
{
new_flags &= ~O_DIRECT;
- flags |= FsOpenReq::OM_SYNC;
-#ifdef O_SYNC
- if (! (flags & FsOpenReq::OM_INIT))
- new_flags |= O_SYNC;
-#endif
goto no_odirect;
}
#endif
@@ -421,11 +495,6 @@ no_odirect:
else if (new_flags & O_DIRECT)
{
new_flags &= ~O_DIRECT;
- flags |= FsOpenReq::OM_SYNC;
-#ifdef O_SYNC
- if (! (flags & FsOpenReq::OM_INIT))
- new_flags |= O_SYNC;
-#endif
goto no_odirect;
}
#endif
@@ -512,7 +581,6 @@ no_odirect:
{
ndbout_c("error on first write(%d), disable O_DIRECT", err);
new_flags &= ~O_DIRECT;
- flags |= FsOpenReq::OM_SYNC;
close(theFd);
theFd = ::open(theFileName.c_str(), new_flags, mode);
if (theFd != -1)
@@ -532,26 +600,32 @@ no_odirect:
else if (flags & FsOpenReq::OM_DIRECT)
{
#ifdef O_DIRECT
- do {
- int ret;
- char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1));
- while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && (errno == EINTR));
- if (ret == -1)
- {
- ndbout_c("%s Failed to read using O_DIRECT, disabling", theFileName.c_str());
- flags |= FsOpenReq::OM_SYNC;
- flags |= FsOpenReq::OM_INIT;
- break;
- }
- if(lseek(theFd, 0, SEEK_SET) != 0)
- {
- request->error = errno;
- return;
- }
- } while (0);
+ if (flags & (FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE))
+ {
+ request->error = check_odirect_write(flags, new_flags, mode);
+ }
+ else
+ {
+ request->error = check_odirect_read(flags, new_flags, mode);
+ }
+
+ if (request->error)
+ return;
#endif
}
-
+#ifdef VM_TRACE
+ if (flags & FsOpenReq::OM_DIRECT)
+ {
+#ifdef O_DIRECT
+ ndbout_c("%s %s O_DIRECT: %d",
+ theFileName.c_str(), rw,
+ !!(new_flags & O_DIRECT));
+#else
+ ndbout_c("%s %s O_DIRECT: 0",
+ theFileName.c_str(), rw);
+#endif
+ }
+#endif
if ((flags & FsOpenReq::OM_SYNC) && (flags & FsOpenReq::OM_INIT))
{
#ifdef O_SYNC
@@ -562,6 +636,10 @@ no_odirect:
new_flags &= ~(O_CREAT | O_TRUNC);
new_flags |= O_SYNC;
theFd = ::open(theFileName.c_str(), new_flags, mode);
+ if (theFd == -1)
+ {
+ request->error = errno;
+ }
#endif
}
#endif
@@ -878,7 +956,12 @@ AsyncFile::writevReq( Request * request)
void
AsyncFile::closeReq(Request * request)
{
- syncReq(request);
+ if (m_open_flags & (
+ FsOpenReq::OM_WRITEONLY |
+ FsOpenReq::OM_READWRITE |
+ FsOpenReq::OM_APPEND )) {
+ syncReq(request);
+ }
#ifdef NDB_WIN32
if(!CloseHandle(hFile)) {
request->error = GetLastError();
@@ -1079,7 +1162,8 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){
void AsyncFile::endReq()
{
// Thread is ended with return
- if (theWriteBuffer) ndbd_free(theWriteBuffer, theWriteBufferSize);
+ if (theWriteBufferUnaligned)
+ ndbd_free(theWriteBufferUnaligned, theWriteBufferSize);
}
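theWriteBuffer is now carved out of an over-allocated block so it meets the O_DIRECT alignment requirement, and the raw pointer is kept in theWriteBufferUnaligned for the eventual free. A minimal sketch of the round-up, assuming a 512-byte NDB_O_DIRECT_WRITE_ALIGNMENT (the constant's value is an assumption):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const uintptr_t ALIGN = 512;   // assumed NDB_O_DIRECT_WRITE_ALIGNMENT

// Over-allocate by ALIGN-1 bytes and round the pointer up to the next
// multiple of ALIGN; the original pointer must be kept for the free.
static char* alloc_aligned(size_t size, void** unaligned)
{
  *unaligned = std::malloc(size + ALIGN - 1);
  return (char*)(((uintptr_t)*unaligned + ALIGN - 1) & ~(ALIGN - 1));
}

int main()
{
  void* raw;
  char* buf = alloc_aligned(256 * 1024, &raw);
  std::printf("aligned: %d\n", (int)(((uintptr_t)buf % ALIGN) == 0));
  std::free(raw);                      // free the unaligned pointer
  return 0;
}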
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index cc667225ce2..e4a01753acd 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -224,6 +224,8 @@ private:
#else
int theFd;
#endif
+
+ Uint32 m_open_flags; // OM_ flags from request to open file
MemoryChannel<Request> *theReportTo;
MemoryChannel<Request>* theMemoryChannelPtr;
@@ -234,9 +236,13 @@ private:
bool theStartFlag;
int theWriteBufferSize;
char* theWriteBuffer;
+ void* theWriteBufferUnaligned;
size_t m_write_wo_sync; // Writes wo/ sync
size_t m_auto_sync_freq; // Auto sync freq in bytes
+
+ int check_odirect_read(Uint32 flags, int&new_flags, int mode);
+ int check_odirect_write(Uint32 flags, int&new_flags, int mode);
public:
SimulatedBlock& m_fs;
Ptr<GlobalPage> m_page_ptr;
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
index 966f82cc9c2..411e7064efa 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
@@ -75,7 +75,7 @@ template <class T>
class MemoryChannel
{
public:
- MemoryChannel( int size= 256);
+ MemoryChannel( int size= 512);
virtual ~MemoryChannel( );
void writeChannel( T *t);
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 44f8a8ab05b..26bf8878852 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -652,7 +652,7 @@ AsyncFile*
Ndbfs::createAsyncFile(){
// Check limit of open files
- if (m_maxFiles !=0 && theFiles.size()+1 == m_maxFiles) {
+ if (m_maxFiles !=0 && theFiles.size() == m_maxFiles) {
// Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp
index 4e1d1c29ab8..d8e0c053984 100644
--- a/storage/ndb/src/kernel/blocks/pgman.cpp
+++ b/storage/ndb/src/kernel/blocks/pgman.cpp
@@ -122,9 +122,9 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal)
if (page_buffer > 0)
{
- page_buffer /= GLOBAL_PAGE_SIZE; // in pages
- m_page_entry_pool.setSize(100*page_buffer);
+ page_buffer = (page_buffer + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; // in pages
m_param.m_max_pages = page_buffer;
+ m_page_entry_pool.setSize(m_param.m_lirs_stack_mult * page_buffer);
m_param.m_max_hot_pages = (page_buffer * 9) / 10;
}
@@ -141,9 +141,10 @@ Pgman::execREAD_CONFIG_REQ(Signal* signal)
Pgman::Param::Param() :
m_max_pages(64), // smallish for testing
+ m_lirs_stack_mult(10),
m_max_hot_pages(56),
m_max_loop_count(256),
- m_max_io_waits(64),
+ m_max_io_waits(256),
m_stats_loop_delay(1000),
m_cleanup_loop_delay(200),
m_lcp_loop_delay(0)
@@ -301,6 +302,9 @@ Pgman::get_sublist_no(Page_state state)
{
return Page_entry::SL_LOCKED;
}
+ if (state == Page_entry::ONSTACK) {
+ return Page_entry::SL_IDLE;
+ }
return Page_entry::SL_OTHER;
}
@@ -415,15 +419,55 @@ Pgman::get_page_entry(Ptr<Page_entry>& ptr, Uint32 file_no, Uint32 page_no)
{
if (find_page_entry(ptr, file_no, page_no))
{
+ jam();
ndbrequire(ptr.p->m_state != 0);
m_stats.m_page_hits++;
+
+#ifdef VM_TRACE
+ debugOut << "PGMAN: get_page_entry: found" << endl;
+ debugOut << "PGMAN: " << ptr << endl;
+#endif
return true;
}
+ if (m_page_entry_pool.getNoOfFree() == 0)
+ {
+ jam();
+ Page_sublist& pl_idle = *m_page_sublist[Page_entry::SL_IDLE];
+ Ptr<Page_entry> idle_ptr;
+ if (pl_idle.first(idle_ptr))
+ {
+ jam();
+
+#ifdef VM_TRACE
+ debugOut << "PGMAN: get_page_entry: re-use idle entry" << endl;
+ debugOut << "PGMAN: " << idle_ptr << endl;
+#endif
+
+ Page_state state = idle_ptr.p->m_state;
+ ndbrequire(state == Page_entry::ONSTACK);
+
+ Page_stack& pl_stack = m_page_stack;
+ ndbrequire(pl_stack.hasPrev(idle_ptr));
+ pl_stack.remove(idle_ptr);
+ state &= ~ Page_entry::ONSTACK;
+ set_page_state(idle_ptr, state);
+ ndbrequire(idle_ptr.p->m_state == 0);
+
+ release_page_entry(idle_ptr);
+ }
+ }
+
if (seize_page_entry(ptr, file_no, page_no))
{
+ jam();
ndbrequire(ptr.p->m_state == 0);
m_stats.m_page_faults++;
+
+#ifdef VM_TRACE
+ debugOut << "PGMAN: get_page_entry: seize" << endl;
+ debugOut << "PGMAN: " << ptr << endl;
+#endif
return true;
}
@@ -624,6 +668,7 @@ Pgman::lirs_reference(Ptr<Page_entry> ptr)
jam();
move_cleanup_ptr(ptr);
pl_queue.remove(ptr);
+ state &= ~ Page_entry::ONQUEUE;
}
if (state & Page_entry::BOUND)
{
@@ -649,11 +694,24 @@ Pgman::lirs_reference(Ptr<Page_entry> ptr)
if (state & Page_entry::ONSTACK)
{
jam();
+ bool at_bottom = ! pl_stack.hasPrev(ptr);
pl_stack.remove(ptr);
+ if (at_bottom)
+ {
+ jam();
+ ndbassert(state & Page_entry::HOT);
+ lirs_stack_prune();
+ }
}
pl_stack.add(ptr);
state |= Page_entry::ONSTACK;
state |= Page_entry::HOT;
+ // it could be on queue already
+ if (state & Page_entry::ONQUEUE) {
+ jam();
+ pl_queue.remove(ptr);
+ state &= ~Page_entry::ONQUEUE;
+ }
}
set_page_state(ptr, state);
@@ -902,9 +960,11 @@ Pgman::process_map(Signal* signal)
#ifdef VM_TRACE
debugOut << "PGMAN: >process_map" << endl;
#endif
- int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
- if (max_count > 0)
+ int max_count = 0;
+ if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
+ max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
+ }
Page_sublist& pl_map = *m_page_sublist[Page_entry::SL_MAP];
while (! pl_map.isEmpty() && --max_count >= 0)
@@ -1056,15 +1116,10 @@ Pgman::process_cleanup(Signal* signal)
}
int max_loop_count = m_param.m_max_loop_count;
- int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
-
- if (max_count > 0)
- {
+ int max_count = 0;
+ if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
+ max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
- /*
- * Possibly add code here to avoid writing too rapidly. May be
- * unnecessary since only cold pages are cleaned.
- */
}
Ptr<Page_entry> ptr = m_cleanup_ptr;
@@ -1166,9 +1221,12 @@ bool
Pgman::process_lcp(Signal* signal)
{
Page_hashlist& pl_hash = m_page_hashlist;
- int max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
- if (max_count > 0)
+
+ int max_count = 0;
+ if (m_param.m_max_io_waits > m_stats.m_current_io_waits) {
+ max_count = m_param.m_max_io_waits - m_stats.m_current_io_waits;
max_count = max_count / 2 + 1;
+ }
#ifdef VM_TRACE
debugOut
@@ -1559,6 +1617,7 @@ Pgman::execFSWRITEREF(Signal* signal)
int
Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
{
+ jamEntry();
#ifdef VM_TRACE
Ptr<Page_request> tmp = { &page_req, RNIL};
debugOut << "PGMAN: >get_page" << endl;
@@ -1706,6 +1765,7 @@ Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
void
Pgman::update_lsn(Ptr<Page_entry> ptr, Uint32 block, Uint64 lsn)
{
+ jamEntry();
#ifdef VM_TRACE
const char* bname = getBlockName(block, "?");
debugOut << "PGMAN: >update_lsn: block=" << bname << " lsn=" << lsn << endl;
@@ -1816,6 +1876,11 @@ Pgman::free_data_file(Uint32 file_no, Uint32 fd)
int
Pgman::drop_page(Ptr<Page_entry> ptr)
{
+#ifdef VM_TRACE
+ debugOut << "PGMAN: drop_page" << endl;
+ debugOut << "PGMAN: " << ptr << endl;
+#endif
+
Page_stack& pl_stack = m_page_stack;
Page_queue& pl_queue = m_page_queue;
@@ -1828,8 +1893,15 @@ Pgman::drop_page(Ptr<Page_entry> ptr)
if (state & Page_entry::ONSTACK)
{
jam();
+ bool at_bottom = ! pl_stack.hasPrev(ptr);
pl_stack.remove(ptr);
state &= ~ Page_entry::ONSTACK;
+ if (at_bottom)
+ {
+ jam();
+ ndbassert(state & Page_entry::HOT);
+ lirs_stack_prune();
+ }
}
if (state & Page_entry::ONQUEUE)
@@ -1839,6 +1911,7 @@ Pgman::drop_page(Ptr<Page_entry> ptr)
state &= ~ Page_entry::ONQUEUE;
}
+ ndbassert(ptr.p->m_real_page_i != RNIL);
if (ptr.p->m_real_page_i != RNIL)
{
jam();
@@ -1925,6 +1998,8 @@ Pgman::verify_page_entry(Ptr<Page_entry> ptr)
break;
case Page_entry::SL_LOCKED:
break;
+ case Page_entry::SL_IDLE:
+ break;
case Page_entry::SL_OTHER:
break;
default:
@@ -1971,8 +2046,11 @@ Pgman::verify_page_lists()
ndbrequire(stack_count == pl_stack.count() || dump_page_lists());
ndbrequire(queue_count == pl_queue.count() || dump_page_lists());
+ Uint32 hot_count = 0;
Uint32 hot_bound_count = 0;
Uint32 cold_bound_count = 0;
+ Uint32 stack_request_count = 0;
+ Uint32 queue_request_count = 0;
Uint32 i1 = RNIL;
for (pl_stack.first(ptr); ptr.i != RNIL; pl_stack.next(ptr))
@@ -1983,9 +2061,13 @@ Pgman::verify_page_lists()
ndbrequire(state & Page_entry::ONSTACK || dump_page_lists());
if (! pl_stack.hasPrev(ptr))
ndbrequire(state & Page_entry::HOT || dump_page_lists());
- if (state & Page_entry::HOT &&
- state & Page_entry::BOUND)
- hot_bound_count++;
+ if (state & Page_entry::HOT) {
+ hot_count++;
+ if (state & Page_entry::BOUND)
+ hot_bound_count++;
+ }
+ if (state & Page_entry::REQUEST)
+ stack_request_count++;
}
Uint32 i2 = RNIL;
@@ -1997,6 +2079,8 @@ Pgman::verify_page_lists()
ndbrequire(state & Page_entry::ONQUEUE || dump_page_lists());
ndbrequire(state & Page_entry::BOUND || dump_page_lists());
cold_bound_count++;
+ if (state & Page_entry::REQUEST)
+ queue_request_count++;
}
Uint32 tot_bound_count =
@@ -2029,7 +2113,11 @@ Pgman::verify_page_lists()
<< " cache:" << m_stats.m_num_pages
<< "(" << locked_bound_count << "L)"
<< " stack:" << pl_stack.count()
+ << " hot:" << hot_count
+ << " hot_bound:" << hot_bound_count
+ << " stack_request:" << stack_request_count
<< " queue:" << pl_queue.count()
+ << " queue_request:" << queue_request_count
<< " queuewait:" << queuewait_count << endl;
debugOut << "PGMAN:";
@@ -2137,6 +2225,8 @@ Pgman::get_sublist_name(Uint32 list_no)
return "busy";
case Page_entry::SL_LOCKED:
return "locked";
+ case Page_entry::SL_IDLE:
+ return "idle";
case Page_entry::SL_OTHER:
return "other";
}
@@ -2227,6 +2317,13 @@ operator<<(NdbOut& out, Ptr<Pgman::Page_entry> ptr)
out << " busy_count=" << dec << pe.m_busy_count;
#ifdef VM_TRACE
{
+ Pgman::Page_stack& pl_stack = pe.m_this->m_page_stack;
+ if (! pl_stack.hasNext(ptr))
+ out << " top";
+ if (! pl_stack.hasPrev(ptr))
+ out << " bottom";
+ }
+ {
Pgman::Local_page_request_list
req_list(ptr.p->m_this->m_page_request_pool, ptr.p->m_requests);
if (! req_list.isEmpty())
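Three pgman hunks in this file replace the bare max_io_waits - current_io_waits subtraction with a guarded form, so an in-flight count above the limit yields a zero budget instead of a bogus one. The guarded computation in isolation:

#include <cstdio>

// Guarded I/O budget, as process_map/process_cleanup/process_lcp now compute.
static int io_budget(unsigned max_io_waits, unsigned current_io_waits)
{
  int max_count = 0;
  if (max_io_waits > current_io_waits)
  {
    max_count = (int)(max_io_waits - current_io_waits);
    max_count = max_count / 2 + 1;   // spend half the headroom, plus one
  }
  return max_count;
}

int main()
{
  std::printf("%d %d\n", io_budget(256, 10), io_budget(256, 300)); // 124 0
  return 0;
}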
diff --git a/storage/ndb/src/kernel/blocks/pgman.hpp b/storage/ndb/src/kernel/blocks/pgman.hpp
index 07029d1c3e5..e3bf0fa5780 100644
--- a/storage/ndb/src/kernel/blocks/pgman.hpp
+++ b/storage/ndb/src/kernel/blocks/pgman.hpp
@@ -325,8 +325,9 @@ private:
,SL_CALLBACK_IO = 4
,SL_BUSY = 5
,SL_LOCKED = 6
- ,SL_OTHER = 7
- ,SUBLIST_COUNT = 8
+ ,SL_IDLE = 7
+ ,SL_OTHER = 8
+ ,SUBLIST_COUNT = 9
};
Uint16 m_file_no; // disk page address set at seize
@@ -401,6 +402,7 @@ private:
struct Param {
Param();
Uint32 m_max_pages; // max number of cache pages
+ Uint32 m_lirs_stack_mult; // in m_max_pages (around 3-10)
Uint32 m_max_hot_pages; // max hot cache pages (up to 99%)
Uint32 m_max_loop_count; // limit purely local loops
Uint32 m_max_io_waits;
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 4b4fba01889..1fba4d62e17 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -2793,7 +2793,7 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
"incompatible with %s",
type == NodeInfo::API ? "api or mysqld" : "management server",
apiNodePtr.i,
- getVersionString(version,"",buf,sizeof(buf)),
+ ndbGetVersionString(version,"",buf,sizeof(buf)),
NDB_VERSION_STRING);
apiNodePtr.p->phase = ZAPI_INACTIVE;
sendApiRegRef(signal, ref, ApiRegRef::UnsupportedVersion);
@@ -3052,7 +3052,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
if (failedNodePtr.i == getOwnNodeId()) {
jam();
- Uint32 code = 0;
+ Uint32 code = NDBD_EXIT_NODE_DECLARED_DEAD;
const char * msg = 0;
char extra[100];
switch(aFailCause){
diff --git a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp
index d4a2414ef2f..efc4bc1948a 100644
--- a/storage/ndb/src/kernel/blocks/restore.cpp
+++ b/storage/ndb/src/kernel/blocks/restore.cpp
@@ -557,6 +557,9 @@ Restore::restore_next(Signal* signal, FilePtr file_ptr)
case BackupFormat::GCP_ENTRY:
parse_gcp_entry(signal, file_ptr, data, len);
break;
+ case BackupFormat::EMPTY_ENTRY:
+ // skip
+ break;
case 0x4e444242: // 'NDBB'
if (check_file_version(signal, ntohl(* (data+2))) == 0)
{
@@ -1268,7 +1271,7 @@ Restore::check_file_version(Signal* signal, Uint32 file_version)
{
char buf[255];
char verbuf[255];
- getVersionString(file_version, 0, verbuf, sizeof(verbuf));
+ ndbGetVersionString(file_version, 0, verbuf, sizeof(verbuf));
BaseString::snprintf(buf, sizeof(buf),
"Unsupported version of LCP files found on disk, "
" found: %s", verbuf);
diff --git a/storage/ndb/src/kernel/blocks/tsman.cpp b/storage/ndb/src/kernel/blocks/tsman.cpp
index 62aa80a67fe..8f61ec0cf7b 100644
--- a/storage/ndb/src/kernel/blocks/tsman.cpp
+++ b/storage/ndb/src/kernel/blocks/tsman.cpp
@@ -1309,6 +1309,12 @@ Tsman::execDROP_FILE_REQ(Signal* signal)
Local_datafile_list free(m_file_pool, fg_ptr.p->m_free_files);
free.remove(file_ptr);
}
+ else if(find_file_by_id(file_ptr, fg_ptr.p->m_meta_files, req.file_id))
+ {
+ jam();
+ Local_datafile_list meta(m_file_pool, fg_ptr.p->m_meta_files);
+ meta.remove(file_ptr);
+ }
else
{
errorCode = DropFileImplRef::NoSuchFile;
diff --git a/storage/ndb/src/kernel/error/ndbd_exit_codes.c b/storage/ndb/src/kernel/error/ndbd_exit_codes.c
index 9987f9e0ecb..b36ea3af8ee 100644
--- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c
+++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c
@@ -57,6 +57,8 @@ static const ErrStruct errArray[] =
"error(s) on other node(s)"},
{NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. "
"Please check if cluster is already running"},
+ {NDBD_EXIT_NODE_DECLARED_DEAD, XAE,
+ "Node declared dead. See error log for details"},
{NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"},
{NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system "
"restart, please investigate error(s) on other node(s)"},
diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp
index e0b485eda59..72770d35cde 100644
--- a/storage/ndb/src/kernel/vm/Configuration.cpp
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp
@@ -74,35 +74,35 @@ static struct my_option my_long_options[] =
{ "initial", OPT_INITIAL,
"Perform initial start of ndbd, including cleaning the file system. "
"Consult documentation before using this",
- (gptr*) &_initial, (gptr*) &_initial, 0,
+ (uchar**) &_initial, (uchar**) &_initial, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "nostart", 'n',
"Don't start ndbd immediately. Ndbd will await command from ndb_mgmd",
- (gptr*) &_no_start, (gptr*) &_no_start, 0,
+ (uchar**) &_no_start, (uchar**) &_no_start, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "daemon", 'd', "Start ndbd as daemon (default)",
- (gptr*) &_daemon, (gptr*) &_daemon, 0,
+ (uchar**) &_daemon, (uchar**) &_daemon, 0,
GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
{ "nodaemon", OPT_NODAEMON,
"Do not start ndbd as daemon, provided for testing purposes",
- (gptr*) &_no_daemon, (gptr*) &_no_daemon, 0,
+ (uchar**) &_no_daemon, (uchar**) &_no_daemon, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "foreground", OPT_FOREGROUND,
"Run real ndbd in foreground, provided for debugging purposes"
" (implies --nodaemon)",
- (gptr*) &_foreground, (gptr*) &_foreground, 0,
+ (uchar**) &_foreground, (uchar**) &_foreground, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "nowait-nodes", OPT_NOWAIT_NODES,
"Nodes that will not be waited for during start",
- (gptr*) &_nowait_nodes, (gptr*) &_nowait_nodes, 0,
+ (uchar**) &_nowait_nodes, (uchar**) &_nowait_nodes, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "initial-start", OPT_INITIAL_START,
"Perform initial start",
- (gptr*) &_initialstart, (gptr*) &_initialstart, 0,
+ (uchar**) &_initialstart, (uchar**) &_initialstart, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "bind-address", OPT_NOWAIT_NODES,
"Local bind address",
- (gptr*) &_bind_address, (gptr*) &_bind_address, 0,
+ (uchar**) &_bind_address, (uchar**) &_bind_address, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -443,6 +443,11 @@ Configuration::setupConfiguration(){
"TimeBetweenWatchDogCheck missing");
}
+ if(iter.get(CFG_DB_WATCHDOG_INTERVAL_INITIAL, &_timeBetweenWatchDogCheckInitial)){
+ ERROR_SET(fatal, NDBD_EXIT_INVALID_CONFIG, "Invalid configuration fetched",
+ "TimeBetweenWatchDogCheckInitial missing");
+ }
+
/**
* Get paths
*/
@@ -462,9 +467,12 @@ Configuration::setupConfiguration(){
* Create the watch dog thread
*/
{
- Uint32 t = _timeBetweenWatchDogCheck;
+ if (_timeBetweenWatchDogCheckInitial < _timeBetweenWatchDogCheck)
+ _timeBetweenWatchDogCheckInitial = _timeBetweenWatchDogCheck;
+
+ Uint32 t = _timeBetweenWatchDogCheckInitial;
t = globalEmulatorData.theWatchDog ->setCheckInterval(t);
- _timeBetweenWatchDogCheck = t;
+ _timeBetweenWatchDogCheckInitial = t;
}
ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);
diff --git a/storage/ndb/src/kernel/vm/Configuration.hpp b/storage/ndb/src/kernel/vm/Configuration.hpp
index 934261e40af..918a889a171 100644
--- a/storage/ndb/src/kernel/vm/Configuration.hpp
+++ b/storage/ndb/src/kernel/vm/Configuration.hpp
@@ -84,6 +84,7 @@ private:
Uint32 _maxErrorLogs;
Uint32 _lockPagesInMainMemory;
Uint32 _timeBetweenWatchDogCheck;
+ Uint32 _timeBetweenWatchDogCheckInitial;
ndb_mgm_configuration * m_ownConfig;
ndb_mgm_configuration * m_clusterConfig;
diff --git a/storage/ndb/src/kernel/vm/DynArr256.cpp b/storage/ndb/src/kernel/vm/DynArr256.cpp
index ff3e7578c6c..4e73bb8830b 100644
--- a/storage/ndb/src/kernel/vm/DynArr256.cpp
+++ b/storage/ndb/src/kernel/vm/DynArr256.cpp
@@ -344,18 +344,27 @@ err:
void
DynArr256::init(ReleaseIterator &iter)
{
- iter.m_sz = 0;
+ iter.m_sz = 1;
iter.m_pos = 0;
- iter.m_ptr_i[0] = m_head.m_ptr_i;
- iter.m_ptr_i[1] = RNIL;
+ iter.m_ptr_i[0] = RNIL;
+ iter.m_ptr_i[1] = m_head.m_ptr_i;
iter.m_ptr_i[2] = RNIL;
iter.m_ptr_i[3] = RNIL;
+ iter.m_ptr_i[4] = RNIL;
}
-bool
-DynArr256::release(ReleaseIterator &iter)
+/**
+ * The iterator is left positioned at the next entry
+ *
+ * 0 - done
+ * 1 - data
+ * 2 - no data
+ */
+Uint32
+DynArr256::release(ReleaseIterator &iter, Uint32 * retptr)
{
- Uint32 ptrI = iter.m_ptr_i[iter.m_sz];
+ Uint32 sz = iter.m_sz;
+ Uint32 ptrI = iter.m_ptr_i[sz];
Uint32 page_no = ptrI >> DA256_BITS;
Uint32 page_idx = ptrI & DA256_MASK;
Uint32 type_id = (~m_pool.m_type_id) & 0xFFFF;
@@ -364,9 +373,8 @@ DynArr256::release(ReleaseIterator &iter)
if (ptrI != RNIL)
{
- Uint32 tmp = iter.m_pos & 255;
- Uint32 p0 = tmp;
- for (; p0<256 && p0 < tmp + 16; p0++)
+ Uint32 p0 = iter.m_pos & 255;
+ for (; p0<256; p0++)
{
Uint32 *retVal, *magic_ptr, p;
if (p0 != 255)
@@ -389,55 +397,52 @@ DynArr256::release(ReleaseIterator &iter)
}
Uint32 magic = *magic_ptr;
+ Uint32 val = *retVal;
if (unlikely(! ((magic & (1 << p)) && (magic >> 16) == type_id)))
goto err;
- Uint32 val = * retVal;
- if (val != RNIL)
+ if (sz == m_head.m_sz)
{
- if (iter.m_sz + 2 == m_head.m_sz)
+ * retptr = val;
+ p0++;
+ if (p0 != 256)
{
- * retVal = RNIL;
- m_pool.release(val);
- iter.m_pos = (iter.m_pos & ~255) + p0;
- return false;
+ /**
+ * Move next
+ */
+ iter.m_pos &= ~(Uint32)255;
+ iter.m_pos |= p0;
}
else
{
- * retVal = RNIL;
- iter.m_sz++;
- iter.m_ptr_i[iter.m_sz] = val;
- iter.m_pos = (p0 << 8);
- return false;
+ /**
+ * Move up
+ */
+ m_pool.release(ptrI);
+ iter.m_sz --;
+ iter.m_pos >>= 8;
}
+ return 1;
+ }
+ else if (val != RNIL)
+ {
+ iter.m_sz++;
+ iter.m_ptr_i[iter.m_sz] = val;
+ iter.m_pos = (p0 << 8);
+ * retVal = RNIL;
+ return 2;
}
}
- if (p0 == 256)
- {
- if (iter.m_sz == 0)
- goto done;
- iter.m_sz--;
- iter.m_pos >>= 8;
-
- m_pool.release(ptrI);
- return false;
- }
- else
- {
- iter.m_pos = (iter.m_pos & ~255) + p0;
- return false;
- }
- }
-
-done:
- if (m_head.m_ptr_i != RNIL)
- {
- m_pool.release(m_head.m_ptr_i);
+ assert(p0 == 256);
+ m_pool.release(ptrI);
+ iter.m_sz --;
+ iter.m_pos >>= 8;
+ return 2;
}
new (&m_head) Head();
- return true;
+ return 0;
err:
require(false);
@@ -638,6 +643,7 @@ static
void
simple(DynArr256 & arr, int argc, char* argv[])
{
+ ndbout_c("argc: %d", argc);
for (Uint32 i = 1; i<(Uint32)argc; i++)
{
Uint32 * s = arr.set(atoi(argv[i]));
@@ -865,7 +871,8 @@ write(DynArr256& arr, int argc, char ** argv)
ndbout_c("Elapsed %lldus -> %f us/set", start, uspg);
DynArr256::ReleaseIterator iter;
arr.init(iter);
- while(!arr.release(iter));
+ Uint32 val;
+ while(arr.release(iter, &val));
}
}
@@ -902,7 +909,7 @@ main(int argc, char** argv)
DynArr256::Head head;
DynArr256 arr(pool, head);
- if (strcmp(argv[1], "--args") == 0)
+ if (strcmp(argv[1], "--simple") == 0)
simple(arr, argc, argv);
else if (strcmp(argv[1], "--basic") == 0)
basic(arr, argc, argv);
@@ -913,8 +920,8 @@ main(int argc, char** argv)
DynArr256::ReleaseIterator iter;
arr.init(iter);
- Uint32 cnt = 0;
- while (!arr.release(iter)) cnt++;
+ Uint32 cnt = 0, val;
+ while (arr.release(iter, &val)) cnt++;
ndbout_c("allocatedpages: %d allocatednodes: %d releasednodes: %d"
" releasecnt: %d",
diff --git a/storage/ndb/src/kernel/vm/DynArr256.hpp b/storage/ndb/src/kernel/vm/DynArr256.hpp
index 31329add1a2..780dee7e4bf 100644
--- a/storage/ndb/src/kernel/vm/DynArr256.hpp
+++ b/storage/ndb/src/kernel/vm/DynArr256.hpp
@@ -49,6 +49,8 @@ public:
Uint32 m_ptr_i;
Uint32 m_sz;
+
+ bool isEmpty() const { return m_sz == 0;}
};
DynArr256(DynArr256Pool & pool, Head& head) :
@@ -61,12 +63,16 @@ public:
{
Uint32 m_sz;
Uint32 m_pos;
- Uint32 m_ptr_i[4];
+ Uint32 m_ptr_i[5];
};
void init(ReleaseIterator&);
- bool release(ReleaseIterator&);
-
+ /**
+ * return 0 - done
+ * 1 - data (in retptr)
+ * 2 - nodata
+ */
+ Uint32 release(ReleaseIterator&, Uint32* retptr);
protected:
Head & m_head;
DynArr256Pool & m_pool;
diff --git a/storage/ndb/src/kernel/vm/RWPool.cpp b/storage/ndb/src/kernel/vm/RWPool.cpp
index 192a8f87402..056b2149e2a 100644
--- a/storage/ndb/src/kernel/vm/RWPool.cpp
+++ b/storage/ndb/src/kernel/vm/RWPool.cpp
@@ -140,7 +140,7 @@ RWPool::release(Ptr<void> ptr)
Uint32 ff = page->m_first_free;
* (record_ptr + m_record_info.m_offset_next_pool) = ff;
- page->m_first_free = ptr.i;
+ page->m_first_free = ptr.i & POOL_RECORD_MASK;
page->m_ref_count = ref_cnt - 1;
if (ff == REC_NIL)
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
index 3125fc33258..bc16b9f364e 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -19,6 +19,7 @@
#include <NdbOut.hpp>
#include <GlobalData.hpp>
#include <Emulator.hpp>
+#include <WatchDog.hpp>
#include <ErrorHandlingMacros.hpp>
#include <TimeQueue.hpp>
#include <TransporterRegistry.hpp>
@@ -38,6 +39,9 @@
#include <AttributeDescriptor.hpp>
#include <NdbSqlUtil.hpp>
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
#define ljamEntry() jamEntryLine(30000 + __LINE__)
#define ljam() jamLine(30000 + __LINE__)
@@ -655,14 +659,20 @@ SimulatedBlock::getBatSize(Uint16 blockNo){
return sb->theBATSize;
}
+void* SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId)
+{
+ return allocRecordAligned(type, s, n, 0, 0, clear, paramId);
+}
+
void*
-SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId)
+SimulatedBlock::allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align, bool clear, Uint32 paramId)
{
void * p = NULL;
- size_t size = n*s;
- Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s);
- refresh_watch_dog();
+ Uint32 over_alloc = unaligned_buffer ? (align - 1) : 0;
+ size_t size = n*s + over_alloc;
+ Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s) + over_alloc;
+ refresh_watch_dog(9);
if (real_size > 0){
#ifdef VM_TRACE_MEM
ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes",
@@ -696,14 +706,24 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, U
char * ptr = (char*)p;
const Uint32 chunk = 128 * 1024;
while(size > chunk){
- refresh_watch_dog();
+ refresh_watch_dog(9);
memset(ptr, 0, chunk);
ptr += chunk;
size -= chunk;
}
- refresh_watch_dog();
+ refresh_watch_dog(9);
memset(ptr, 0, size);
}
+ if (unaligned_buffer)
+ {
+ *unaligned_buffer = p;
+ p = (void *)(((UintPtr)p + over_alloc) & ~(UintPtr)(over_alloc));
+#ifdef VM_TRACE
+ g_eventLogger.info("'%s' (%u) %llu %llu, alignment correction %u bytes",
+ type, align, (Uint64)p, (Uint64)p+n*s,
+ (Uint32)((UintPtr)p - (UintPtr)*unaligned_buffer));
+#endif
+ }
}
return p;
}
@@ -720,9 +740,16 @@ SimulatedBlock::deallocRecord(void ** ptr,
}
void
-SimulatedBlock::refresh_watch_dog()
+SimulatedBlock::refresh_watch_dog(Uint32 place)
+{
+ globalData.incrementWatchDogCounter(place);
+}
+
+void
+SimulatedBlock::update_watch_dog_timer(Uint32 interval)
{
- globalData.incrementWatchDogCounter(1);
+ extern EmulatorData globalEmulatorData;
+ globalEmulatorData.theWatchDog->setCheckInterval(interval);
}
void
@@ -1631,6 +1658,11 @@ SimulatedBlock::sendFragmentedSignal(NodeReceiverGroup rg,
}
SimulatedBlock::Callback SimulatedBlock::TheEmptyCallback = {0, 0};
+void
+SimulatedBlock::TheNULLCallbackFunction(class Signal*, Uint32, Uint32)
+{ abort(); /* should never be called */ }
+SimulatedBlock::Callback SimulatedBlock::TheNULLCallback =
+{ &SimulatedBlock::TheNULLCallbackFunction, 0 };
void
SimulatedBlock::sendFragmentedSignal(BlockReference ref,
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
index 37a8dde5956..31f219718e5 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -18,7 +18,7 @@
#include <NdbTick.h>
#include <kernel_types.h>
-#include <ndb_version.h>
+#include <util/version.h>
#include <ndb_limits.h>
#include "VMSignal.hpp"
@@ -131,6 +131,8 @@ public:
virtual const char* get_filename(Uint32 fd) const { return "";}
protected:
static Callback TheEmptyCallback;
+ void TheNULLCallbackFunction(class Signal*, Uint32, Uint32);
+ static Callback TheNULLCallback;
void execute(Signal* signal, Callback & c, Uint32 returnCode);
@@ -334,7 +336,8 @@ protected:
* Refresh Watch Dog in initialising code
*
*/
- void refresh_watch_dog();
+ void refresh_watch_dog(Uint32 place = 1);
+ void update_watch_dog_timer(Uint32 interval);
/**
* Prog error
@@ -377,6 +380,7 @@ protected:
*
*/
void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0);
+ void* allocRecordAligned(const char * type, size_t s, size_t n, void **unaligned_buffer, Uint32 align = NDB_O_DIRECT_WRITE_ALIGNMENT, bool clear = true, Uint32 paramId = 0);
/**
* Deallocate record
@@ -597,6 +601,8 @@ inline
void
SimulatedBlock::execute(Signal* signal, Callback & c, Uint32 returnCode){
CallbackFunction fun = c.m_callbackFunction;
+ if (fun == TheNULLCallback.m_callbackFunction)
+ return;
ndbrequire(fun != 0);
c.m_callbackFunction = NULL;
(this->*fun)(signal, c.m_callbackData, returnCode);
diff --git a/storage/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp
index d1abb709b1e..a7f5e8f5c2b 100644
--- a/storage/ndb/src/kernel/vm/WatchDog.cpp
+++ b/storage/ndb/src/kernel/vm/WatchDog.cpp
@@ -16,6 +16,7 @@
#include <ndb_global.h>
#include <my_pthread.h>
+#include <sys/times.h>
#include "WatchDog.hpp"
#include "GlobalData.hpp"
@@ -24,6 +25,8 @@
#include <ErrorHandlingMacros.hpp>
#include <EventLogger.hpp>
+#include <NdbTick.h>
+
extern EventLogger g_eventLogger;
extern "C"
@@ -71,66 +74,115 @@ WatchDog::doStop(){
}
}
+const char *get_action(Uint32 IPValue)
+{
+ const char *action;
+ switch (IPValue) {
+ case 1:
+ action = "Job Handling";
+ break;
+ case 2:
+ action = "Scanning Timers";
+ break;
+ case 3:
+ action = "External I/O";
+ break;
+ case 4:
+ action = "Print Job Buffers at crash";
+ break;
+ case 5:
+ action = "Checking connections";
+ break;
+ case 6:
+ action = "Performing Send";
+ break;
+ case 7:
+ action = "Polling for Receive";
+ break;
+ case 8:
+ action = "Performing Receive";
+ break;
+ case 9:
+ action = "Allocating memory";
+ break;
+ default:
+ action = "Unknown place";
+ break;
+ }//switch
+ return action;
+}
+
void
-WatchDog::run(){
- unsigned int anIPValue;
- unsigned int alerts = 0;
+WatchDog::run()
+{
+ unsigned int anIPValue, sleep_time;
unsigned int oldIPValue = 0;
-
+ unsigned int theIntervalCheck = theInterval;
+ struct MicroSecondTimer start_time, last_time, now;
+ NdbTick_getMicroTimer(&start_time);
+ last_time = start_time;
+
// WatchDog for the single threaded NDB
- while(!theStop){
- Uint32 tmp = theInterval / 500;
- tmp= (tmp ? tmp : 1);
-
- while(!theStop && tmp > 0){
- NdbSleep_MilliSleep(500);
- tmp--;
- }
-
+ while (!theStop)
+ {
+ sleep_time= 100;
+
+ NdbSleep_MilliSleep(sleep_time);
if(theStop)
break;
+ NdbTick_getMicroTimer(&now);
+ if (NdbTick_getMicrosPassed(last_time, now)/1000 > sleep_time*2)
+ {
+ struct tms my_tms;
+ times(&my_tms);
+ g_eventLogger.info("Watchdog: User time: %llu System time: %llu",
+ (Uint64)my_tms.tms_utime,
+ (Uint64)my_tms.tms_stime);
+ g_eventLogger.warning("Watchdog: Warning overslept %u ms, expected %u ms.",
+ NdbTick_getMicrosPassed(last_time, now)/1000,
+ sleep_time);
+ }
+ last_time = now;
+
// Verify that the IP thread is not stuck in a loop
anIPValue = *theIPValue;
- if(anIPValue != 0) {
+ if (anIPValue != 0)
+ {
oldIPValue = anIPValue;
globalData.incrementWatchDogCounter(0);
- alerts = 0;
- } else {
- const char *last_stuck_action;
- alerts++;
- switch (oldIPValue) {
- case 1:
- last_stuck_action = "Job Handling";
- break;
- case 2:
- last_stuck_action = "Scanning Timers";
- break;
- case 3:
- last_stuck_action = "External I/O";
- break;
- case 4:
- last_stuck_action = "Print Job Buffers at crash";
- break;
- case 5:
- last_stuck_action = "Checking connections";
- break;
- case 6:
- last_stuck_action = "Performing Send";
- break;
- case 7:
- last_stuck_action = "Polling for Receive";
- break;
- case 8:
- last_stuck_action = "Performing Receive";
- break;
- default:
- last_stuck_action = "Unknown place";
- break;
- }//switch
- g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
- if(alerts == 3){
- shutdownSystem(last_stuck_action);
+ NdbTick_getMicroTimer(&start_time);
+ theIntervalCheck = theInterval;
+ }
+ else
+ {
+ int warn = 1;
+ Uint32 elapsed = NdbTick_getMicrosPassed(start_time, now)/1000;
+ /*
+        oldIPValue == 9 indicates that a malloc is in progress; this can
+        take some time, so only warn once we pass the watchdog interval
+ */
+ if (oldIPValue == 9)
+ if (elapsed < theIntervalCheck)
+ warn = 0;
+ else
+ theIntervalCheck += theInterval;
+
+ if (warn)
+ {
+ const char *last_stuck_action = get_action(oldIPValue);
+ g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
+ {
+ struct tms my_tms;
+ times(&my_tms);
+ g_eventLogger.info("Watchdog: User time: %llu System time: %llu",
+ (Uint64)my_tms.tms_utime,
+ (Uint64)my_tms.tms_stime);
+ }
+ if (elapsed > 3 * theInterval)
+ {
+ shutdownSystem(last_stuck_action);
+ }
}
}
}
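The rewritten loop above sleeps in fixed 100 ms slices, flags oversleeping when the measured gap exceeds twice the expected one, and stretches the check interval while the counter reports a long-running malloc (value 9). A hedged sketch of the oversleep check, using std::chrono in place of NdbTick:

#include <chrono>
#include <cstdio>
#include <thread>

int main()
{
  using clock = std::chrono::steady_clock;
  const unsigned sleep_ms = 100;               // fixed slice, as in the patch
  auto last = clock::now();
  for (int i = 0; i < 5; i++)
  {
    std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
    auto now = clock::now();
    long long elapsed =
      std::chrono::duration_cast<std::chrono::milliseconds>(now - last).count();
    if (elapsed > 2 * (long long)sleep_ms)     // overslept by more than 2x
      std::printf("Warning: overslept %lld ms, expected %u ms\n",
                  elapsed, sleep_ms);
    last = now;
  }
  return 0;
}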
diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp
index e7dc1d1d503..662e5c22f48 100644
--- a/storage/ndb/src/mgmapi/mgmapi.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp
@@ -408,7 +408,7 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
}
else
{
- CHECK_TIMEDOUT_RET(handle, in, out, NULL);
+ DBUG_CHECK_TIMEDOUT_RET(handle, in, out, NULL);
if(ctx.m_status==Parser_t::Eof
|| ctx.m_status==Parser_t::NoLine)
{
@@ -524,7 +524,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET;
Uint32 i;
SocketClient s(0, 0);
- s.set_connect_timeout(handle->timeout);
+ s.set_connect_timeout((handle->timeout+999)/1000);
if (!s.init())
{
fprintf(handle->errstream,
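The connect-timeout fix converts the handle's millisecond timeout into whole seconds, rounding up so a sub-second value no longer truncates to zero. The same ceiling division in isolation:

#include <cstdio>

// Rounds up so a sub-second timeout does not become zero seconds.
static unsigned ms_to_seconds_ceil(unsigned ms) { return (ms + 999) / 1000; }

int main()
{
  std::printf("%u %u %u\n", ms_to_seconds_ceil(1),     // 1
                            ms_to_seconds_ceil(1000),  // 1
                            ms_to_seconds_ceil(1001)); // 2
  return 0;
}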
diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
index 93fc3d46e43..875cc2771ae 100644
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -18,6 +18,7 @@
#include <Vector.hpp>
#include <mgmapi.h>
#include <util/BaseString.hpp>
+#include <ndbd_exit_codes.h>
class MgmtSrvr;
@@ -704,6 +705,133 @@ CommandInterpreter::printError()
}
}
+/*
+ * print log event from mgmsrv to console screen
+ */
+#define make_uint64(a,b) (((Uint64)(a)) + (((Uint64)(b)) << 32))
+#define Q64(a) make_uint64(event->EVENT.a ## _lo, event->EVENT.a ## _hi)
+#define R event->source_nodeid
+#define Q(a) event->EVENT.a
+#define QVERSION getMajor(Q(version)), getMinor(Q(version)), getBuild(Q(version))
+#define NDB_LE_(a) NDB_LE_ ## a
+static void
+printLogEvent(struct ndb_logevent* event)
+{
+ switch (event->type) {
+ /**
+ * NDB_MGM_EVENT_CATEGORY_BACKUP
+ */
+#undef EVENT
+#define EVENT BackupStarted
+ case NDB_LE_BackupStarted:
+ ndbout_c("Node %u: Backup %d started from node %d",
+ R, Q(backup_id), Q(starting_node));
+ break;
+#undef EVENT
+#define EVENT BackupFailedToStart
+ case NDB_LE_BackupFailedToStart:
+ ndbout_c("Node %u: Backup request from %d failed to start. Error: %d",
+ R, Q(starting_node), Q(error));
+ break;
+#undef EVENT
+#define EVENT BackupCompleted
+ case NDB_LE_BackupCompleted:
+ ndbout_c("Node %u: Backup %u started from node %u completed\n"
+ " StartGCP: %u StopGCP: %u\n"
+ " #Records: %u #LogRecords: %u\n"
+ " Data: %u bytes Log: %u bytes", R,
+ Q(backup_id), Q(starting_node),
+ Q(start_gci), Q(stop_gci),
+ Q(n_records), Q(n_log_records),
+ Q(n_bytes), Q(n_log_bytes));
+ break;
+#undef EVENT
+#define EVENT BackupAborted
+ case NDB_LE_BackupAborted:
+ ndbout_c("Node %u: Backup %d started from %d has been aborted. Error: %d",
+ R, Q(backup_id), Q(starting_node), Q(error));
+ break;
+ /**
+ * NDB_MGM_EVENT_CATEGORY_STARTUP
+ */
+#undef EVENT
+#define EVENT NDBStartStarted
+ case NDB_LE_NDBStartStarted:
+ ndbout_c("Node %u: Start initiated (version %d.%d.%d)",
+ R, QVERSION);
+ break;
+#undef EVENT
+#define EVENT NDBStartCompleted
+ case NDB_LE_NDBStartCompleted:
+ ndbout_c("Node %u: Started (version %d.%d.%d)",
+ R, QVERSION);
+ break;
+#undef EVENT
+#define EVENT NDBStopStarted
+ case NDB_LE_NDBStopStarted:
+ ndbout_c("Node %u: %s shutdown initiated", R,
+ (Q(stoptype) == 1 ? "Cluster" : "Node"));
+ break;
+#undef EVENT
+#define EVENT NDBStopCompleted
+ case NDB_LE_NDBStopCompleted:
+ {
+ BaseString action_str("");
+ BaseString signum_str("");
+ getRestartAction(Q(action), action_str);
+ if (Q(signum))
+ signum_str.appfmt(" Initiated by signal %d.",
+ Q(signum));
+ ndbout_c("Node %u: Node shutdown completed%s.%s",
+ R, action_str.c_str(), signum_str.c_str());
+ }
+ break;
+#undef EVENT
+#define EVENT NDBStopForced
+ case NDB_LE_NDBStopForced:
+ {
+ BaseString action_str("");
+ BaseString reason_str("");
+ BaseString sphase_str("");
+ int signum = Q(signum);
+ int error = Q(error);
+ int sphase = Q(sphase);
+ int extra = Q(extra);
+ getRestartAction(Q(action), action_str);
+ if (signum)
+ reason_str.appfmt(" Initiated by signal %d.", signum);
+ if (error)
+ {
+ ndbd_exit_classification cl;
+ ndbd_exit_status st;
+ const char *msg = ndbd_exit_message(error, &cl);
+ const char *cl_msg = ndbd_exit_classification_message(cl, &st);
+ const char *st_msg = ndbd_exit_status_message(st);
+ reason_str.appfmt(" Caused by error %d: \'%s(%s). %s\'.",
+ error, msg, cl_msg, st_msg);
+ if (extra != 0)
+ reason_str.appfmt(" (extra info %d)", extra);
+ }
+ if (sphase < 255)
+      sphase_str.appfmt(" Occurred during startphase %u.", sphase);
+ ndbout_c("Node %u: Forced node shutdown completed%s.%s%s",
+ R, action_str.c_str(), sphase_str.c_str(),
+ reason_str.c_str());
+ }
+ break;
+#undef EVENT
+#define EVENT StopAborted
+ case NDB_LE_NDBStopAborted:
+ ndbout_c("Node %u: Node shutdown aborted", R);
+ break;
+ /**
+ * default nothing to print
+ */
+ default:
+ break;
+ }
+}
+
//*****************************************************************************
//*****************************************************************************
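
printLogEvent() leans on the fact that struct ndb_logevent carries a union
of per-event-type structs: redefining EVENT before each case re-targets the
Q()/Q64() accessors at the right union member. For the BackupStarted case
the preprocessor roughly expands as follows (the 64-bit example member is
illustrative; only the fields used above are confirmed by this hunk):

    // With '#define EVENT BackupStarted' in effect:
    //   Q(backup_id)      -> event->BackupStarted.backup_id
    //   Q(starting_node)  -> event->BackupStarted.starting_node
    //   Q64(some_counter) -> make_uint64(event->BackupStarted.some_counter_lo,
    //                                    event->BackupStarted.some_counter_hi)
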
@@ -720,30 +848,21 @@ event_thread_run(void* p)
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
1, NDB_MGM_EVENT_CATEGORY_STARTUP,
0 };
- int fd = ndb_mgm_listen_event(handle, filter);
- if (fd != NDB_INVALID_SOCKET)
+
+ NdbLogEventHandle log_handle= NULL;
+ struct ndb_logevent log_event;
+
+ log_handle= ndb_mgm_create_logevent_handle(handle, filter);
+ if (log_handle)
{
do_event_thread= 1;
- char *tmp= 0;
- char buf[1024];
do {
- SocketInputStream in(fd,2000);
- if((tmp = in.gets(buf, sizeof(buf))))
- {
- const char ping_token[]= "<PING>";
- if (memcmp(ping_token,tmp,sizeof(ping_token)-1))
- if(tmp && strlen(tmp))
- {
- Guard g(printmutex);
- ndbout << tmp;
- }
- }
- else if(in.timedout() && ndb_mgm_check_connection(handle)<0)
- {
- break;
- }
+ if (ndb_logevent_get_next(log_handle, &log_event, 2000) <= 0)
+ continue;
+ Guard g(printmutex);
+ printLogEvent(&log_event);
} while(do_event_thread);
- NDB_CLOSE_SOCKET(fd);
+ ndb_mgm_destroy_logevent_handle(&log_handle);
}
else
{
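
The rewritten event thread consumes structured log events instead of
scraping "<PING>"-interleaved text off a raw socket. The core pattern,
assembled from the calls above (error handling elided):

    int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
                     1,  NDB_MGM_EVENT_CATEGORY_STARTUP,
                     0 };
    NdbLogEventHandle h = ndb_mgm_create_logevent_handle(handle, filter);
    struct ndb_logevent ev;
    while (do_event_thread)
    {
      if (ndb_logevent_get_next(h, &ev, 2000) > 0) // 2 s poll timeout
        printLogEvent(&ev);                        // <= 0: timeout or error
    }
    ndb_mgm_destroy_logevent_handle(&h);
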
@@ -1008,6 +1127,7 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive)
}
else if(strcasecmp(firstToken, "ENTER") == 0 &&
allAfterFirstToken != NULL &&
strncasecmp(allAfterFirstToken, "SINGLE USER MODE ",
sizeof("SINGLE USER MODE") - 1) == 0){
m_error = executeEnterSingleUser(allAfterFirstToken);
@@ -1966,7 +2086,7 @@ CommandInterpreter::executeRestart(Vector<BaseString> &command_list,
return -1;
}
- if (!nostart)
+ if (nostart)
ndbout_c("Shutting down nodes with \"-n, no start\" option, to subsequently start the nodes.");
result= ndb_mgm_restart3(m_mgmsrv, no_of_nodes, node_ids,
@@ -2476,8 +2596,7 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
{
struct ndb_mgm_reply reply;
unsigned int backupId;
- int fd = -1;
-
+
Vector<BaseString> args;
{
BaseString(parameters).split(args);
@@ -2494,8 +2613,6 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
if (sz == 2 && args[1] == "NOWAIT")
{
flags = 0;
- result = ndb_mgm_start_backup(m_mgmsrv, 0, &backupId, &reply);
- goto END_BACKUP;
}
else if (sz == 1 || (sz == 3 && args[1] == "WAIT" && args[2] == "COMPLETED"))
{
@@ -2513,62 +2630,74 @@ CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
return -1;
}
- /**
- * If interactive...event listner is already running
- */
+ NdbLogEventHandle log_handle= NULL;
+ struct ndb_logevent log_event;
if (flags == 2 && !interactive)
{
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 };
- fd = ndb_mgm_listen_event(m_mgmsrv, filter);
- if (fd < 0)
+ log_handle = ndb_mgm_create_logevent_handle(m_mgmsrv, filter);
+ if (!log_handle)
{
ndbout << "Initializing start of backup failed" << endl;
printError();
- return fd;
+ return -1;
}
}
result = ndb_mgm_start_backup(m_mgmsrv, flags, &backupId, &reply);
-END_BACKUP:
if (result != 0) {
ndbout << "Backup failed" << endl;
printError();
- if (fd >= 0)
- close(fd);
+ if (log_handle)
+ ndb_mgm_destroy_logevent_handle(&log_handle);
return result;
}
- if (fd >= 0)
+ /**
+   * If interactive, the event listener thread is already running
+ */
+ if (log_handle && !interactive)
{
- char *tmp;
- char buf[1024];
- {
- SocketInputStream in(fd);
- int count = 0;
- do {
- tmp = in.gets(buf, 1024);
- if(tmp)
- {
- ndbout << tmp;
- unsigned int id;
- if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
- count++;
- }
- }
- } while(count < 2);
- }
-
- SocketInputStream in(fd, 10);
+ int count = 0;
+ int retry = 0;
do {
- tmp = in.gets(buf, 1024);
- if(tmp && tmp[0] != 0)
+ if (ndb_logevent_get_next(log_handle, &log_event, 60000) > 0)
+ {
+ int print = 0;
+ switch (log_event.type) {
+ case NDB_LE_BackupStarted:
+ if (log_event.BackupStarted.backup_id == backupId)
+ print = 1;
+ break;
+ case NDB_LE_BackupCompleted:
+ if (log_event.BackupCompleted.backup_id == backupId)
+ print = 1;
+ break;
+ case NDB_LE_BackupAborted:
+ if (log_event.BackupAborted.backup_id == backupId)
+ print = 1;
+ break;
+ default:
+ break;
+ }
+ if (print)
+ {
+ Guard g(m_print_mutex);
+ printLogEvent(&log_event);
+ count++;
+ }
+ }
+ else
{
- ndbout << tmp;
+ retry++;
}
- } while(tmp && tmp[0] != 0);
-
- close(fd);
+ } while(count < 2 && retry < 3);
+
+ if (retry >= 3)
+    ndbout << "Failed to get backup event after " << retry << " attempts" << endl;
+
+ ndb_mgm_destroy_logevent_handle(&log_handle);
}
return 0;
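
In the non-interactive "WAIT COMPLETED" path, the loop above waits for two
events matching the returned backupId (BackupStarted, then BackupCompleted
or BackupAborted), polling with a 60-second timeout per event and giving up
after three consecutive timeouts, i.e. a worst case of about three minutes.
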
diff --git a/storage/ndb/src/mgmclient/Makefile.am b/storage/ndb/src/mgmclient/Makefile.am
index 5b2009240c3..41f659cf68d 100644
--- a/storage/ndb/src/mgmclient/Makefile.am
+++ b/storage/ndb/src/mgmclient/Makefile.am
@@ -21,7 +21,8 @@ libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \
../common/logger/liblogger.la \
../common/portlib/libportlib.la \
../common/util/libgeneral.la \
- ../common/portlib/libportlib.la
+ ../common/portlib/libportlib.la \
+ ../common/debugger/libtrace.la
ndb_mgm_SOURCES = main.cpp
@@ -35,6 +36,7 @@ INCLUDES += -I$(top_srcdir)/storage/ndb/include/mgmapi \
LDADD_LOC = $(noinst_LTLIBRARIES) \
../common/portlib/libportlib.la \
@readline_link@ \
+ $(top_builddir)/storage/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp
index 44408362f09..fbd81c71700 100644
--- a/storage/ndb/src/mgmclient/main.cpp
+++ b/storage/ndb/src/mgmclient/main.cpp
@@ -23,6 +23,8 @@ extern "C" {
#elif !defined(__NETWARE__)
#include <readline/readline.h>
extern "C" int add_history(const char *command); /* From readline directory */
+extern "C" int read_history(const char *command);
+extern "C" int write_history(const char *command);
#define HAVE_READLINE
#endif
}
@@ -71,11 +73,11 @@ static struct my_option my_long_options[] =
NDB_STD_OPTS("ndb_mgm"),
{ "execute", 'e',
"execute command and exit",
- (gptr*) &opt_execute_str, (gptr*) &opt_execute_str, 0,
+ (uchar**) &opt_execute_str, (uchar**) &opt_execute_str, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "try-reconnect", 't',
"Specify number of tries for connecting to ndb_mgmd (0 = infinite)",
- (gptr*) &_try_reconnect, (gptr*) &_try_reconnect, 0,
+ (uchar**) &_try_reconnect, (uchar**) &_try_reconnect, 0,
GET_UINT, REQUIRED_ARG, 3, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -155,10 +157,35 @@ int main(int argc, char** argv){
signal(SIGPIPE, handler);
com = new Ndb_mgmclient(opt_connect_str,1);
int ret= 0;
+ BaseString histfile;
if (!opt_execute_str)
{
+#ifdef HAVE_READLINE
+ char *histfile_env= getenv("NDB_MGM_HISTFILE");
+ if (histfile_env)
+ histfile.assign(histfile_env,strlen(histfile_env));
+ else if(getenv("HOME"))
+ {
+ histfile.assign(getenv("HOME"),strlen(getenv("HOME")));
+ histfile.append("/.ndb_mgm_history");
+ }
+ if (histfile.length())
+ read_history(histfile.c_str());
+#endif
+
ndbout << "-- NDB Cluster -- Management Client --" << endl;
while(read_and_execute(_try_reconnect));
+
+#ifdef HAVE_READLINE
+ if (histfile.length())
+ {
+ BaseString histfile_tmp;
+ histfile_tmp.assign(histfile);
+ histfile_tmp.append(".TMP");
+ if(!write_history(histfile_tmp.c_str()))
+ my_rename(histfile_tmp.c_str(), histfile.c_str(), MYF(MY_WME));
+ }
+#endif
}
else
{
diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 26e50cb8a68..9cbb7d93ceb 100644
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -580,6 +580,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
STR_VALUE(MAX_INT_RNIL) },
{
+ CFG_DB_WATCHDOG_INTERVAL_INITIAL,
+ "TimeBetweenWatchDogCheckInitial",
+ DB_TOKEN,
+ "Time between execution checks inside a database node in the early start phases when memory is allocated",
+ ConfigInfo::CI_USED,
+ true,
+ ConfigInfo::CI_INT,
+ "6000",
+ "70",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
CFG_DB_STOP_ON_ERROR,
"StopOnError",
DB_TOKEN,
@@ -880,6 +892,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
STR_VALUE(MAX_INT_RNIL) },
{
+ CFG_DB_REDOLOG_FILE_SIZE,
+ "FragmentLogFileSize",
+ DB_TOKEN,
+ "Size of each Redo log file",
+ ConfigInfo::CI_USED,
+ false,
+ ConfigInfo::CI_INT,
+ "16M",
+ "4M",
+ "1G" },
+
+ {
CFG_DB_MAX_OPEN_FILES,
"MaxNoOfOpenFiles",
DB_TOKEN,
@@ -1298,6 +1322,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
STR_VALUE(MAX_INT_RNIL) },
{
+ CFG_DB_MAX_ALLOCATE,
+ "MaxAllocate",
+ DB_TOKEN,
+ "Maximum size of allocation to use when allocating memory for tables",
+ ConfigInfo::CI_USED,
+ false,
+ ConfigInfo::CI_INT,
+ "32M",
+ "1M",
+ "1G" },
+
+ {
CFG_DB_MEMREPORT_FREQUENCY,
"MemReportFrequency",
DB_TOKEN,
@@ -1309,6 +1345,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"0",
STR_VALUE(MAX_INT_RNIL) },
+ {
+ CFG_DB_O_DIRECT,
+ "ODirect",
+ DB_TOKEN,
+ "Use O_DIRECT file write/read when possible",
+ ConfigInfo::CI_USED,
+ true,
+ ConfigInfo::CI_BOOL,
+ "false",
+ "false",
+ "true"},
+
/***************************************************************************
* API
***************************************************************************/
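
For reference, the parameters introduced in this file would appear in a
cluster config.ini roughly as follows (values are illustrative; the
defaults and ranges are the ones declared above):

    [ndbd default]
    TimeBetweenWatchDogCheckInitial=60000  # ms; default 6000, min 70
    FragmentLogFileSize=64M                # default 16M, range 4M - 1G
    MaxAllocate=32M                        # default 32M, range 1M - 1G
    ODirect=1                              # boolean, default false
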
@@ -2897,25 +2945,50 @@ static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data)
char buf[] = "NodeIdX"; buf[6] = data[sizeof("NodeI")];
char sysbuf[] = "SystemX"; sysbuf[6] = data[sizeof("NodeI")];
const char* nodeId;
- require(ctx.m_currentSection->get(buf, &nodeId));
+ if(!ctx.m_currentSection->get(buf, &nodeId))
+ {
+    ctx.reportError("Mandatory parameter %s missing from section "
+                    "[%s] starting at line: %d",
+ buf, ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
char tmpLine[MAX_LINE_LENGTH];
strncpy(tmpLine, nodeId, MAX_LINE_LENGTH);
char* token1 = strtok(tmpLine, ".");
char* token2 = strtok(NULL, ".");
Uint32 id;
-
+
+ if(!token1)
+ {
+ ctx.reportError("Value for mandatory parameter %s missing from section "
+ "[%s] starting at line: %d",
+ buf, ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
if (token2 == NULL) { // Only a number given
errno = 0;
char* p;
id = strtol(token1, &p, 10);
- if (errno != 0) warning("STRTOK1", nodeId);
+ if (errno != 0 || id <= 0x0 || id > MAX_NODES)
+ {
+ ctx.reportError("Illegal value for mandatory parameter %s from section "
+ "[%s] starting at line: %d",
+ buf, ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
require(ctx.m_currentSection->put(buf, id, true));
} else { // A pair given (e.g. "uppsala.32")
errno = 0;
char* p;
id = strtol(token2, &p, 10);
- if (errno != 0) warning("STRTOK2", nodeId);
+ if (errno != 0 || id <= 0x0 || id > MAX_NODES)
+ {
+ ctx.reportError("Illegal value for mandatory parameter %s from section "
+ "[%s] starting at line: %d",
+ buf, ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
require(ctx.m_currentSection->put(buf, id, true));
require(ctx.m_currentSection->put(sysbuf, token1));
}
@@ -3733,16 +3806,16 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
}
}
if (db_host_count > 1 && node_group_warning.length() > 0)
- ndbout_c("Cluster configuration warning:\n%s",node_group_warning.c_str());
+ ctx.reportWarning("Cluster configuration warning:\n%s",node_group_warning.c_str());
if (!with_arbitration_rank)
{
- ndbout_c("Cluster configuration warning:"
+ ctx.reportWarning("Cluster configuration warning:"
"\n Neither %s nor %s nodes are configured with arbitrator,"
"\n may cause complete cluster shutdown in case of host failure.",
MGM_TOKEN, API_TOKEN);
}
if (db_host_count > 1 && arbitration_warning.length() > 0)
- ndbout_c("Cluster configuration warning:%s%s",arbitration_warning.c_str(),
+ ctx.reportWarning("Cluster configuration warning:%s%s",arbitration_warning.c_str(),
"\n Running arbitrator on the same host as a database node may"
"\n cause complete cluster shutdown in case of host failure.");
}
diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
index 94768e6ae52..569cb1eb654 100644
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -612,10 +612,11 @@ static
my_bool
parse_mycnf_opt(int, const struct my_option * opt, char * value)
{
+ long *app_type= (long*) &opt->app_type;
if(opt->comment)
- ((struct my_option *)opt)->app_type++;
+ (*app_type)++;
else
- ((struct my_option *)opt)->app_type = order++;
+ *app_type = order++;
return 0;
}
@@ -780,19 +781,19 @@ InitConfigFileParser::parse_mycnf()
const ConfigInfo::ParamInfo& param = ConfigInfo::m_ParamInfo[i];
switch(param._type){
case ConfigInfo::CI_BOOL:
- opt.value = (gptr*)malloc(sizeof(int));
+ opt.value = (uchar **)malloc(sizeof(int));
opt.var_type = GET_INT;
break;
case ConfigInfo::CI_INT:
- opt.value = (gptr*)malloc(sizeof(int));
+ opt.value = (uchar**)malloc(sizeof(int));
opt.var_type = GET_INT;
break;
case ConfigInfo::CI_INT64:
- opt.value = (gptr*)malloc(sizeof(Int64));
+ opt.value = (uchar**)malloc(sizeof(Int64));
opt.var_type = GET_LL;
break;
case ConfigInfo::CI_STRING:
- opt.value = (gptr*)malloc(sizeof(char *));
+ opt.value = (uchar**)malloc(sizeof(char *));
opt.var_type = GET_STR;
break;
default:
@@ -818,28 +819,28 @@ InitConfigFileParser::parse_mycnf()
bzero(&opt, sizeof(opt));
opt.name = "ndbd";
opt.id = 256;
- opt.value = (gptr*)malloc(sizeof(char*));
+ opt.value = (uchar**)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
opt.name = "ndb_mgmd";
opt.id = 256;
- opt.value = (gptr*)malloc(sizeof(char*));
+ opt.value = (uchar**)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
opt.name = "mysqld";
opt.id = 256;
- opt.value = (gptr*)malloc(sizeof(char*));
+ opt.value = (uchar**)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
opt.name = "ndbapi";
opt.id = 256;
- opt.value = (gptr*)malloc(sizeof(char*));
+ opt.value = (uchar**)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
@@ -948,22 +949,6 @@ end:
template class Vector<struct my_option>;
-#if 0
-struct my_option
-{
- const char *name; /* Name of the option */
- int id; /* unique id or short option */
- const char *comment; /* option comment, for autom. --help */
- gptr *value; /* The variable value */
- gptr *u_max_value; /* The user def. max variable value */
- const char **str_values; /* Pointer to possible values */
- ulong var_type;
- enum get_opt_arg_type arg_type;
- longlong def_value; /* Default value */
- longlong min_value; /* Min allowed value */
- longlong max_value; /* Max allowed value */
- longlong sub_size; /* Subtract this from given value */
- long block_size; /* Value should be a mult. of this */
- int app_type; /* To be used by an application */
-};
-#endif
+/*
+ See include/my_getopt.h for the declaration of struct my_option
+*/
diff --git a/storage/ndb/src/mgmsrv/Makefile.am b/storage/ndb/src/mgmsrv/Makefile.am
index adde2ad5d34..c19f885ae8d 100644
--- a/storage/ndb/src/mgmsrv/Makefile.am
+++ b/storage/ndb/src/mgmsrv/Makefile.am
@@ -38,7 +38,7 @@ INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/ndbapi \
-I$(top_srcdir)/storage/ndb/src/common/mgmcommon \
-I$(top_srcdir)/storage/ndb/src/mgmclient
-LDADD_LOC = $(top_builddir)/storage/ndb/src/mgmclient/CommandInterpreter.o \
+LDADD_LOC = $(top_builddir)/storage/ndb/src/mgmclient/CommandInterpreter.lo \
$(top_builddir)/storage/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index dde6829c82c..af708664a69 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -18,6 +18,7 @@
#include "MgmtSrvr.hpp"
#include "MgmtErrorReporter.hpp"
+#include "ndb_mgmd_error.h"
#include <ConfigRetriever.hpp>
#include <NdbOut.hpp>
@@ -239,13 +240,6 @@ MgmtSrvr::stopEventLog()
// Nothing yet
}
-class ErrorItem
-{
-public:
- int _errorCode;
- const char * _errorText;
-};
-
bool
MgmtSrvr::setEventLogFilter(int severity, int enable)
{
@@ -268,62 +262,6 @@ MgmtSrvr::isEventLogFilterEnabled(int severity)
return g_eventLogger.isEnable((Logger::LoggerLevel)severity);
}
-static ErrorItem errorTable[] =
-{
- {MgmtSrvr::NO_CONTACT_WITH_PROCESS, "No contact with the process (dead ?)."},
- {MgmtSrvr::PROCESS_NOT_CONFIGURED, "The process is not configured."},
- {MgmtSrvr::WRONG_PROCESS_TYPE,
- "The process has wrong type. Expected a DB process."},
- {MgmtSrvr::COULD_NOT_ALLOCATE_MEMORY, "Could not allocate memory."},
- {MgmtSrvr::SEND_OR_RECEIVE_FAILED, "Send to process or receive failed."},
- {MgmtSrvr::INVALID_LEVEL, "Invalid level. Should be between 1 and 30."},
- {MgmtSrvr::INVALID_ERROR_NUMBER, "Invalid error number. Should be >= 0."},
- {MgmtSrvr::INVALID_TRACE_NUMBER, "Invalid trace number."},
- {MgmtSrvr::NOT_IMPLEMENTED, "Not implemented."},
- {MgmtSrvr::INVALID_BLOCK_NAME, "Invalid block name"},
-
- {MgmtSrvr::CONFIG_PARAM_NOT_EXIST,
- "The configuration parameter does not exist for the process type."},
- {MgmtSrvr::CONFIG_PARAM_NOT_UPDATEABLE,
- "The configuration parameter is not possible to update."},
- {MgmtSrvr::VALUE_WRONG_FORMAT_INT_EXPECTED,
- "Incorrect value. Expected integer."},
- {MgmtSrvr::VALUE_TOO_LOW, "Value is too low."},
- {MgmtSrvr::VALUE_TOO_HIGH, "Value is too high."},
- {MgmtSrvr::VALUE_WRONG_FORMAT_BOOL_EXPECTED,
- "Incorrect value. Expected TRUE or FALSE."},
-
- {MgmtSrvr::CONFIG_FILE_OPEN_WRITE_ERROR,
- "Could not open configuration file for writing."},
- {MgmtSrvr::CONFIG_FILE_OPEN_READ_ERROR,
- "Could not open configuration file for reading."},
- {MgmtSrvr::CONFIG_FILE_WRITE_ERROR,
- "Write error when writing configuration file."},
- {MgmtSrvr::CONFIG_FILE_READ_ERROR,
- "Read error when reading configuration file."},
- {MgmtSrvr::CONFIG_FILE_CLOSE_ERROR, "Could not close configuration file."},
-
- {MgmtSrvr::CONFIG_CHANGE_REFUSED_BY_RECEIVER,
- "The change was refused by the receiving process."},
- {MgmtSrvr::COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM,
- "The change could not be synced against physical medium."},
- {MgmtSrvr::CONFIG_FILE_CHECKSUM_ERROR,
- "The config file is corrupt. Checksum error."},
- {MgmtSrvr::NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE,
- "It is not possible to send an update of a configuration variable "
- "to this kind of process."},
- {MgmtSrvr::NODE_SHUTDOWN_IN_PROGESS, "Node shutdown in progress" },
- {MgmtSrvr::SYSTEM_SHUTDOWN_IN_PROGRESS, "System shutdown in progress" },
- {MgmtSrvr::NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH,
- "Node shutdown would cause system crash" },
- {MgmtSrvr::UNSUPPORTED_NODE_SHUTDOWN,
- "Unsupported multi node shutdown. Abort option required." },
- {MgmtSrvr::NODE_NOT_API_NODE, "The specified node is not an API node." },
- {MgmtSrvr::OPERATION_NOT_ALLOWED_START_STOP,
- "Operation not allowed while nodes are starting or stopping."},
- {MgmtSrvr::NO_CONTACT_WITH_DB_NODES, "No contact with database nodes" }
-};
-
int MgmtSrvr::translateStopRef(Uint32 errCode)
{
switch(errCode){
@@ -343,8 +281,6 @@ int MgmtSrvr::translateStopRef(Uint32 errCode)
return 4999;
}
-static int noOfErrorCodes = sizeof(errorTable) / sizeof(ErrorItem);
-
int
MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const
{
@@ -628,6 +564,16 @@ MgmtSrvr::start(BaseString &error_string)
ndbout_c("This is probably a bug.");
}
+ /*
+    Set the api reg req frequency quite high:
+
+    a 100 ms interval makes sure we have fairly up-to-date
+    info from the nodes. This also makes sure that the info
+    is not dependent on the heartbeat settings in the
+    configuration.
+ */
+ theFacade->theClusterMgr->set_max_api_reg_req_interval(100);
+
TransporterRegistry *reg = theFacade->get_registry();
for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) {
BaseString msg;
@@ -1959,18 +1905,8 @@ MgmtSrvr::dumpState(int nodeId, const Uint32 args[], Uint32 no)
const char* MgmtSrvr::getErrorText(int errorCode, char *buf, int buf_sz)
{
-
- for (int i = 0; i < noOfErrorCodes; ++i) {
- if (errorCode == errorTable[i]._errorCode) {
- BaseString::snprintf(buf, buf_sz, errorTable[i]._errorText);
- buf[buf_sz-1]= 0;
- return buf;
- }
- }
-
ndb_error_string(errorCode, buf, buf_sz);
buf[buf_sz-1]= 0;
-
return buf;
}
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index a54b7866091..90287554ef8 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -148,45 +148,6 @@ public:
*/
bool isEventLogFilterEnabled(int severity);
- STATIC_CONST( NO_CONTACT_WITH_PROCESS = 5000 );
- STATIC_CONST( PROCESS_NOT_CONFIGURED = 5001 );
- STATIC_CONST( WRONG_PROCESS_TYPE = 5002 );
- STATIC_CONST( COULD_NOT_ALLOCATE_MEMORY = 5003 );
- STATIC_CONST( SEND_OR_RECEIVE_FAILED = 5005 );
- STATIC_CONST( INVALID_LEVEL = 5006 );
- STATIC_CONST( INVALID_ERROR_NUMBER = 5007 );
- STATIC_CONST( INVALID_TRACE_NUMBER = 5008 );
- STATIC_CONST( NOT_IMPLEMENTED = 5009 );
- STATIC_CONST( INVALID_BLOCK_NAME = 5010 );
-
- STATIC_CONST( CONFIG_PARAM_NOT_EXIST = 5011 );
- STATIC_CONST( CONFIG_PARAM_NOT_UPDATEABLE = 5012 );
- STATIC_CONST( VALUE_WRONG_FORMAT_INT_EXPECTED = 5013 );
- STATIC_CONST( VALUE_TOO_LOW = 5014 );
- STATIC_CONST( VALUE_TOO_HIGH = 5015 );
- STATIC_CONST( VALUE_WRONG_FORMAT_BOOL_EXPECTED = 5016 );
-
- STATIC_CONST( CONFIG_FILE_OPEN_WRITE_ERROR = 5017 );
- STATIC_CONST( CONFIG_FILE_OPEN_READ_ERROR = 5018 );
- STATIC_CONST( CONFIG_FILE_WRITE_ERROR = 5019 );
- STATIC_CONST( CONFIG_FILE_READ_ERROR = 5020 );
- STATIC_CONST( CONFIG_FILE_CLOSE_ERROR = 5021 );
-
- STATIC_CONST( CONFIG_CHANGE_REFUSED_BY_RECEIVER = 5022 );
- STATIC_CONST( COULD_NOT_SYNC_CONFIG_CHANGE_AGAINST_PHYSICAL_MEDIUM = 5023 );
- STATIC_CONST( CONFIG_FILE_CHECKSUM_ERROR = 5024 );
- STATIC_CONST( NOT_POSSIBLE_TO_SEND_CONFIG_UPDATE_TO_PROCESS_TYPE = 5025 );
-
- STATIC_CONST( NODE_SHUTDOWN_IN_PROGESS = 5026 );
- STATIC_CONST( SYSTEM_SHUTDOWN_IN_PROGRESS = 5027 );
- STATIC_CONST( NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH = 5028 );
-
- STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 );
- STATIC_CONST( UNSUPPORTED_NODE_SHUTDOWN = 5031 );
-
- STATIC_CONST( NODE_NOT_API_NODE = 5062 );
- STATIC_CONST( OPERATION_NOT_ALLOWED_START_STOP = 5063 );
-
/**
* This enum specifies the different signal loggig modes possible to set
* with the setSignalLoggingMode method.
diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index f260ff7e3ec..9272b5ab532 100644
--- a/storage/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
@@ -18,7 +18,7 @@
#include <uucode.h>
#include <socket_io.h>
-#include <ndb_version.h>
+#include <util/version.h>
#include <mgmapi.h>
#include <EventLogger.hpp>
#include <signaldata/SetLogLevelOrd.hpp>
diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp
index b880657d89b..16c560868ef 100644
--- a/storage/ndb/src/mgmsrv/main.cpp
+++ b/storage/ndb/src/mgmsrv/main.cpp
@@ -142,29 +142,29 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_mgmd"),
{ "config-file", 'f', "Specify cluster configuration file",
- (gptr*) &opt_config_filename, (gptr*) &opt_config_filename, 0,
+ (uchar**) &opt_config_filename, (uchar**) &opt_config_filename, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "print-full-config", 'P', "Print full config and exit",
- (gptr*) &g_print_full_config, (gptr*) &g_print_full_config, 0,
+ (uchar**) &g_print_full_config, (uchar**) &g_print_full_config, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "daemon", 'd', "Run ndb_mgmd in daemon mode (default)",
- (gptr*) &opt_daemon, (gptr*) &opt_daemon, 0,
+ (uchar**) &opt_daemon, (uchar**) &opt_daemon, 0,
GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
{ "interactive", OPT_INTERACTIVE,
"Run interactive. Not supported but provided for testing purposes",
- (gptr*) &opt_interactive, (gptr*) &opt_interactive, 0,
+ (uchar**) &opt_interactive, (uchar**) &opt_interactive, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-nodeid-checks", OPT_NO_NODEID_CHECKS,
"Do not provide any node id checks",
- (gptr*) &g_no_nodeid_checks, (gptr*) &g_no_nodeid_checks, 0,
+ (uchar**) &g_no_nodeid_checks, (uchar**) &g_no_nodeid_checks, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "nodaemon", OPT_NO_DAEMON,
"Don't run as daemon, but don't read from stdin",
- (gptr*) &opt_non_interactive, (gptr*) &opt_non_interactive, 0,
+ (uchar**) &opt_non_interactive, (uchar**) &opt_non_interactive, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "mycnf", 256,
"Read cluster config from my.cnf",
- (gptr*) &opt_mycnf, (gptr*) &opt_mycnf, 0,
+ (uchar**) &opt_mycnf, (uchar**) &opt_mycnf, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/src/mgmsrv/ndb_mgmd_error.h b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h
new file mode 100644
index 00000000000..2438f15c808
--- /dev/null
+++ b/storage/ndb/src/mgmsrv/ndb_mgmd_error.h
@@ -0,0 +1,33 @@
+/* Copyright (C) 2007 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_MGMD_ERROR_H
+#define NDB_MGMD_ERROR_H
+
+#define NO_CONTACT_WITH_PROCESS 5000
+#define WRONG_PROCESS_TYPE 5002
+#define SEND_OR_RECEIVE_FAILED 5005
+#define INVALID_ERROR_NUMBER 5007
+#define INVALID_TRACE_NUMBER 5008
+#define INVALID_BLOCK_NAME 5010
+#define NODE_SHUTDOWN_IN_PROGESS 5026
+#define SYSTEM_SHUTDOWN_IN_PROGRESS 5027
+#define NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH 5028
+#define NO_CONTACT_WITH_DB_NODES 5030
+#define UNSUPPORTED_NODE_SHUTDOWN 5031
+#define NODE_NOT_API_NODE 5062
+#define OPERATION_NOT_ALLOWED_START_STOP 5063
+
+#endif
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index 52c95df6d15..448bc1025e8 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -16,7 +16,7 @@
#include <ndb_global.h>
#include <my_pthread.h>
#include <ndb_limits.h>
-#include <ndb_version.h>
+#include <util/version.h>
#include "TransporterFacade.hpp"
#include "ClusterMgr.hpp"
@@ -61,6 +61,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
clusterMgrThreadMutex = NdbMutex_Create();
waitForHBCond= NdbCondition_Create();
waitingForHB= false;
+  m_max_api_reg_req_interval= 0xFFFFFFFF; // effectively disabled (max Uint32)
noOfAliveNodes= 0;
noOfConnectedNodes= 0;
theClusterMgrThread= 0;
@@ -243,7 +244,7 @@ ClusterMgr::threadMain( ){
}
theFacade.lock_mutex();
- for (int i = 1; i < MAX_NODES; i++){
+ for (int i = 1; i < MAX_NDB_NODES; i++){
/**
* Send register request (heartbeat) to all available nodes
* at specified timing intervals
@@ -264,7 +265,8 @@ ClusterMgr::threadMain( ){
}
theNode.hbCounter += timeSlept;
- if (theNode.hbCounter >= theNode.hbFrequency) {
+ if (theNode.hbCounter >= m_max_api_reg_req_interval ||
+ theNode.hbCounter >= theNode.hbFrequency) {
/**
* It is now time to send a new Heartbeat
*/
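
With the new cap, the effective API_REGREQ send interval becomes
min(theNode.hbFrequency, m_max_api_reg_req_interval); the management server
sets the cap to 100 ms in MgmtSrvr::start() (earlier in this changeset), so
its node-status information stays fresh regardless of the configured
heartbeat period.
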
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp
index 6e74620dd4f..0a261eb202f 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp
@@ -50,6 +50,7 @@ public:
void startThread();
void forceHB();
+ void set_max_api_reg_req_interval(unsigned int millisec) { m_max_api_reg_req_interval = millisec; }
private:
void threadMain();
@@ -89,6 +90,7 @@ public:
Uint32 m_connect_count;
private:
+ Uint32 m_max_api_reg_req_interval;
Uint32 noOfAliveNodes;
Uint32 noOfConnectedNodes;
Node theNodes[MAX_NODES];
diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp
index 78b7af5522b..15647861eef 100644
--- a/storage/ndb/src/ndbapi/Ndb.cpp
+++ b/storage/ndb/src/ndbapi/Ndb.cpp
@@ -37,6 +37,7 @@ Name: Ndb.cpp
#include "API.hpp"
#include <NdbEnv.h>
#include <BaseString.hpp>
+#include <NdbSqlUtil.hpp>
/****************************************************************************
void connect();
@@ -201,9 +202,10 @@ Ndb::NDB_connect(Uint32 tNode)
DBUG_PRINT("info",
("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d",
tReturnCode, tNdbCon->Status()));
- if (theError.code == 299)
+ if (theError.code == 299 || // single user mode
+ theError.code == 281 ) // cluster shutdown in progress
{
- // single user mode so no need to retry with other node
+ // no need to retry with other node
DBUG_RETURN(-1);
}
DBUG_RETURN(3);
@@ -304,6 +306,180 @@ Return Value: Returns a pointer to a connection object.
Return NULL otherwise.
Remark: Start transaction. Synchronous.
*****************************************************************************/
+int
+Ndb::computeHash(Uint32 *retval,
+ const NdbDictionary::Table *table,
+ const struct Key_part_ptr * keyData,
+ void* buf, Uint32 bufLen)
+{
+ Uint32 j = 0;
+ Uint32 sumlen = 0; // Needed len
+ const NdbTableImpl* impl = &NdbTableImpl::getImpl(*table);
+ const NdbColumnImpl* const * cols = impl->m_columns.getBase();
+ Uint32 len;
+ char* pos;
+
+ Uint32 colcnt = impl->m_columns.size();
+ Uint32 parts = impl->m_noOfDistributionKeys;
+ if (parts == 0)
+ {
+ parts = impl->m_noOfKeys;
+ }
+
+ for (Uint32 i = 0; i<parts; i++)
+ {
+ if (unlikely(keyData[i].ptr == 0))
+ goto enullptr;
+ }
+
+ if (unlikely(keyData[parts].ptr != 0))
+ goto emissingnullptr;
+
+ const NdbColumnImpl* partcols[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY];
+ for (Uint32 i = 0; i<colcnt && j < parts; i++)
+ {
+ if (cols[i]->m_distributionKey)
+ {
+ // wl3717_todo
+ // char allowed now as dist key so this case should be tested
+ partcols[j++] = cols[i];
+ }
+ }
+
+ for (Uint32 i = 0; i<parts; i++)
+ {
+ Uint32 lb, len;
+ if (unlikely(!NdbSqlUtil::get_var_length(partcols[i]->m_type,
+ keyData[i].ptr,
+ keyData[i].len,
+ lb, len)))
+ goto emalformedkey;
+
+ if (unlikely(keyData[i].len < (lb + len)))
+ goto elentosmall;
+
+ Uint32 maxlen = (partcols[i]->m_attrSize * partcols[i]->m_arraySize);
+
+ if (unlikely(lb == 0 && keyData[i].len != maxlen))
+ goto emalformedkey;
+
+ if (partcols[i]->m_cs)
+ {
+ Uint32 xmul = partcols[i]->m_cs->strxfrm_multiply;
+ xmul = xmul ? xmul : 1;
+ len = xmul * (maxlen - lb);
+ }
+
+ len = (lb + len + 3) & ~(Uint32)3;
+ sumlen += len;
+
+ }
+
+ if (buf)
+ {
+ UintPtr org = UintPtr(buf);
+ UintPtr use = (org + 7) & ~(UintPtr)7;
+
+ buf = (void*)use;
+ bufLen -= (use - org);
+
+ if (unlikely(sumlen > bufLen))
+ goto ebuftosmall;
+ }
+ else
+ {
+ buf = malloc(sumlen);
+ if (unlikely(buf == 0))
+ goto enomem;
+ bufLen = 0;
+ assert((UintPtr(buf) & 7) == 0);
+ }
+
+ pos = (char*)buf;
+ for (Uint32 i = 0; i<parts; i++)
+ {
+ Uint32 lb, len;
+ NdbSqlUtil::get_var_length(partcols[i]->m_type,
+ keyData[i].ptr, keyData[i].len, lb, len);
+ CHARSET_INFO* cs;
+ if ((cs = partcols[i]->m_cs))
+ {
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ /*
+       * Varchar end-spaces are ignored in comparisons. To get the same
+       * hash we blank-pad to the maximum length via strnxfrm.
+ */
+ Uint32 maxlen = (partcols[i]->m_attrSize * partcols[i]->m_arraySize);
+ Uint32 dstLen = xmul * (maxlen - lb);
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs,
+ (unsigned char*)pos,
+ dstLen,
+ ((unsigned char*)keyData[i].ptr)+lb,
+ len);
+
+ if (unlikely(n == -1))
+ goto emalformedstring;
+
+ while ((n & 3) != 0)
+ {
+ pos[n++] = 0;
+ }
+ pos += n;
+ }
+ else
+ {
+ len += lb;
+ memcpy(pos, keyData[i].ptr, len);
+ while (len & 3)
+ {
+ * (pos + len++) = 0;
+ }
+ pos += len;
+ }
+ }
+ len = UintPtr(pos) - UintPtr(buf);
+ assert((len & 3) == 0);
+
+ Uint32 values[4];
+ md5_hash(values, (const Uint64*)buf, len >> 2);
+
+ if (retval)
+ {
+ * retval = values[1];
+ }
+
+ if (bufLen == 0)
+ free(buf);
+
+ return 0;
+
+enullptr:
+ return 4316;
+
+emissingnullptr:
+ return 4276;
+
+elentosmall:
+ return 4277;
+
+ebuftosmall:
+ return 4278;
+
+emalformedstring:
+ if (bufLen == 0)
+ free(buf);
+
+ return 4279;
+
+emalformedkey:
+ return 4280;
+
+enomem:
+ return 4000;
+}
+
NdbTransaction*
Ndb::startTransaction(const NdbDictionary::Table *table,
const char * keyData, Uint32 keyLen)
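
A plausible call sequence for the new Ndb::computeHash(), assuming
Key_part_ptr is a { const void* ptr; unsigned len; } pair (its definition
is not part of this hunk):

    // Hash the distribution key of a table with one INT key column.
    Ndb::Key_part_ptr key_parts[2];
    Uint32 key_val = 42;
    key_parts[0].ptr = &key_val;
    key_parts[0].len = sizeof(key_val);
    key_parts[1].ptr = 0;       // list must be null-terminated, see the
                                // emissingnullptr check above
    Uint32 hash;
    char scratch[1024];         // optional work buffer; pass NULL and the
                                // function mallocs (and frees) one itself
    int res = ndb->computeHash(&hash, tab, key_parts,
                               scratch, sizeof(scratch));
    // res == 0 on success; 4316/4276/4277/4278/4279/4280/4000 on the
    // error paths labelled at the end of the function
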
@@ -754,17 +930,27 @@ Ndb::getNodeId()
}
/****************************************************************************
-Uint64 getTupleIdFromNdb( Uint32 aTableId, Uint32 cacheSize );
-
-Parameters: aTableId : The TableId.
- cacheSize: Prefetch this many values
-Remark: Returns a new TupleId to the application.
- The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
- It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
+Uint64 getAutoIncrementValue( const char* aTableName,
+ Uint64 & autoValue,
+ Uint32 cacheSize,
+ Uint64 step,
+ Uint64 start);
+
+Parameters: aTableName (IN) : The table name.
+ autoValue (OUT) : Returns new autoincrement value
+ cacheSize (IN) : Prefetch this many values
+ step (IN) : Specifies the step between the
+ autoincrement values.
+ start (IN) : Start value for first value
+Remark: Returns a new autoincrement value to the application.
+ The autoincrement values can be increased by steps
+ (default 1) and a number of values can be prefetched
+ by specifying cacheSize (default 10).
****************************************************************************/
int
Ndb::getAutoIncrementValue(const char* aTableName,
- Uint64 & tupleId, Uint32 cacheSize)
+ Uint64 & autoValue, Uint32 cacheSize,
+ Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -778,15 +964,16 @@ Ndb::getAutoIncrementValue(const char* aTableName,
}
const NdbTableImpl* table = info->m_table_impl;
TupleIdRange & range = info->m_tuple_id_range;
- if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
+ if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong) autoValue));
DBUG_RETURN(0);
}
int
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
- Uint64 & tupleId, Uint32 cacheSize)
+ Uint64 & autoValue, Uint32 cacheSize,
+ Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
ASSERT_NOT_MYSQLD;
@@ -801,51 +988,86 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
DBUG_RETURN(-1);
}
TupleIdRange & range = info->m_tuple_id_range;
- if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
+ if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
- TupleIdRange & range, Uint64 & tupleId,
- Uint32 cacheSize)
+ TupleIdRange & range, Uint64 & autoValue,
+ Uint32 cacheSize, Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
assert(aTable != 0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
- if (getTupleIdFromNdb(table, range, tupleId, cacheSize) == -1)
+ if (getTupleIdFromNdb(table, range, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong)autoValue));
DBUG_RETURN(0);
}
int
Ndb::getTupleIdFromNdb(const NdbTableImpl* table,
- TupleIdRange & range, Uint64 & tupleId, Uint32 cacheSize)
+ TupleIdRange & range, Uint64 & tupleId,
+ Uint32 cacheSize, Uint64 step, Uint64 start)
{
+/*
+ Returns a new TupleId to the application.
+ The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
+ It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
+ In most cases step= start= 1, in which case we get:
+ 1,2,3,4,5,...
+ If step=10 and start=5 and first number is 1, we get:
+ 5,15,25,35,...
+*/
DBUG_ENTER("Ndb::getTupleIdFromNdb");
- if (range.m_first_tuple_id != range.m_last_tuple_id)
+ /*
+ Check if the next value can be taken from the pre-fetched
+ sequence.
+ */
+ if (range.m_first_tuple_id != range.m_last_tuple_id &&
+ range.m_first_tuple_id + step <= range.m_last_tuple_id)
{
assert(range.m_first_tuple_id < range.m_last_tuple_id);
- tupleId = ++range.m_first_tuple_id;
- DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
+ range.m_first_tuple_id += step;
+ tupleId = range.m_first_tuple_id;
+ DBUG_PRINT("info", ("Next cached value %lu", (ulong) tupleId));
}
else
{
+ /*
+ If start value is greater than step it is ignored
+ */
+ Uint64 offset = (start > step) ? 1 : start;
+
+ /*
+ Pre-fetch a number of values depending on cacheSize
+ */
if (cacheSize == 0)
cacheSize = 1;
+
DBUG_PRINT("info", ("reading %u values from database", (uint)cacheSize));
/*
* reserve next cacheSize entries in db. adds cacheSize to NEXTID
- * and returns first tupleId in the new range.
+     * and returns first tupleId in the new range. If tupleIds are
+     * incremented in steps then the cacheSize is multiplied by the step size.
*/
- Uint64 opValue = cacheSize;
+ Uint64 opValue = cacheSize * step;
+
if (opTupleIdOnNdb(table, range, opValue, 0) == -1)
DBUG_RETURN(-1);
- tupleId = opValue;
+ DBUG_PRINT("info", ("Next value fetched from database %lu", (ulong) opValue));
+    DBUG_PRINT("info", ("Increasing %lu by offset %lu, increment is %lu", (ulong) opValue, (ulong) offset, (ulong) step));
+ Uint64 current, next;
+ Uint64 div = ((Uint64) (opValue + step - offset)) / step;
+ next = div * step + offset;
+ current = (next < step) ? next : next - step;
+ tupleId = (opValue <= current) ? current : next;
+ DBUG_PRINT("info", ("Returning %lu", (ulong) tupleId));
+ range.m_first_tuple_id = tupleId;
}
DBUG_RETURN(0);
}
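
Worked example of the rounding logic above, with step = 10, start = 5 and
an assumed first reserved NEXTID value opValue = 1:

    offset  = start             = 5    (start <= step)
    div     = (1 + 10 - 5) / 10 = 0    (integer division)
    next    = 0 * 10 + 5        = 5
    current = 5                        (next < step, so current = next)
    tupleId = 5                        (opValue <= current)

which yields the 5, 15, 25, ... sequence promised in the comment at the
top of the function.
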
diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp
index 25dcafdef53..0fc96add084 100644
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp
@@ -536,7 +536,7 @@ int
NdbBlob::setTableKeyValue(NdbOperation* anOp)
{
DBUG_ENTER("NdbBlob::setTableKeyValue");
- DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
+ DBUG_DUMP("info", (uchar*) theKeyBuf.data, 4 * theTable->m_keyLenInWords);
const Uint32* data = (const Uint32*)theKeyBuf.data;
const unsigned columns = theTable->m_columns.size();
unsigned pos = 0;
@@ -562,7 +562,8 @@ int
NdbBlob::setAccessKeyValue(NdbOperation* anOp)
{
DBUG_ENTER("NdbBlob::setAccessKeyValue");
- DBUG_DUMP("info", theAccessKeyBuf.data, 4 * theAccessTable->m_keyLenInWords);
+ DBUG_DUMP("info", (uchar*) theAccessKeyBuf.data,
+ 4 * theAccessTable->m_keyLenInWords);
const Uint32* data = (const Uint32*)theAccessKeyBuf.data;
const unsigned columns = theAccessTable->m_columns.size();
unsigned pos = 0;
@@ -587,7 +588,7 @@ NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
{
DBUG_ENTER("NdbBlob::setPartKeyValue");
DBUG_PRINT("info", ("dist=%u part=%u packkey=", getDistKey(part), part));
- DBUG_DUMP("info", thePackKeyBuf.data, 4 * thePackKeyBuf.size);
+ DBUG_DUMP("info", (uchar*) thePackKeyBuf.data, 4 * thePackKeyBuf.size);
// TODO use attr ids after compatibility with 4.1.7 not needed
if (anOp->equal("PK", thePackKeyBuf.data) == -1 ||
anOp->equal("DIST", getDistKey(part)) == -1 ||
@@ -1135,7 +1136,12 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count)
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
- tOp->committedRead() == -1 ||
+ /*
+ * This was committedRead() before. However lock on main
+ * table tuple does not fully protect blob parts since DBTUP
+ * commits each tuple separately.
+ */
+ tOp->readTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
@@ -1261,6 +1267,7 @@ NdbBlob::deletePartsUnknown(Uint32 part)
DBUG_RETURN(-1);
}
tOp->m_abortOption= NdbOperation::AO_IgnoreError;
+ tOp->m_noErrorPropagation = true;
n++;
}
DBUG_PRINT("info", ("bat=%u", bat));
@@ -1597,6 +1604,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
}
if (isWriteOp()) {
tOp->m_abortOption = NdbOperation::AO_IgnoreError;
+ tOp->m_noErrorPropagation = true;
}
theHeadInlineReadOp = tOp;
// execute immediately
@@ -1643,6 +1651,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
}
if (isWriteOp()) {
tOp->m_abortOption = NdbOperation::AO_IgnoreError;
+ tOp->m_noErrorPropagation = true;
}
theHeadInlineReadOp = tOp;
// execute immediately
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 63d36bf012f..ab6d90ad59e 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -42,7 +42,7 @@
#include <my_sys.h>
#include <NdbEnv.h>
#include <NdbMem.h>
-#include <ndb_version.h>
+#include <util/version.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index 673587b1ed7..aa9bd174471 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -391,7 +391,7 @@ struct NdbFileImpl : public NdbDictObjectImpl {
NdbFileImpl(NdbDictionary::Object::Type t);
Uint64 m_size;
- Uint32 m_free;
+ Uint64 m_free;
BaseString m_path;
BaseString m_filegroup_name;
Uint32 m_filegroup_id;
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 00acfe62ad9..a82983fca8c 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -41,6 +41,7 @@
#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
#include <signaldata/AlterTable.hpp>
+#include "ndb_internal.hpp"
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
@@ -2838,7 +2839,7 @@ send_report:
data[5]= apply_gci >> 32;
data[6]= latest_gci & ~(Uint32)0;
data[7]= latest_gci >> 32;
- m_ndb->theImpl->send_event_report(data,8);
+ Ndb_internal::send_event_report(m_ndb, data,8);
#ifdef VM_TRACE
assert(m_total_alloc >= m_free_data_sz);
#endif
diff --git a/storage/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp
index 903372ddb9d..50531292e40 100644
--- a/storage/ndb/src/ndbapi/NdbOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperation.cpp
@@ -76,7 +76,8 @@ NdbOperation::NdbOperation(Ndb* aNdb, NdbOperation::Type aType) :
m_keyInfoGSN(GSN_KEYINFO),
m_attrInfoGSN(GSN_ATTRINFO),
theBlobList(NULL),
- m_abortOption(-1)
+ m_abortOption(-1),
+ m_noErrorPropagation(false)
{
theReceiver.init(NdbReceiver::NDB_OPERATION, this);
theError.code = 0;
@@ -101,7 +102,8 @@ NdbOperation::setErrorCode(int anErrorCode)
theError.code = anErrorCode;
theNdbCon->theErrorLine = theErrorLine;
theNdbCon->theErrorOperation = this;
- theNdbCon->setOperationErrorCode(anErrorCode);
+ if (!(m_abortOption == AO_IgnoreError && m_noErrorPropagation))
+ theNdbCon->setOperationErrorCode(anErrorCode);
}
/******************************************************************************
@@ -116,6 +118,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode)
theError.code = anErrorCode;
theNdbCon->theErrorLine = theErrorLine;
theNdbCon->theErrorOperation = this;
+ // ignore m_noErrorPropagation
theNdbCon->setOperationErrorCodeAbort(anErrorCode);
}
@@ -161,6 +164,7 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){
theMagicNumber = 0xABCDEF01;
theBlobList = NULL;
m_abortOption = -1;
+ m_noErrorPropagation = false;
m_no_disk_flag = 1;
tSignal = theNdb->getSignal();
diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index ced9b18bd55..c9459ff911c 100644
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -608,7 +608,6 @@ NdbOperation::setAnyValue(Uint32 any_value)
const NdbColumnImpl* impl =
&NdbColumnImpl::getImpl(* NdbDictionary::Column::ANY_VALUE);
OperationType tOpType = theOperationType;
- OperationStatus tStatus = theStatus;
switch(tOpType){
case DeleteRequest:{
diff --git a/storage/ndb/src/ndbapi/NdbOperationInt.cpp b/storage/ndb/src/ndbapi/NdbOperationInt.cpp
index 0df1dbfe2c8..f69211cb78b 100644
--- a/storage/ndb/src/ndbapi/NdbOperationInt.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationInt.cpp
@@ -1023,7 +1023,7 @@ NdbOperation::branch_col(Uint32 type,
DBUG_PRINT("enter", ("type: %u col:%u val: 0x%lx len: %u label: %u",
type, ColId, (long) val, len, Label));
if (val != NULL)
- DBUG_DUMP("value", (char*)val, len);
+ DBUG_DUMP("value", (uchar*)val, len);
if (initial_interpreterCheck() == -1)
DBUG_RETURN(-1);
diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp
index 7615ee71c65..38ca14085f0 100644
--- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp
@@ -270,7 +270,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
out << r.u_short_value();
break;
case NdbDictionary::Column::Tinyunsigned:
- out << (unsigned) r.u_char_value();
+ out << (unsigned) r.u_8_value();
break;
case NdbDictionary::Column::Bigint:
out << r.int64_value();
@@ -285,7 +285,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
out << r.short_value();
break;
case NdbDictionary::Column::Tinyint:
- out << (int) r.char_value();
+ out << (int) r.int8_value();
break;
case NdbDictionary::Column::Binary:
if (!f.hex_format)
@@ -411,7 +411,7 @@ ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
break;
case NdbDictionary::Column::Year:
{
- uint year = 1900 + r.u_char_value();
+ uint year = 1900 + r.u_8_value();
char buf[40];
sprintf(buf, "%04d", year);
out << buf;
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index dc9a74ae11c..89782453a72 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -1202,7 +1202,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
Uint32 tupKeyLen = theTupKeyLen;
union {
Uint32 tempData[2000];
- Uint64 __align;
+ Uint64 __my_align;
};
Uint64 *valPtr;
if(remaining > totalLen && aligned && nobytes){
@@ -1311,7 +1311,7 @@ NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size)
}
pos += rem;
}
- DBUG_DUMP("key", (char*)data, size << 2);
+ DBUG_DUMP("key", (uchar*) data, size << 2);
DBUG_RETURN(size);
}
diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp
index ada0372a184..55c6f0f4b99 100644
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp
@@ -453,12 +453,27 @@ NdbTransaction::executeNoBlobs(NdbTransaction::ExecType aTypeOfExec,
while (1) {
int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
if (noOfComp == 0) {
- /**
- * This timeout situation can occur if NDB crashes.
+ /*
+ * Just for fun, this is only one of two places where
+ * we could hit this error... It's quite possible we
+ * hit it in Ndbif.cpp in Ndb::check_send_timeout()
+ *
+ * We behave rather similarly in both places.
+ * Hitting this is certainly a bug though...
*/
- ndbout << "This timeout should never occur, execute(..)" << endl;
- theError.code = 4012;
- setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
+ g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
+ "response from NDB data nodes. This should NEVER "
+                          "occur. You have likely hit an NDB bug. Please "
+                          "file a bug report.");
+      DBUG_PRINT("error",("This timeout should never occur, execute()"));
+ g_eventLogger.error("Forcibly trying to rollback txn (%p"
+ ") to try to clean up data node resources.",
+ this);
+ executeNoBlobs(NdbTransaction::Rollback);
+ theError.code = 4012;
+ theError.status= NdbError::PermanentError;
+ theError.classification= NdbError::TimeoutExpired;
+ setOperationErrorCodeAbort(4012); // ndbd timeout
DBUG_RETURN(-1);
}//if
@@ -522,7 +537,12 @@ NdbTransaction::executeAsynchPrepare(NdbTransaction::ExecType aTypeOfExec,
*/
if (theError.code != 0)
DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
- theError.code = 0;
+ /**
+ * for timeout (4012) we want sendROLLBACK to behave differently.
+ * Else, normal behaviour of reset errcode
+ */
+ if (theError.code != 4012)
+ theError.code = 0;
NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
// Execute any cursor operations
@@ -843,6 +863,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
tSignal.setData(theTCConPtr, 1);
tSignal.setData(tTransId1, 2);
tSignal.setData(tTransId2, 3);
+ if(theError.code == 4012)
+ {
+ g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
+ tSignal.setLength(tSignal.getLength() + 1); // + flags
+ tSignal.setData(0x1, 4); // potentially bad data
+ }
tReturnCode = tp->sendSignal(&tSignal,theDBnode);
if (tReturnCode != -1) {
theSendStatus = sendTC_ROLLBACK;
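
Taken together, the hunks above change the 4012 (request time-out) path:
executeNoBlobs() now logs the time-out, marks the error permanent and
forcibly rolls the transaction back; executeAsynchPrepare() preserves a
pending 4012 instead of resetting it; and sendROLLBACK() then appends an
extra signal word (0x1) marking the rollback as time-out-triggered so the
data node can clean up its resources.
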
diff --git a/storage/ndb/src/ndbapi/ndb_internal.hpp b/storage/ndb/src/ndbapi/ndb_internal.hpp
new file mode 100644
index 00000000000..f5f37f95a04
--- /dev/null
+++ b/storage/ndb/src/ndbapi/ndb_internal.hpp
@@ -0,0 +1,26 @@
+/* Copyright (C) 2007 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "NdbImpl.hpp"
+
+class Ndb_internal
+{
+private:
+ friend class NdbEventBuffer;
+ Ndb_internal() {}
+ virtual ~Ndb_internal() {}
+ static int send_event_report(Ndb *ndb, Uint32 *data, Uint32 length)
+ { return ndb->theImpl->send_event_report(data, length); }
+};
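
ndb_internal.hpp is a friend-gated bridge: NdbEventBuffer is the only
declared friend, so it alone can call the private static forwarder, and the
#include of NdbImpl.hpp stays confined to this one header instead of
leaking into NdbEventOperationImpl.cpp (which now calls
Ndb_internal::send_event_report(), as shown in the hunk above).
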
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 3cecd15684d..a0417e5b118 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -19,6 +19,9 @@
#include <ndberror.h>
#include <m_string.h>
+#include "../mgmsrv/ndb_mgmd_error.h"
+
+
typedef struct ErrorBundle {
int code;
int mysql_code;
@@ -151,7 +154,7 @@ ErrorBundle ErrorCodes[] = {
*/
{ 4007, DMEC, UR, "Send to ndbd node failed" },
{ 4008, DMEC, UR, "Receive from NDB failed" },
- { 4009, DMEC, UR, "Cluster Failure" },
+ { 4009, HA_ERR_NO_CONNECTION, UR, "Cluster Failure" },
{ 4012, DMEC, UR,
"Request ndbd time-out, maybe due to high load or communication problems"},
{ 4013, DMEC, UR, "Request timed out in waiting for node failure"},
@@ -179,13 +182,14 @@ ErrorBundle ErrorCodes[] = {
{ 873, DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
{ 899, DMEC, TR, "Rowid already allocated" },
{ 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
- { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+ { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (increase FragmentLogFileSize)" },
{ 1222, DMEC, TR, "Out of transaction markers in LQH" },
{ 4021, DMEC, TR, "Out of Send Buffer space in NDB API" },
{ 4022, DMEC, TR, "Out of Send Buffer space in NDB API" },
{ 4032, DMEC, TR, "Out of Send Buffer space in NDB API" },
{ 1501, DMEC, TR, "Out of undo space" },
{ 288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+ { 289, DMEC, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
/**
* InsufficientSpace
@@ -286,6 +290,7 @@ ErrorBundle ErrorCodes[] = {
/**
* Application error
*/
+ { 281, HA_ERR_NO_CONNECTION, AE, "Operation not allowed due to cluster shutdown in progress" },
{ 299, DMEC, AE, "Operation not allowed or aborted due to single user mode" },
{ 763, DMEC, AE, "Alter table requires cluster nodes to have exact same version" },
{ 823, DMEC, AE, "Too much attrinfo from application in tuple manager" },
@@ -619,6 +624,33 @@ ErrorBundle ErrorCodes[] = {
{ 4273, DMEC, IE, "No blob table in dict cache" },
{ 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
{ 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
+
+ { NO_CONTACT_WITH_PROCESS, DMEC, AE,
+ "No contact with the process (dead ?)."},
+ { WRONG_PROCESS_TYPE, DMEC, AE,
+ "The process has wrong type. Expected a DB process."},
+ { SEND_OR_RECEIVE_FAILED, DMEC, AE,
+ "Send to process or receive failed."},
+ { INVALID_ERROR_NUMBER, DMEC, AE,
+ "Invalid error number. Should be >= 0."},
+ { INVALID_TRACE_NUMBER, DMEC, AE,
+ "Invalid trace number."},
+ { INVALID_BLOCK_NAME, DMEC, AE,
+ "Invalid block name"},
+ { NODE_SHUTDOWN_IN_PROGESS, DMEC, AE,
+ "Node shutdown in progress" },
+ { SYSTEM_SHUTDOWN_IN_PROGRESS, DMEC, AE,
+ "System shutdown in progress" },
+ { NODE_SHUTDOWN_WOULD_CAUSE_SYSTEM_CRASH, DMEC, AE,
+ "Node shutdown would cause system crash" },
+ { UNSUPPORTED_NODE_SHUTDOWN, DMEC, AE,
+ "Unsupported multi node shutdown. Abort option required." },
+ { NODE_NOT_API_NODE, DMEC, AE,
+ "The specified node is not an API node." },
+ { OPERATION_NOT_ALLOWED_START_STOP, DMEC, AE,
+ "Operation not allowed while nodes are starting or stopping."},
+ { NO_CONTACT_WITH_DB_NODES, DMEC, AE,
+ "No contact with database nodes" }
};
static
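Each ErrorBundle row pairs an NDB error code with a MySQL handler error code (DMEC where no dedicated mapping exists), a classification, and a message; this change maps 4009 and the new 281 to HA_ERR_NO_CONNECTION so a lost cluster connection surfaces as such in the handler. A minimal lookup sketch over the table above (the function name is invented for illustration, not added by this patch):

/* Sketch only: linear scan of the ErrorCodes[] table shown above. */
static const ErrorBundle*
ndberror_find(int code)
{
  unsigned i;
  for (i = 0; i < sizeof(ErrorCodes) / sizeof(ErrorCodes[0]); i++)
  {
    if (ErrorCodes[i].code == code)
      return &ErrorCodes[i];
  }
  return 0; /* unknown error code */
}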
diff --git a/storage/ndb/test/include/HugoTransactions.hpp b/storage/ndb/test/include/HugoTransactions.hpp
index caed577f8c9..e2b12f261a8 100644
--- a/storage/ndb/test/include/HugoTransactions.hpp
+++ b/storage/ndb/test/include/HugoTransactions.hpp
@@ -20,7 +20,7 @@
#include <NDBT.hpp>
#include <HugoCalculator.hpp>
#include <HugoOperations.hpp>
-
+class NDBT_Stats;
class HugoTransactions : public HugoOperations {
public:
@@ -109,10 +109,24 @@ public:
void setRetryMax(int retryMax = 100) { m_retryMax = retryMax; }
Uint32 m_latest_gci;
+
+ void setStatsLatency(NDBT_Stats* stats) { m_stats_latency = stats; }
+
+ // allows multiple threads to update separate batches
+ void setThrInfo(int thr_count, int thr_no) {
+ m_thr_count = thr_count;
+ m_thr_no = thr_no;
+ }
+
protected:
NDBT_ResultRow row;
int m_defaultScanUpdateMethod;
int m_retryMax;
+
+ NDBT_Stats* m_stats_latency;
+
+ int m_thr_count; // 0 if no separation between threads
+ int m_thr_no;
};
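A minimal sketch of how a test step might use the new hooks; the four-way split, the stats object, and thr_no are assumptions for illustration:

// Same thread count, different thread number, so batches never overlap.
NDBT_Stats latency;
HugoTransactions trans(*ctx->getTab());
trans.setStatsLatency(&latency); // collect per-operation latency
trans.setThrInfo(4, thr_no);     // 4 workers; this one is thr_no (0..3)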
diff --git a/storage/ndb/test/include/NDBT_Thread.hpp b/storage/ndb/test/include/NDBT_Thread.hpp
new file mode 100644
index 00000000000..5b724991b29
--- /dev/null
+++ b/storage/ndb/test/include/NDBT_Thread.hpp
@@ -0,0 +1,226 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_THREAD_HPP
+#define NDB_THREAD_HPP
+
+#include <NdbMutex.h>
+#include <NdbCondition.h>
+#include <NdbThread.h>
+
+// NDBT_Thread ctor -> NDBT_Thread_run -> thr.run()
+extern "C" {
+static void* NDBT_Thread_run(void* arg);
+}
+
+// Function to run in a thread.
+
+typedef void NDBT_ThreadFunc(class NDBT_Thread&);
+
+/*
+ * NDBT_Thread
+ *
+ * Represents a thread. The thread pauses at startup.
+ * Main process sets a function to run. When the function
+ * returns, the thread pauses again to wait for a command.
+ * This allows main process to sync with the thread and
+ * exchange data with it.
+ *
+ * Input to thread is typically options. The input area
+ * is read-only in the thread. Output from thread is
+ * results such as statistics. Error code is handled
+ * separately.
+ *
+ * Pointer to Ndb object and method to create it are
+ * provided for convenience.
+ */
+
+class NDBT_ThreadSet;
+
+class NDBT_Thread {
+public:
+ NDBT_Thread();
+ NDBT_Thread(NDBT_ThreadSet* thread_set, int thread_no);
+ void create(NDBT_ThreadSet* thread_set, int thread_no);
+ ~NDBT_Thread();
+
+ // if part of a set
+ inline NDBT_ThreadSet& get_thread_set() const {
+ assert(m_thread_set != 0);
+ return *m_thread_set;
+ }
+ inline int get_thread_no() const {
+ return m_thread_no;
+ }
+
+ // { Wait -> Start -> Stop }+ -> Exit
+ enum State {
+ Wait = 1, // wait for command
+ Start, // run current function
+ Stop, // stopped (paused) when current function done
+ Exit // exit thread
+ };
+
+ // tell thread to start running current function
+ void start();
+ // wait for thread to stop when function is done
+ void stop();
+ // tell thread to exit
+ void exit();
+ // collect thread after exit
+ void join();
+
+ // set function to run
+ inline void set_func(NDBT_ThreadFunc* func) {
+ m_func = func;
+ }
+
+ // input area
+ inline void set_input(const void* input) {
+ m_input = input;
+ }
+ inline const void* get_input() const {
+ return m_input;
+ }
+
+ // output area
+ inline void set_output(void* output) {
+ m_output = output;
+ }
+ inline void* get_output() const {
+ return m_output;
+ }
+ template <class T> inline void set_output() {
+ set_output(new T);
+ }
+ inline void delete_output() {
+ delete m_output;
+ m_output = 0;
+ }
+
+ // thread-specific Ndb object
+ inline class Ndb* get_ndb() const {
+ return m_ndb;
+ }
+ int connect(class Ndb_cluster_connection*, const char* db = "TEST_DB");
+ void disconnect();
+
+ // error code (OS, Ndb, other)
+ void clear_err() {
+ m_err = 0;
+ }
+ void set_err(int err) {
+ m_err = err;
+ }
+ int get_err() const {
+ return m_err;
+ }
+
+private:
+ friend class NDBT_ThreadSet;
+ friend void* NDBT_Thread_run(void* arg);
+
+ enum { Magic = 0xabacadae };
+ Uint32 m_magic;
+
+ State m_state;
+ NDBT_ThreadSet* m_thread_set;
+ int m_thread_no;
+
+ NDBT_ThreadFunc* m_func;
+ const void* m_input;
+ void* m_output;
+ class Ndb* m_ndb;
+ int m_err;
+
+ // run the thread
+ void run();
+
+ void lock() {
+ NdbMutex_Lock(m_mutex);
+ }
+ void unlock() {
+ NdbMutex_Unlock(m_mutex);
+ }
+
+ void wait() {
+ NdbCondition_Wait(m_cond, m_mutex);
+ }
+ void signal() {
+ NdbCondition_Signal(m_cond);
+ }
+
+ NdbMutex* m_mutex;
+ NdbCondition* m_cond;
+ NdbThread* m_thread;
+ void* m_status;
+};
+
+/*
+ * A set of threads, indexed from 0 to count-1. Methods
+ * are applied to each thread (serially). Input area is
+ * common to all threads. Output areas are allocated
+ * separately according to a template class.
+ */
+
+class NDBT_ThreadSet {
+public:
+ NDBT_ThreadSet(int count);
+ ~NDBT_ThreadSet();
+
+ inline int get_count() const {
+ return m_count;
+ }
+ inline NDBT_Thread& get_thread(int n) {
+ assert(n < m_count && m_thread[n] != 0);
+ return *m_thread[n];
+ }
+
+ // tell each thread to start running
+ void start();
+ // wait for each thread to stop
+ void stop();
+ // tell each thread to exit
+ void exit();
+ // collect each thread after exit
+ void join();
+
+ // set function to run in each thread
+ void set_func(NDBT_ThreadFunc* func);
+
+ // set input area (same instance in each thread)
+ void set_input(const void* input);
+
+ // set output areas
+ template <class T> inline void set_output() {
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.set_output<T>();
+ }
+ }
+ void delete_output();
+
+ // thread-specific Ndb objects
+ int connect(class Ndb_cluster_connection*, const char* db = "TEST_DB");
+ void disconnect();
+
+ int get_err() const;
+
+private:
+ int m_count;
+ NDBT_Thread** m_thread;
+};
+
+#endif
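A minimal usage sketch of the start/stop/exit/join protocol documented above; the worker function and input struct are invented for illustration:

// Worker functions match NDBT_ThreadFunc: void f(NDBT_Thread&).
struct MyInput { int rows; };

static void my_worker(NDBT_Thread& thr)
{
  const MyInput* in = (const MyInput*)thr.get_input();
  if (in->rows < 0)
    thr.set_err(1); // errors are reported per thread
}

static int run_all(NDBT_ThreadSet& ts, const MyInput& in)
{
  ts.set_func(my_worker); // same function in every thread
  ts.set_input(&in);      // shared read-only input area
  ts.start();             // wake all threads
  ts.stop();              // wait until each has paused again
  ts.exit();              // tell threads to exit
  ts.join();              // collect them
  return ts.get_err();    // nonzero if some thread set an error (assumed)
}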
diff --git a/storage/ndb/test/ndbapi/benchronja.cpp b/storage/ndb/test/ndbapi/benchronja.cpp
index 4973e6e2487..73ee324a888 100644
--- a/storage/ndb/test/ndbapi/benchronja.cpp
+++ b/storage/ndb/test/ndbapi/benchronja.cpp
@@ -41,7 +41,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 8000
#define START_TIMER NdbTimer timer; timer.doStart();
#define STOP_TIMER timer.doStop();
@@ -56,18 +63,18 @@ struct ThreadNdb
Ndb* NdbRef;
};
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static unsigned int tNoOfThreads;
static unsigned int tNoOfOpsPerExecute;
static unsigned int tNoOfRecords;
static unsigned int tNoOfOperations;
-static int ThreadReady[MAXTHREADS];
-static int ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static int ThreadStart[NDB_MAXTHREADS];
NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){
ndb_init();
- ThreadNdb tabThread[MAXTHREADS];
+ ThreadNdb tabThread[NDB_MAXTHREADS];
int i = 0 ;
int cont = 0 ;
Ndb* pMyNdb = NULL ; //( "TEST_DB" );
@@ -84,7 +91,7 @@ NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){
{
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) goto error_input;
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) goto error_input;
}else if (strcmp(argv[i], "-o") == 0){
tNoOfOperations = atoi(argv[i+1]);
if (tNoOfOperations < 1) goto error_input;
diff --git a/storage/ndb/test/ndbapi/flexAsynch.cpp b/storage/ndb/test/ndbapi/flexAsynch.cpp
index 20a157fc2f3..1f52315482f 100644
--- a/storage/ndb/test/ndbapi/flexAsynch.cpp
+++ b/storage/ndb/test/ndbapi/flexAsynch.cpp
@@ -35,7 +35,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 128
+#define NDB_MAXTHREADS 128
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXPAR 1024
#define MAXATTRSIZE 1000
#define PKSIZE 2
@@ -76,10 +83,10 @@ struct ThreadNdb
int ThreadNo;
};
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN+1];
static char attrName[MAXATTR][MAXSTRLEN+1];
@@ -160,7 +167,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- pThreadData = new ThreadNdb[MAXTHREADS];
+ pThreadData = new ThreadNdb[NDB_MAXTHREADS];
ndbout << endl << "FLEXASYNCH - Starting normal mode" << endl;
ndbout << "Perform benchmark of insert, update and delete transactions";
@@ -844,7 +851,7 @@ readArguments(int argc, const char** argv){
while (argc > 1){
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){
ndbout_c("Invalid no of threads");
return -1;
}
diff --git a/storage/ndb/test/ndbapi/flexHammer.cpp b/storage/ndb/test/ndbapi/flexHammer.cpp
index 9b9fd7a4a92..1b0097cf84b 100644
--- a/storage/ndb/test/ndbapi/flexHammer.cpp
+++ b/storage/ndb/test/ndbapi/flexHammer.cpp
@@ -66,7 +66,14 @@ ErrorData * flexHammerErrorData;
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 100
// Max number of retries if something fails
#define MaxNoOfAttemptsC 10
@@ -119,8 +126,8 @@ static int tAttributeSize;
static int tNoOfOperations;
static int tNoOfRecords;
static int tNoOfLoops;
-static ReadyType ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static ReadyType ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN];
static char attrName[MAXATTR][MAXSTRLEN];
static int theSimpleFlag = 0;
@@ -640,7 +647,7 @@ readArguments (int argc, const char** argv)
while (argc > 1) {
if (strcmp(argv[i], "-t") == 0) {
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
return(1);
}
else if (strcmp(argv[i], "-o") == 0) {
diff --git a/storage/ndb/test/ndbapi/flexScan.cpp b/storage/ndb/test/ndbapi/flexScan.cpp
index e0b07250762..105dfeedfff 100644
--- a/storage/ndb/test/ndbapi/flexScan.cpp
+++ b/storage/ndb/test/ndbapi/flexScan.cpp
@@ -68,7 +68,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 64
enum StartType {
@@ -848,7 +855,7 @@ static int readArguments(int argc, const char** argv)
if (strcmp(argv[i], "-t") == 0) {
if (argv[i + 1] != NULL) {
tNoOfThreads = atoi(argv[i + 1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) {
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) {
retValue = -1;
} // if
} // if
diff --git a/storage/ndb/test/ndbapi/flexTT.cpp b/storage/ndb/test/ndbapi/flexTT.cpp
index 71d5b6c096e..4373102f77e 100644
--- a/storage/ndb/test/ndbapi/flexTT.cpp
+++ b/storage/ndb/test/ndbapi/flexTT.cpp
@@ -35,7 +35,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 128
+#define NDB_MAXTHREADS 128
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXPAR 1024
#define MAXATTRSIZE 1000
#define PKSIZE 1
@@ -101,10 +108,10 @@ static void input_error();
ErrorData * flexTTErrorData;
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[1][MAXSTRLEN+1];
static char attrName[5][MAXSTRLEN+1];
@@ -184,7 +191,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- pThreadData = new ThreadNdb[MAXTHREADS];
+ pThreadData = new ThreadNdb[NDB_MAXTHREADS];
ndbout << endl << "FLEXTT - Starting normal mode" << endl;
ndbout << "Perform TimesTen benchmark" << endl;
@@ -798,7 +805,7 @@ readArguments(int argc, const char** argv){
while (argc > 1){
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){
ndbout_c("Invalid no of threads");
return -1;
}
diff --git a/storage/ndb/test/ndbapi/flexTimedAsynch.cpp b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp
index cc44ab8b237..b6301e59df2 100644
--- a/storage/ndb/test/ndbapi/flexTimedAsynch.cpp
+++ b/storage/ndb/test/ndbapi/flexTimedAsynch.cpp
@@ -57,7 +57,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 1000
#define PKSIZE 1
@@ -95,10 +102,10 @@ static int failed = 0 ; // lame global variable that keeps track of failed trans
// incremented in executeCallback() and reset in main()
/************************************************************* < epaulsa */
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN+1];
static char attrName[MAXATTR][MAXSTRLEN+1];
static int *getAttrValueTable;
@@ -174,7 +181,7 @@ void deleteAttributeSpace(){
NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535)
{
ndb_init();
- ThreadNdb tabThread[MAXTHREADS];
+ ThreadNdb tabThread[NDB_MAXTHREADS];
int tLoops=0;
int returnValue;
//NdbOut flexTimedAsynchNdbOut;
@@ -615,8 +622,8 @@ void readArguments(int argc, const char** argv)
if (strcmp(argv[i], "-t") == 0)
{
tNoOfThreads = atoi(argv[i+1]);
- // if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
+ // if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
exit(-1);
}
else if (strcmp(argv[i], "-i") == 0)
@@ -628,7 +635,7 @@ void readArguments(int argc, const char** argv)
else if (strcmp(argv[i], "-p") == 0)
{
tNoOfTransInBatch = atoi(argv[i+1]);
- //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > MAXTHREADS))
+ //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > NDB_MAXTHREADS))
if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > 10000))
exit(-1);
}
diff --git a/storage/ndb/test/ndbapi/initronja.cpp b/storage/ndb/test/ndbapi/initronja.cpp
index 170c3dd5cfb..28ffa9f211d 100644
--- a/storage/ndb/test/ndbapi/initronja.cpp
+++ b/storage/ndb/test/ndbapi/initronja.cpp
@@ -29,7 +29,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 8000
static unsigned int tNoOfRecords;
diff --git a/storage/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp
index 44c3d023169..952b5a50dc5 100644
--- a/storage/ndb/test/ndbapi/testBasic.cpp
+++ b/storage/ndb/test/ndbapi/testBasic.cpp
@@ -1290,17 +1290,7 @@ runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){
NdbTransaction* pTrans = pNdb->startTransaction();
NdbOperation* pOp = pTrans->getNdbOperation(tab->getName());
pOp->deleteTuple();
- for(a = 0; a<tab->getNoOfColumns(); a++)
- {
- if (tab->getColumn(a)->getPrimaryKey() == true)
- {
- if(tmp.equalForAttr(pOp, a, 0) != 0)
- {
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
- }
- }
+ tmp.equalForRow(pOp, loops);
// Define attributes to read
for(a = 0; a<tab->getNoOfColumns(); a++)
@@ -1313,6 +1303,30 @@ runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){
pTrans->execute(Commit);
pTrans->close();
+
+ pTrans = pNdb->startTransaction();
+ pOp = pTrans->getNdbOperation(tab->getName());
+ pOp->insertTuple();
+ tmp.setValues(pOp, loops, 0);
+
+ pOp = pTrans->getNdbOperation(tab->getName());
+ pOp->deleteTuple();
+ tmp.equalForRow(pOp, loops);
+ for(a = 0; a<tab->getNoOfColumns(); a++)
+ {
+ if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0)
+ {
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
+ if (pTrans->execute(Commit) != 0)
+ {
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ pTrans->close();
}
return NDBT_OK;
@@ -1444,6 +1458,91 @@ runBug28073(NDBT_Context *ctx, NDBT_Step* step)
template class Vector<Uint64>;
+int
+runBug20535(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Uint32 i;
+ Ndb* pNdb = GETNDB(step);
+ const NdbDictionary::Table * tab = ctx->getTab();
+ NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+
+ bool null = false;
+ for (i = 0; i<tab->getNoOfColumns(); i++)
+ {
+ if (tab->getColumn(i)->getNullable())
+ {
+ null = true;
+ break;
+ }
+ }
+
+ if (!null)
+ return NDBT_OK;
+
+ HugoTransactions hugoTrans(* tab);
+ hugoTrans.loadTable(pNdb, 1);
+
+ NdbTransaction* pTrans = pNdb->startTransaction();
+ NdbOperation* pOp = pTrans->getNdbOperation(tab->getName());
+ pOp->deleteTuple();
+ hugoTrans.equalForRow(pOp, 0);
+ if (pTrans->execute(NoCommit) != 0)
+ return NDBT_FAILED;
+
+ pOp = pTrans->getNdbOperation(tab->getName());
+ pOp->insertTuple();
+ hugoTrans.equalForRow(pOp, 0);
+ for (i = 0; i<tab->getNoOfColumns(); i++)
+ {
+ if (!tab->getColumn(i)->getPrimaryKey() &&
+ !tab->getColumn(i)->getNullable())
+ {
+ hugoTrans.setValueForAttr(pOp, i, 0, 1);
+ }
+ }
+
+ if (pTrans->execute(Commit) != 0)
+ return NDBT_FAILED;
+
+ pTrans->close();
+
+ pTrans = pNdb->startTransaction();
+ pOp = pTrans->getNdbOperation(tab->getName());
+ pOp->readTuple();
+ hugoTrans.equalForRow(pOp, 0);
+ Vector<NdbRecAttr*> values;
+ for (i = 0; i<tab->getNoOfColumns(); i++)
+ {
+ if (!tab->getColumn(i)->getPrimaryKey() &&
+ tab->getColumn(i)->getNullable())
+ {
+ values.push_back(pOp->getValue(i));
+ }
+ }
+
+ if (pTrans->execute(Commit) != 0)
+ return NDBT_FAILED;
+
+ null = true;
+ for (i = 0; i<values.size(); i++)
+ {
+ if (!values[i]->isNULL())
+ {
+ null = false;
+ ndbout_c("column %s is not NULL", values[i]->getColumn()->getName());
+ }
+ }
+
+ pTrans->close();
+
+ if (null)
+ return NDBT_OK;
+ else
+ return NDBT_FAILED;
+}
+
+template class Vector<NdbRecAttr*>;
+
NDBT_TESTSUITE(testBasic);
TESTCASE("PkInsert",
"Verify that we can insert and delete from this table using PK"
@@ -1728,6 +1827,10 @@ TESTCASE("Bug28073",
"Infinite loop in lock queue" ){
STEP(runBug28073);
}
+TESTCASE("Bug20535",
+ "Verify that delete+insert of a row in one transaction leaves non-updated nullable columns NULL" ){
+ STEP(runBug20535);
+}
NDBT_TESTSUITE_END(testBasic);
#if 0
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index 9828cb768df..13c071f968e 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -2204,6 +2204,159 @@ runBug21755(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+static
+int
+create_tablespace(NdbDictionary::Dictionary* pDict,
+ const char * lgname,
+ const char * tsname,
+ const char * dfname)
+{
+ NdbDictionary::Tablespace ts;
+ ts.setName(tsname);
+ ts.setExtentSize(1024*1024);
+ ts.setDefaultLogfileGroup(lgname);
+
+ if(pDict->createTablespace(ts) != 0)
+ {
+ g_err << "Failed to create tablespace:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Datafile df;
+ df.setPath(dfname);
+ df.setSize(1*1024*1024);
+ df.setTablespace(tsname);
+
+ if(pDict->createDatafile(df) != 0)
+ {
+ g_err << "Failed to create datafile:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ return 0;
+}
+
+int
+runBug24631(NDBT_Context* ctx, NDBT_Step* step)
+{
+ char tsname[256];
+ char dfname[256];
+ char lgname[256];
+ char ufname[256];
+ NdbRestarter res;
+
+ if (res.getNumDbNodes() < 2)
+ return NDBT_OK;
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+
+ NdbDictionary::Dictionary::List list;
+ if (pDict->listObjects(list) == -1)
+ return NDBT_FAILED;
+
+ const char * lgfound = 0;
+
+ for (Uint32 i = 0; i<list.count; i++)
+ {
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::LogfileGroup:
+ lgfound = list.elements[i].name;
+ break;
+ default:
+ break;
+ }
+ if (lgfound)
+ break;
+ }
+
+ if (lgfound == 0)
+ {
+ BaseString::snprintf(lgname, sizeof(lgname), "LG-%u", rand());
+ NdbDictionary::LogfileGroup lg;
+
+ lg.setName(lgname);
+ lg.setUndoBufferSize(8*1024*1024);
+ if(pDict->createLogfileGroup(lg) != 0)
+ {
+ g_err << "Failed to create logfilegroup:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Undofile uf;
+ BaseString::snprintf(ufname, sizeof(ufname), "%s-%u", lgname, rand());
+ uf.setPath(ufname);
+ uf.setSize(2*1024*1024);
+ uf.setLogfileGroup(lgname);
+
+ if(pDict->createUndofile(uf) != 0)
+ {
+ g_err << "Failed to create undofile:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ else
+ {
+ BaseString::snprintf(lgname, sizeof(lgname), "%s", lgfound);
+ }
+
+ BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand());
+ BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand());
+
+ if (create_tablespace(pDict, lgname, tsname, dfname))
+ return NDBT_FAILED;
+
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ res.restartOneDbNode(node, false, true, true);
+ NdbSleep_SecSleep(3);
+
+ if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0)
+ {
+ g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0)
+ {
+ g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ if (create_tablespace(pDict, lgname, tsname, dfname))
+ return NDBT_FAILED;
+
+ if (pDict->dropDatafile(pDict->getDatafile(0, dfname)) != 0)
+ {
+ g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ if (pDict->dropTablespace(pDict->getTablespace(tsname)) != 0)
+ {
+ g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ if (lgfound == 0)
+ {
+ if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgname)) != 0)
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
struct RandSchemaOp
{
struct Obj
@@ -2707,6 +2860,10 @@ TESTCASE("DictRestart",
""){
INITIALIZER(runDictRestart);
}
+TESTCASE("Bug24631",
+ ""){
+ INITIALIZER(runBug24631);
+}
NDBT_TESTSUITE_END(testDict);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/ndbapi/testIndex.cpp b/storage/ndb/test/ndbapi/testIndex.cpp
index 7691f036a46..00e559c7a0f 100644
--- a/storage/ndb/test/ndbapi/testIndex.cpp
+++ b/storage/ndb/test/ndbapi/testIndex.cpp
@@ -1298,6 +1298,103 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
return res;
}
+int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
+{
+ int result= NDBT_OK;
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+ const NdbDictionary::Index * idx = dict->getIndex(pkIdxName,
+ ctx->getTab()->getName());
+
+ HugoOperations ops(*ctx->getTab(), idx);
+
+ g_err << "Using INDEX: " << pkIdxName << endl;
+
+ NdbRestarter restarter;
+
+ int loops = ctx->getNumLoops();
+ const int rows = ctx->getNumRecords();
+ const int batchsize = ctx->getProperty("BatchSize", 1);
+
+ for(int bs=1; bs < loops; bs++)
+ {
+ int c= 0;
+ while (c++ < loops)
+ {
+ g_err << "BS " << bs << " LOOP #" << c << endl;
+
+ g_err << "inserting error on op#" << c << endl;
+
+ CHECK(ops.startTransaction(pNdb) == 0);
+ for(int i=1;i<=c;i++)
+ {
+ if(i==c)
+ {
+ if(restarter.insertErrorInAllNodes(inject_err)!=0)
+ {
+ g_err << "**** FAILED to insert error" << endl;
+ result= NDBT_FAILED;
+ break;
+ }
+ }
+ CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0);
+ if(i%bs==0 || i==c)
+ {
+ if(i<c)
+ {
+ if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=NDBT_OK)
+ {
+ g_err << "**** executeNoCommit should have succeeded" << endl;
+ result= NDBT_FAILED;
+ }
+ }
+ else
+ {
+ if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=289)
+ {
+ g_err << "**** executeNoCommit should have failed with 289"
+ << endl;
+ result= NDBT_FAILED;
+ }
+ g_err << "NdbError.code= " <<
+ ops.getTransaction()->getNdbError().code << endl;
+ break;
+ }
+ }
+ }
+
+ CHECK(ops.closeTransaction(pNdb) == 0);
+
+ if(restarter.insertErrorInAllNodes(0) != 0)
+ {
+ g_err << "**** Failed to error insert(0)" << endl;
+ return NDBT_FAILED;
+ }
+
+ CHECK(ops.startTransaction(pNdb) == 0);
+ if (ops.indexReadRecords(pNdb, pkIdxName,0,0,rows) != 0){
+ g_err << "**** Index read failed" << endl;
+ return NDBT_FAILED;
+ }
+ CHECK(ops.closeTransaction(pNdb) == 0);
+ }
+ }
+
+ return result;
+}
+
+int
+runBug28804(NDBT_Context* ctx, NDBT_Step* step)
+{
+ return tcSaveINDX_test(ctx, step, 8052);
+}
+
+int
+runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step)
+{
+ return tcSaveINDX_test(ctx, step, 8051);
+}
+
NDBT_TESTSUITE(testIndex);
TESTCASE("CreateAll",
"Test that we can create all various indexes on each table\n"
@@ -1629,6 +1726,27 @@ TESTCASE("Bug25059",
STEP(runBug25059);
FINALIZER(createPkIndex_Drop);
}
+TESTCASE("Bug28804",
+ "Test behaviour on out of TransactionBufferMemory for index lookup"){
+ TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ INITIALIZER(runClearTable);
+ INITIALIZER(createPkIndex);
+ INITIALIZER(runLoadTable);
+ STEP(runBug28804);
+ FINALIZER(createPkIndex_Drop);
+ FINALIZER(runClearTable);
+}
+TESTCASE("Bug28804_ATTRINFO",
+ "Test behaviour on out of TransactionBufferMemory for index lookup"
+ " in saveINDXATTRINFO"){
+ TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ INITIALIZER(runClearTable);
+ INITIALIZER(createPkIndex);
+ INITIALIZER(runLoadTable);
+ STEP(runBug28804_ATTRINFO);
+ FINALIZER(createPkIndex_Drop);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/ndbapi/testIndexStat.cpp b/storage/ndb/test/ndbapi/testIndexStat.cpp
index 7c69361a732..0e15cdd80d1 100644
--- a/storage/ndb/test/ndbapi/testIndexStat.cpp
+++ b/storage/ndb/test/ndbapi/testIndexStat.cpp
@@ -1210,8 +1210,8 @@ struct V_rir {
static double data(const Range& range) { return (double)range.errpct; }
};
-template static void computestat<Key, V_rpk>(Stat& stat);
-template static void computestat<Range, V_rir>(Stat& stat);
+template void computestat<Key, V_rpk>(Stat& stat);
+template void computestat<Range, V_rir>(Stat& stat);
static Stat g_stat_rpk; // summaries over loops
static Stat g_stat_rir;
@@ -1297,43 +1297,43 @@ my_long_options[] =
{
NDB_STD_OPTS("testIndexStat"),
{ "loglevel", 1001, "Logging level in this program 0-3 (default 0)",
- (gptr*)&g_opts.loglevel, (gptr*)&g_opts.loglevel, 0,
+ (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "seed", 1002, "Random seed (0=loop number, default -1=random)",
- (gptr*)&g_opts.seed, (gptr*)&g_opts.seed, 0,
+ (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0,
GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 },
{ "loop", 1003, "Number of test loops (default 1, 0=forever)",
- (gptr*)&g_opts.loop, (gptr*)&g_opts.loop, 0,
+ (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0,
GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 },
{ "rows", 1004, "Number of rows (default 100000)",
- (gptr*)&g_opts.rows, (gptr*)&g_opts.rows, 0,
+ (uchar **)&g_opts.rows, (uchar **)&g_opts.rows, 0,
GET_UINT, REQUIRED_ARG, 100000, 0, 0, 0, 0, 0 },
{ "ops", 1005, "Number of index scans per loop (default 1000)",
- (gptr*)&g_opts.ops, (gptr*)&g_opts.ops, 0,
+ (uchar **)&g_opts.ops, (uchar **)&g_opts.ops, 0,
GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
{ "dupkeys", 1006, "Pct records per key (min 100, default 1000)",
- (gptr*)&g_opts.dupkeys, (gptr*)&g_opts.dupkeys, 0,
+ (uchar **)&g_opts.dupkeys, (uchar **)&g_opts.dupkeys, 0,
GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
{ "scanpct", 1007, "Preferred max pct of total rows per scan (default 5)",
- (gptr*)&g_opts.scanpct, (gptr*)&g_opts.scanpct, 0,
+ (uchar **)&g_opts.scanpct, (uchar **)&g_opts.scanpct, 0,
GET_UINT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 },
{ "nullkeys", 1008, "Pct nulls in each key attribute (default 10)",
- (gptr*)&g_opts.nullkeys, (gptr*)&g_opts.nullkeys, 0,
+ (uchar **)&g_opts.nullkeys, (uchar **)&g_opts.nullkeys, 0,
GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 },
{ "eqscans", 1009, "Pct scans for partial/full equality (default 50)",
- (gptr*)&g_opts.eqscans, (gptr*)&g_opts.eqscans, 0,
+ (uchar **)&g_opts.eqscans, (uchar **)&g_opts.eqscans, 0,
GET_UINT, REQUIRED_ARG, 50, 0, 0, 0, 0, 0 },
{ "dupscans", 1010, "Pct scans using same bounds (default 10)",
- (gptr*)&g_opts.dupscans, (gptr*)&g_opts.dupscans, 0,
+ (uchar **)&g_opts.dupscans, (uchar **)&g_opts.dupscans, 0,
GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 },
{ "keeptable", 1011, "Use existing table and data if any and do not drop",
- (gptr*)&g_opts.keeptable, (gptr*)&g_opts.keeptable, 0,
+ (uchar **)&g_opts.keeptable, (uchar **)&g_opts.keeptable, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-extra-checks", 1012, "Omit expensive consistency checks",
- (gptr*)&g_opts.nochecks, (gptr*)&g_opts.nochecks, 0,
+ (uchar **)&g_opts.nochecks, (uchar **)&g_opts.nochecks, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "abort-on-error", 1013, "Dump core on any error",
- (gptr*)&g_opts.abort, (gptr*)&g_opts.abort, 0,
+ (uchar **)&g_opts.abort, (uchar **)&g_opts.abort, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0,
0, 0, 0,
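The change is mechanical: the my_option value pointers in my_getopt now have type uchar ** rather than gptr*, so every option table updates its casts in lockstep. A representative entry after the change (the option name and variable are illustrative):

{ "verbose", 'v', "Verbose output",
  (uchar **)&g_verbose, (uchar **)&g_verbose, 0,
  GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },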
diff --git a/storage/ndb/test/ndbapi/testMgm.cpp b/storage/ndb/test/ndbapi/testMgm.cpp
index cc074087bdb..e43972c8c29 100644
--- a/storage/ndb/test/ndbapi/testMgm.cpp
+++ b/storage/ndb/test/ndbapi/testMgm.cpp
@@ -212,6 +212,76 @@ int runTestApiSession(NDBT_Context* ctx, NDBT_Step* step)
}
}
+int runTestApiConnectTimeout(NDBT_Context* ctx, NDBT_Step* step)
+{
+ char *mgm= ctx->getRemoteMgm();
+ int result= NDBT_FAILED;
+ int cc= 0;
+ int mgmd_nodeid= 0;
+ ndb_mgm_reply reply;
+
+ NdbMgmHandle h;
+ h= ndb_mgm_create_handle();
+ ndb_mgm_set_connectstring(h, mgm);
+
+ ndbout << "TEST connect timeout" << endl;
+
+ ndb_mgm_set_timeout(h, 3000);
+
+ struct timeval tstart, tend;
+ int secs;
+ timerclear(&tstart);
+ timerclear(&tend);
+ gettimeofday(&tstart,NULL);
+
+ ndb_mgm_connect(h,0,0,0);
+
+ gettimeofday(&tend,NULL);
+
+ secs= tend.tv_sec - tstart.tv_sec;
+ ndbout << "Took about: " << secs <<" seconds"<<endl;
+
+ if(secs < 4)
+ result= NDBT_OK;
+ else
+ goto done;
+
+ ndb_mgm_set_connectstring(h, mgm);
+
+ ndbout << "TEST connect timeout" << endl;
+
+ ndb_mgm_destroy_handle(&h);
+
+ h= ndb_mgm_create_handle();
+ ndb_mgm_set_connectstring(h, "1.1.1.1");
+
+ ndbout << "TEST connect timeout (invalid host)" << endl;
+
+ ndb_mgm_set_timeout(h, 3000);
+
+ timerclear(&tstart);
+ timerclear(&tend);
+ gettimeofday(&tstart,NULL);
+
+ ndb_mgm_connect(h,0,0,0);
+
+ gettimeofday(&tend,NULL);
+
+ secs= tend.tv_sec - tstart.tv_sec;
+ ndbout << "Took about: " << secs <<" seconds"<<endl;
+
+ if(secs < 4)
+ result= NDBT_OK;
+ else
+ result= NDBT_FAILED;
+
+done:
+ ndb_mgm_disconnect(h);
+ ndb_mgm_destroy_handle(&h);
+
+ return result;
+}
+
int runTestApiTimeoutBasic(NDBT_Context* ctx, NDBT_Step* step)
{
char *mgm= ctx->getRemoteMgm();
@@ -728,6 +798,11 @@ TESTCASE("ApiSessionFailure",
INITIALIZER(runTestApiSession);
}
+TESTCASE("ApiConnectTimeout",
+ "Connect timeout tests for MGMAPI"){
+ INITIALIZER(runTestApiConnectTimeout);
+}
TESTCASE("ApiTimeoutBasic",
"Basic timeout tests for MGMAPI"){
INITIALIZER(runTestApiTimeoutBasic);
diff --git a/storage/ndb/test/ndbapi/testNdbApi.cpp b/storage/ndb/test/ndbapi/testNdbApi.cpp
index f731dc3601f..c05a2417bca 100644
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp
@@ -1588,6 +1588,36 @@ int runTestExecuteAsynch(NDBT_Context* ctx, NDBT_Step* step){
template class Vector<NdbScanOperation*>;
+int
+runBug28443(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int records = ctx->getNumRecords();
+
+ NdbRestarter restarter;
+
+ restarter.insertErrorInAllNodes(9003);
+
+ for (Uint32 i = 0; i<ctx->getNumLoops(); i++)
+ {
+ HugoTransactions hugoTrans(*ctx->getTab());
+ if (hugoTrans.loadTable(GETNDB(step), records, 2048) != 0)
+ {
+ result = NDBT_FAILED;
+ goto done;
+ }
+ if (runClearTable(ctx, step) != 0)
+ {
+ result = NDBT_FAILED;
+ goto done;
+ }
+ }
+
+done:
+ restarter.insertErrorInAllNodes(9003);
+
+ return result;
+}
NDBT_TESTSUITE(testNdbApi);
TESTCASE("MaxNdb",
@@ -1689,6 +1719,10 @@ TESTCASE("ExecuteAsynch",
"Check that executeAsync() works (BUG#27495)\n"){
INITIALIZER(runTestExecuteAsynch);
}
+TESTCASE("Bug28443",
+ ""){
+ INITIALIZER(runBug28443);
+}
NDBT_TESTSUITE_END(testNdbApi);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 85dbc2aab2a..99b72699762 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -963,12 +963,62 @@ int runBug24717(NDBT_Context* ctx, NDBT_Step* step){
restarter.startNodes(&nodeId, 1);
- for (Uint32 i = 0; i < 100; i++)
- {
- hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
- }
-
+ do {
+ for (Uint32 i = 0; i < 100; i++)
+ {
+ hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
+ }
+ } while (restarter.waitClusterStarted(5) != 0);
+ }
+
+ return NDBT_OK;
+}
+
+int
+runBug29364(NDBT_Context* ctx, NDBT_Step* step){
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter restarter;
+ Ndb* pNdb = GETNDB(step);
+
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ if (restarter.getNumDbNodes() < 4)
+ return NDBT_OK;
+
+ int dump0[] = { 9000, 0 } ;
+ int dump1[] = { 9001, 0 } ;
+ Uint32 ownNode = refToNode(pNdb->getReference());
+ dump0[1] = ownNode;
+
+ for (; loops; loops --)
+ {
+ int node0 = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
+ int node1 = restarter.getRandomNodeOtherNodeGroup(node0, rand());
+
+ restarter.restartOneDbNode(node0, false, true, true);
+ restarter.waitNodesNoStart(&node0, 1);
+ restarter.startNodes(&node0, 1);
restarter.waitClusterStarted();
+
+ restarter.restartOneDbNode(node1, false, true, true);
+ restarter.waitNodesNoStart(&node1, 1);
+ if (restarter.dumpStateOneNode(node1, dump0, 2))
+ return NDBT_FAILED;
+
+ restarter.startNodes(&node1, 1);
+
+ do {
+
+ for (Uint32 i = 0; i < 100; i++)
+ {
+ hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
+ }
+ } while (restarter.waitClusterStarted(5) != 0);
+
+ if (restarter.dumpStateOneNode(node1, dump1, 1))
+ return NDBT_FAILED;
}
return NDBT_OK;
@@ -1629,6 +1679,85 @@ runBug28023(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_FAILED;
}
}
+
+ return NDBT_OK;
+}
+
+
+int
+runBug28717(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ Ndb* pNdb = GETNDB(step);
+ NdbRestarter res;
+
+ if (res.getNumDbNodes() < 4)
+ {
+ return NDBT_OK;
+ }
+
+ int master = res.getMasterNodeId();
+ int node0 = res.getRandomNodeOtherNodeGroup(master, rand());
+ int node1 = res.getRandomNodeSameNodeGroup(node0, rand());
+
+ ndbout_c("master: %d node0: %d node1: %d", master, node0, node1);
+
+ if (res.restartOneDbNode(node0, false, true, true))
+ {
+ return NDBT_FAILED;
+ }
+
+ {
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
+ NdbLogEventHandle handle =
+ ndb_mgm_create_logevent_handle(res.handle, filter);
+
+
+ int dump[] = { DumpStateOrd::DihStartLcpImmediately };
+ struct ndb_logevent event;
+
+ for (Uint32 i = 0; i<3; i++)
+ {
+ res.dumpStateOneNode(master, dump, 1);
+ while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
+ event.type != NDB_LE_LocalCheckpointStarted);
+ while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
+ event.type != NDB_LE_LocalCheckpointCompleted);
+ }
+ }
+
+ if (res.waitNodesNoStart(&node0, 1))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+
+ if (res.dumpStateOneNode(node0, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node0, 5010))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node1, 1001))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&node0, 1))
+ return NDBT_FAILED;
+
+ NdbSleep_SecSleep(3);
+
+ if (res.insertErrorInNode(node1, 0))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(&node0, 1))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&node0, 1))
+ return NDBT_FAILED;
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
return NDBT_OK;
}
@@ -1993,6 +2122,12 @@ TESTCASE("Bug27466", ""){
TESTCASE("Bug28023", ""){
INITIALIZER(runBug28023);
}
+TESTCASE("Bug28717", ""){
+ INITIALIZER(runBug28717);
+}
+TESTCASE("Bug29364", ""){
+ INITIALIZER(runBug29364);
+}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/ndbapi/testOperations.cpp b/storage/ndb/test/ndbapi/testOperations.cpp
index 38d6e817637..95be2b988e1 100644
--- a/storage/ndb/test/ndbapi/testOperations.cpp
+++ b/storage/ndb/test/ndbapi/testOperations.cpp
@@ -99,11 +99,6 @@ OperationTestCase matrix[] = {
break; }
#define C3(b) if (!(b)) { \
- g_err << "ERR: "<< step->getName() \
- << " failed on line " << __LINE__ << endl; \
- abort(); return NDBT_FAILED; }
-
-#define C3(b) if (!(b)) { \
g_err << "ERR: failed on line " << __LINE__ << endl; \
return NDBT_FAILED; }
diff --git a/storage/ndb/test/ndbapi/testScanFilter.cpp b/storage/ndb/test/ndbapi/testScanFilter.cpp
index ac0b0ed2c4d..dfe1097bd25 100644
--- a/storage/ndb/test/ndbapi/testScanFilter.cpp
+++ b/storage/ndb/test/ndbapi/testScanFilter.cpp
@@ -49,7 +49,15 @@ const char COL_LEN = 7;
 * there are six columns, 'i', 'j', 'k', 'l', 'm', 'n', and each one is equal to 0 or 1.
 * Since each tuple should be unique in this case, TUPLE_NUM = 2^6 = 64
*/
-const int TUPLE_NUM = (int)pow(2, COL_LEN-1);
+#ifdef _AIX
+/*
+ IBM xlC_r breaks on the initialization with pow():
+ "The expression must be an integral constant expression."
+*/
+const int TUPLE_NUM = 64;
+#else
+const int TUPLE_NUM = (int)pow(2, COL_LEN-1);
+#endif
/*
* the recursive level of random scan filter, can
@@ -479,7 +487,7 @@ int get_column_id(char ch)
*/
bool check_col_equal_one(int tuple_no, int col_id)
{
- int i = (int)pow(2, 6 - col_id);
+ int i = (int)pow((double)2, (double)(6 - col_id));
int j = tuple_no / i;
if(j % 2)
return true;
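The explicit double casts only steer xlC to one pow() overload. Since the predicate tests a single bit of tuple_no, an all-integer form is equivalent; a sketch of that alternative, not what the patch does:

// Column col_id of tuple tuple_no equals 1 exactly when
// bit (6 - col_id) of tuple_no is set.
bool check_col_equal_one_bits(int tuple_no, int col_id)
{
  return ((tuple_no >> (6 - col_id)) & 1) != 0;
}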
diff --git a/storage/ndb/test/ndbapi/testSystemRestart.cpp b/storage/ndb/test/ndbapi/testSystemRestart.cpp
index bd5cd3dd3c8..901c0e35568 100644
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp
@@ -1219,6 +1219,81 @@ runBug24664(NDBT_Context* ctx, NDBT_Step* step)
return result;
}
+int
+runBug29167(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ NdbRestarter restarter;
+ Ndb* pNdb = GETNDB(step);
+ const Uint32 nodeCount = restarter.getNumDbNodes();
+
+ if (nodeCount < 2)
+ return NDBT_OK;
+
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
+ NdbLogEventHandle handle =
+ ndb_mgm_create_logevent_handle(restarter.handle, filter);
+
+ struct ndb_logevent event;
+ int master = restarter.getMasterNodeId();
+ do {
+ int node1 = restarter.getRandomNodeOtherNodeGroup(master, rand());
+ int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ restarter.dumpStateAllNodes(val2, 2);
+ int dump[] = { DumpStateOrd::DihSetTimeBetweenGcp, 30000 };
+ restarter.dumpStateAllNodes(dump, 2);
+
+ while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
+ event.type != NDB_LE_GlobalCheckpointCompleted);
+
+ CHECK(restarter.insertErrorInAllNodes(932) == 0);
+
+ CHECK(restarter.insertErrorInNode(node1, 7183) == 0);
+ CHECK(restarter.insertErrorInNode(node2, 7183) == 0);
+
+ CHECK(restarter.waitClusterNoStart() == 0);
+ restarter.startAll();
+ CHECK(restarter.waitClusterStarted() == 0);
+ } while(false);
+
+ return result;
+}
+
+int
+runBug28770(NDBT_Context* ctx, NDBT_Step* step) {
+ Ndb* pNdb = GETNDB(step);
+ NdbRestarter restarter;
+ int result = NDBT_OK;
+ int count = 0;
+ Uint32 i = 0;
+ Uint32 loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ UtilTransactions utilTrans(*ctx->getTab());
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+  g_info << "Loading records..." << endl;
+  hugoTrans.loadTable(pNdb, records);
+
+ while(i<=loops && result != NDBT_FAILED){
+ g_info << "Loop " << i << "/"<< loops <<" started" << endl;
+ CHECK(restarter.restartAll(false, true, false) == 0);
+ NdbSleep_SecSleep(3);
+ CHECK(restarter.waitClusterNoStart() == 0);
+ restarter.insertErrorInAllNodes(6007);
+ CHECK(restarter.startAll()== 0);
+ CHECK(restarter.waitClusterStarted() == 0);
+ CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
+ CHECK(count == records);
+ i++;
+ }
+ ndbout << " runBug28770 finished" << endl;
+ return result;
+}
+
+
NDBT_TESTSUITE(testSystemRestart);
TESTCASE("SR1",
"Basic system restart test. Focus on testing restart from REDO log.\n"
@@ -1399,6 +1474,26 @@ TESTCASE("Bug24664",
STEP(runBug24664);
FINALIZER(runClearTable);
}
+TESTCASE("Bug29167", "")
+{
+ INITIALIZER(runWaitStarted);
+ STEP(runBug29167);
+}
+TESTCASE("Bug28770",
+ "Check readTableFile1 fails, readTableFile2 succeeds\n"
+ "1. Restart all nodes -nostart\n"
+ "2. Insert error 6007 into all nodes\n"
+ "3. Start all nodes\n"
+ "4. Ensure cluster start\n"
+ "5. Read and verify records\n"
+ "6. Repeat until looping is completed\n"){
+ INITIALIZER(runWaitStarted);
+ INITIALIZER(runClearTable);
+ STEP(runBug28770);
+ FINALIZER(runClearTable);
+}
+
+
NDBT_TESTSUITE_END(testSystemRestart);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/ndbapi/test_event_merge.cpp b/storage/ndb/test/ndbapi/test_event_merge.cpp
index 662b1eb6f4c..d40b985adc2 100644
--- a/storage/ndb/test/ndbapi/test_event_merge.cpp
+++ b/storage/ndb/test/ndbapi/test_event_merge.cpp
@@ -2184,57 +2184,57 @@ my_long_options[] =
{
NDB_STD_OPTS("test_event_merge"),
{ "abort-on-error", 1001, "Do abort() on any error",
- (gptr*)&g_opts.abort_on_error, (gptr*)&g_opts.abort_on_error, 0,
+ (uchar **)&g_opts.abort_on_error, (uchar **)&g_opts.abort_on_error, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "loglevel", 1002, "Logging level in this program 0-3 (default 0)",
- (gptr*)&g_opts.loglevel, (gptr*)&g_opts.loglevel, 0,
+ (uchar **)&g_opts.loglevel, (uchar **)&g_opts.loglevel, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "loop", 1003, "Number of test loops (default 5, 0=forever)",
- (gptr*)&g_opts.loop, (gptr*)&g_opts.loop, 0,
+ (uchar **)&g_opts.loop, (uchar **)&g_opts.loop, 0,
GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 },
{ "maxops", 1004, "Approx number of PK operations per table (default 1000)",
- (gptr*)&g_opts.maxops, (gptr*)&g_opts.maxops, 0,
+ (uchar **)&g_opts.maxops, (uchar **)&g_opts.maxops, 0,
GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
{ "maxpk", 1005, "Number of different PK values (default 10, max 1000)",
- (gptr*)&g_opts.maxpk, (gptr*)&g_opts.maxpk, 0,
+ (uchar **)&g_opts.maxpk, (uchar **)&g_opts.maxpk, 0,
GET_UINT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 },
{ "maxtab", 1006, "Number of tables (default 10, max 100)",
- (gptr*)&g_opts.maxtab, (gptr*)&g_opts.maxtab, 0,
+ (uchar **)&g_opts.maxtab, (uchar **)&g_opts.maxtab, 0,
GET_INT, REQUIRED_ARG, 10, 0, 0, 0, 0, 0 },
{ "no-blobs", 1007, "Omit blob attributes (5.0: true)",
- (gptr*)&g_opts.no_blobs, (gptr*)&g_opts.no_blobs, 0,
+ (uchar **)&g_opts.no_blobs, (uchar **)&g_opts.no_blobs, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-implicit-nulls", 1008, "Insert must include all attrs"
" i.e. no implicit NULLs",
- (gptr*)&g_opts.no_implicit_nulls, (gptr*)&g_opts.no_implicit_nulls, 0,
+ (uchar **)&g_opts.no_implicit_nulls, (uchar **)&g_opts.no_implicit_nulls, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-missing-update", 1009, "Update must include all non-PK attrs",
- (gptr*)&g_opts.no_missing_update, (gptr*)&g_opts.no_missing_update, 0,
+ (uchar **)&g_opts.no_missing_update, (uchar **)&g_opts.no_missing_update, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-multiops", 1010, "Allow only 1 operation per commit",
- (gptr*)&g_opts.no_multiops, (gptr*)&g_opts.no_multiops, 0,
+ (uchar **)&g_opts.no_multiops, (uchar **)&g_opts.no_multiops, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-nulls", 1011, "Create no NULL values",
- (gptr*)&g_opts.no_nulls, (gptr*)&g_opts.no_nulls, 0,
+ (uchar **)&g_opts.no_nulls, (uchar **)&g_opts.no_nulls, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "one-blob", 1012, "Only one blob attribute (default 2)",
- (gptr*)&g_opts.one_blob, (gptr*)&g_opts.one_blob, 0,
+ (uchar **)&g_opts.one_blob, (uchar **)&g_opts.one_blob, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "opstring", 1013, "Operations to run e.g. idiucdc (c is commit) or"
" iuuc:uudc (the : separates loops)",
- (gptr*)&g_opts.opstring, (gptr*)&g_opts.opstring, 0,
+ (uchar **)&g_opts.opstring, (uchar **)&g_opts.opstring, 0,
GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "seed", 1014, "Random seed (0=loop number, default -1=random)",
- (gptr*)&g_opts.seed, (gptr*)&g_opts.seed, 0,
+ (uchar **)&g_opts.seed, (uchar **)&g_opts.seed, 0,
GET_INT, REQUIRED_ARG, -1, 0, 0, 0, 0, 0 },
{ "separate-events", 1015, "Do not combine events per GCI (5.0: true)",
- (gptr*)&g_opts.separate_events, (gptr*)&g_opts.separate_events, 0,
+ (uchar **)&g_opts.separate_events, (uchar **)&g_opts.separate_events, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "tweak", 1016, "Whatever the source says",
- (gptr*)&g_opts.tweak, (gptr*)&g_opts.tweak, 0,
+ (uchar **)&g_opts.tweak, (uchar **)&g_opts.tweak, 0,
GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "use-table", 1017, "Use existing tables",
- (gptr*)&g_opts.use_table, (gptr*)&g_opts.use_table, 0,
+ (uchar **)&g_opts.use_table, (uchar **)&g_opts.use_table, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0,
0, 0, 0,
diff --git a/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp
index 039a77f4d53..fb77220773d 100644
--- a/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp
+++ b/storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp
@@ -27,7 +27,14 @@ using namespace std; //
#define MAXROW 64
#define DEFROW 8
-#define MAXTHREADS 24
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
+#define NDB_MAXTHREADS 24
#define DEFTHREADS 2
#define MAXTABLES 16
@@ -83,7 +90,7 @@ int main(int argc, char* argv[]){
char* szTableNames = (char*)malloc(sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ;
memset(szTableNames, 0, sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ;
- UintPtr pThreadHandles[MAXTHREADS] = { NULL } ;
+ UintPtr pThreadHandles[NDB_MAXTHREADS] = { NULL } ;
AssignTableNames(szTableNames, nNoOfTables) ;
@@ -313,7 +320,7 @@ void ParseArguments(int argc, const char** argv){
if (strcmp(argv[i], "-t") == 0)
{
nNoOfThreads = atoi(argv[i+1]);
- if ((nNoOfThreads < 1) || (nNoOfThreads > MAXTHREADS))
+ if ((nNoOfThreads < 1) || (nNoOfThreads > NDB_MAXTHREADS))
nNoOfThreads = DEFTHREADS ;
}
else if (strcmp(argv[i], "-c") == 0)
diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am
index d6c6536cfc8..65aa62d11f0 100644
--- a/storage/ndb/test/run-test/Makefile.am
+++ b/storage/ndb/test/run-test/Makefile.am
@@ -23,7 +23,7 @@ include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
test_PROGRAMS = atrt
test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
conf-ndbmaster.cnf \
- conf-dl145a.cnf test-tests.txt
+ conf-dl145a.cnf test-tests.txt conf-test.cnf
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh autotest-run.sh
@@ -48,7 +48,7 @@ atrt_LDFLAGS = -static @ndb_bin_am_ldflags@
wrappersdir=$(prefix)/bin
wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
-EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT
+EXTRA_DIST = $(test_DATA) $(test_SCRIPTS) $(wrappers_SCRIPTS) README.ATRT atrt.hpp
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/test/run-test/autotest-boot.sh b/storage/ndb/test/run-test/autotest-boot.sh
index 31f611460ec..1df3a26cc8a 100644
--- a/storage/ndb/test/run-test/autotest-boot.sh
+++ b/storage/ndb/test/run-test/autotest-boot.sh
@@ -24,7 +24,10 @@ verbose=0
do_clone=yes
build=yes
+tag=
conf=
+extra_args=
+extra_clone=
LOCK=$HOME/.autotest-lock
############################
@@ -40,6 +43,8 @@ do
--clone=*) clone=`echo $1 | sed s/--clone=//`;;
--version) echo $VERSION; exit;;
--conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ --tag=*) tag=`echo $1 | sed s/--tag=//`;;
+ --*) echo "Unknown arg: $1";;
*) RUN=$*;;
esac
shift
@@ -52,14 +57,20 @@ done
#################################
if [ -z "$conf" ]
then
- conf=`pwd`/autotest.conf
+ if [ -f "`pwd`/autotest.conf" ]
+ then
+ conf="`pwd`/autotest.conf"
+ elif [ -f "$HOME/autotest.conf" ]
+ then
+ conf="$HOME/autotest.conf"
+ fi
fi
if [ -f $conf ]
then
. $conf
else
- echo "Can't find config file: $conf"
+ echo "Can't find config file: >$conf<"
exit
fi
@@ -92,7 +103,7 @@ fi
# Setup the clone source location #
####################################
-src_clone=$src_clone_base-$clone
+src_clone=${src_clone_base}${clone}
#######################################
# Check to see if the lock file exists#
@@ -125,7 +136,14 @@ fi
# You can add more to this path#
################################
-dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$
+if [ -z "$tag" ]
+then
+ dst_place=${build_dir}/clone-$clone-$DATE.$$
+else
+ dst_place=${build_dir}/clone-$tag-$DATE.$$
+ extra_args="$extra_args --clone=$tag"
+ extra_clone="-r$tag"
+fi
#########################################
# Delete source and pull down the latest#
@@ -134,7 +152,12 @@ dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$
if [ "$do_clone" ]
then
rm -rf $dst_place
- bk clone $src_clone $dst_place
+ if [ `echo $src_clone | grep -c 'file:\/\/'` = 1 ]
+ then
+ bk clone -l $extra_clone $src_clone $dst_place
+ else
+ bk clone $extra_clone $src_clone $dst_place
+ fi
fi
##########################################
@@ -156,7 +179,7 @@ fi
################################
script=$install_dir/mysql-test/ndb/autotest-run.sh
-$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock
+sh -x $script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args
if [ "$build" ]
then
diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh
index 34c3fe53949..b543cd1efb9 100644
--- a/storage/ndb/test/run-test/autotest-run.sh
+++ b/storage/ndb/test/run-test/autotest-run.sh
@@ -216,8 +216,8 @@ fi
# Make directories needed
p=`pwd`
-run_dir=$install_dir/run-$RUN-mysql-$clone-$target
-res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE
+run_dir=$install_dir/run-$RUN-$clone-$target
+res_dir=$base_dir/result-$RUN-$clone-$target/$DATE
tar_dir=$base_dir/saved-results
mkdir -p $run_dir $res_dir $tar_dir
@@ -246,7 +246,7 @@ cd $res_dir
echo "date=$DATE" > info.txt
echo "suite=$RUN" >> info.txt
-echo "clone=mysql-$clone" >> info.txt
+echo "clone=$clone" >> info.txt
echo "arch=$target" >> info.txt
find . | xargs chmod ugo+r
diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf
index ea344f1a62a..5f61bee755d 100644
--- a/storage/ndb/test/run-test/conf-dl145a.cnf
+++ b/storage/ndb/test/run-test/conf-dl145a.cnf
@@ -21,3 +21,6 @@ BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
+NoOfFragmentLogFiles = 4
+FragmentLogFileSize = 64M
+
diff --git a/storage/ndb/test/run-test/conf-test.cnf b/storage/ndb/test/run-test/conf-test.cnf
new file mode 100644
index 00000000000..e528eeb1d8b
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-test.cnf
@@ -0,0 +1,26 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .2node
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config.2node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 25M
+DataMemory = 100M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
+NoOfFragmentLogFiles = 4
+FragmentLogFileSize = 64M
+
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index 96473c28199..b7a3a15dae7 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -248,6 +248,10 @@ cmd: testBasic
args: -n Bug28073
max-time: 500
+cmd: testBasic
+args: -n Bug20535
+
+max-time: 500
cmd: testIndex
args: -n Bug25059 -r 3000 T1
@@ -481,6 +485,14 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug27003 T1
+max-time: 300
+cmd: testSystemRestart
+args: -n Bug29167 T1
+
+max-time: 300
+cmd: testSystemRestart
+args: -l 2 -n Bug28770 T1
+
max-time: 1000
cmd: testNodeRestart
args: -n Bug27283 T1
@@ -557,12 +569,20 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug28023 T7 D2
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug29364 T1
+
#
# DICT TESTS
max-time: 1500
cmd: testDict
args: -n CreateAndDrop
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug28717 T1
+
max-time: 1500
cmd: testDict
args: -n CreateAndDropAtRandom -l 200 T1
@@ -615,6 +635,10 @@ max-time: 1500
cmd: testDict
args: -l 25 -n DictRestart T1
+max-time: 500
+cmd: testDict
+args: -n Bug24631 T1
+
#
# TEST NDBAPI
#
@@ -696,6 +720,10 @@ max-time: 500
cmd: testNdbApi
args: -n ExecuteAsynch T1
+max-time: 1000
+cmd: testNdbApi
+args: -n Bug28443
+
#max-time: 500
#cmd: testInterpreter
#args: T1
@@ -882,6 +910,10 @@ max-time: 120
cmd: testMgm
args: -n ApiSessionFailure T1
+max-time: 15
+cmd: testMgm
+args: -n ApiConnectTimeout T1
+
max-time: 120
cmd: testMgm
args: -n ApiTimeoutBasic T1
@@ -906,3 +938,10 @@ max-time: 120
cmd: testMgm
args: -n ApiMgmStructEventTimeout T1
+max-time: 180
+cmd: testIndex
+args: -n Bug28804 T1 T3
+
+max-time: 180
+cmd: testIndex
+args: -n Bug28804_ATTRINFO T1 T3
diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp
index 2e8d6bfde6d..b5c4385f5d3 100644
--- a/storage/ndb/test/run-test/main.cpp
+++ b/storage/ndb/test/run-test/main.cpp
@@ -77,60 +77,60 @@ my_bool opt_core;
static struct my_option g_options[] =
{
{ "help", '?', "Display this help and exit.",
- (gptr*) &g_help, (gptr*) &g_help,
+ (uchar **) &g_help, (uchar **) &g_help,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "version", 'V', "Output version information and exit.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "clusters", 256, "Cluster",
- (gptr*) &g_clusters, (gptr*) &g_clusters,
+ (uchar **) &g_clusters, (uchar **) &g_clusters,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "replicate", 1024, "replicate",
- (gptr*) &g_dummy, (gptr*) &g_dummy,
+ (uchar **) &g_dummy, (uchar **) &g_dummy,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "log-file", 256, "log-file",
- (gptr*) &g_log_filename, (gptr*) &g_log_filename,
+ (uchar **) &g_log_filename, (uchar **) &g_log_filename,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "testcase-file", 'f', "testcase-file",
- (gptr*) &g_test_case_filename, (gptr*) &g_test_case_filename,
+ (uchar **) &g_test_case_filename, (uchar **) &g_test_case_filename,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "report-file", 'r', "report-file",
- (gptr*) &g_report_filename, (gptr*) &g_report_filename,
+ (uchar **) &g_report_filename, (uchar **) &g_report_filename,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "basedir", 256, "Base path",
- (gptr*) &g_basedir, (gptr*) &g_basedir,
+ (uchar **) &g_basedir, (uchar **) &g_basedir,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "baseport", 256, "Base port",
- (gptr*) &g_baseport, (gptr*) &g_baseport,
+ (uchar **) &g_baseport, (uchar **) &g_baseport,
0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0},
{ "prefix", 256, "mysql install dir",
- (gptr*) &g_prefix, (gptr*) &g_prefix,
+ (uchar **) &g_prefix, (uchar **) &g_prefix,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "verbose", 'v', "Verbosity",
- (gptr*) &g_verbosity, (gptr*) &g_verbosity,
+ (uchar **) &g_verbosity, (uchar **) &g_verbosity,
0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0},
{ "configure", 256, "configure",
- (gptr*) &g_do_setup, (gptr*) &g_do_setup,
+ (uchar **) &g_do_setup, (uchar **) &g_do_setup,
0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 },
{ "deploy", 256, "deploy",
- (gptr*) &g_do_deploy, (gptr*) &g_do_deploy,
+ (uchar **) &g_do_deploy, (uchar **) &g_do_deploy,
0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 },
{ "sshx", 256, "sshx",
- (gptr*) &g_do_sshx, (gptr*) &g_do_sshx,
+ (uchar **) &g_do_sshx, (uchar **) &g_do_sshx,
0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 },
{ "start", 256, "start",
- (gptr*) &g_do_start, (gptr*) &g_do_start,
+ (uchar **) &g_do_start, (uchar **) &g_do_start,
0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 },
{ "fqpn", 256, "Fully qualified path-names ",
- (gptr*) &g_fqpn, (gptr*) &g_fqpn,
+ (uchar **) &g_fqpn, (uchar **) &g_fqpn,
0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 },
{ "default-ports", 256, "Use default ports when possible",
- (gptr*) &g_default_ports, (gptr*) &g_default_ports,
+ (uchar **) &g_default_ports, (uchar **) &g_default_ports,
0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 },
{ "mode", 256, "Mode 0=interactive 1=regression 2=bench",
- (gptr*) &g_mode, (gptr*) &g_mode,
+ (uchar **) &g_mode, (uchar **) &g_mode,
0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 },
{ "quit", 256, "Quit before starting tests",
- (gptr*) &g_mode, (gptr*) &g_do_quit,
+ (uchar **) &g_mode, (uchar **) &g_do_quit,
0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
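
The hunks above are a mechanical migration of the my_option tables from the legacy gptr* casts to uchar **, matching the changed pointer-field types in my_getopt; the same substitution recurs in NDBT_Test.cpp and the ndb tools further down. A minimal sketch of the initializer convention these tables follow, assuming the usual my_getopt field order (g_example_options and g_verbose are illustrative names, not part of the patch):

    /* requires my_global.h and my_getopt.h */
    static my_bool g_verbose= 0;

    static struct my_option g_example_options[] =
    {
      /* name, short id, comment, value ptr, max-value ptr, typelib, */
      /* var type, arg type, default, min, max, sub_size, block_size, app_type */
      { "verbose", 'v', "Print verbose status",
        (uchar **) &g_verbose, (uchar **) &g_verbose, 0,
        GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
      { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }
    };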
diff --git a/storage/ndb/test/run-test/upgrade-boot.sh b/storage/ndb/test/run-test/upgrade-boot.sh
new file mode 100644
index 00000000000..d3542166551
--- /dev/null
+++ b/storage/ndb/test/run-test/upgrade-boot.sh
@@ -0,0 +1,218 @@
+#!/bin/sh
+#############################################################
+# This script created by Jonas does the following #
+# Cleans up clones and previous builds, pulls new clones,  #
+# builds, deploys, configures the tests and launches ATRT #
+#############################################################
+
+###############
+#Script setup #
+###############
+
+save_args=$*
+VERSION="upgrade-boot.sh version 1.00"
+
+DATE=`date '+%Y-%m-%d'`
+HOST=`hostname -s`
+export DATE HOST
+
+set -e
+
+echo "`date` starting: $*"
+
+verbose=0
+do_clone=yes
+build=yes
+
+tag0=
+tag1=
+conf=
+extra_args=
+extra_clone=
+LOCK=$HOME/.autotest-lock
+
+############################
+# Read command line entries#
+############################
+
+while [ "$1" ]
+do
+ case "$1" in
+ --no-clone) do_clone="";;
+ --no-build) build="";;
+ --verbose) verbose=`expr $verbose + 1`;;
+ --clone=*) clone0=`echo $1 | sed s/--clone=//`;;
+ --clone0=*) clone0=`echo $1 | sed s/--clone0=//`;;
+ --clone1=*) clone1=`echo $1 | sed s/--clone1=//`;;
+ --version) echo $VERSION; exit;;
+ --conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ --tag=*) tag0=`echo $1 | sed s/--tag=//`;;
+ --tag0=*) tag0=`echo $1 | sed s/--tag0=//`;;
+ --tag1=*) tag1=`echo $1 | sed s/--tag1=//`;;
+ --*) echo "Unknown arg: $1";;
+ *) RUN=$*;;
+ esac
+ shift
+done
+
+if [ -z "$clone1" ]
+then
+ clone1=$clone0
+fi
+
+if [ -z "$tag0" ]
+then
+ echo "No tag0 specified"
+ exit
+fi
+
+if [ -z "$tag1" ]
+then
+ echo "No tag1 specified"
+ exit
+fi
+
+#################################
+#Make sure the configfile exists#
+#if it does not, exit. if it    #
+# does, load (.) it             #
+#################################
+if [ -z "$conf" ]
+then
+ if [ -f "`pwd`/autotest.conf" ]
+ then
+ conf="`pwd`/autotest.conf"
+ elif [ -f "$HOME/autotest.conf" ]
+ then
+ conf="$HOME/autotest.conf"
+ fi
+fi
+
+if [ -f $conf ]
+then
+ . $conf
+else
+ echo "Can't find config file: >$conf<"
+ exit
+fi
+
+###############################
+# Validate that all interesting
+# variables were set in conf
+###############################
+vars="src_clone_base install_dir build_dir"
+for i in $vars
+do
+ t=`echo echo \\$$i`
+ if [ -z "`eval $t`" ]
+ then
+ echo "Invalid config: $conf, variable $i is not set"
+ exit
+ fi
+done
+
+###############################
+#Print out environment vars   #
+###############################
+
+if [ $verbose -gt 0 ]
+then
+ env
+fi
+
+####################################
+# Setup the lock file name and path#
+# Setup the clone source location #
+####################################
+
+src_clone0=${src_clone_base}${clone0}
+src_clone1=${src_clone_base}${clone1}
+
+#######################################
+# Check to see if the lock file exists#
+# If it does exit. #
+#######################################
+
+if [ -f $LOCK ]
+then
+ echo "Lock file exists: $LOCK"
+ exit 1
+fi
+
+#######################################
+# If the lock file does not exist then#
+# create it with date and run info #
+#######################################
+
+echo "$DATE $RUN" > $LOCK
+
+#############################
+#From here on, trap errors  #
+# and remove the lock file  #
+# before exiting            #
+#############################
+if [ `uname -s` != "SunOS" ]
+then
+ trap "rm -f $LOCK" ERR
+fi
+
+# You can add more to this path#
+################################
+
+dst_place0=${build_dir}/clone-$tag0-$DATE.$$
+dst_place1=${build_dir}/clone-$tag1-$DATE.$$
+
+#########################################
+# Delete source and pull down the latest#
+#########################################
+
+if [ "$do_clone" ]
+then
+ rm -rf $dst_place0 $dst_place1
+ if [ `echo $src_clone0 | grep -c 'file:\/\/'` = 1 ]
+ then
+ bk clone -l -r$tag0 $src_clone0 $dst_place0
+ else
+ bk clone -r$tag0 $src_clone0 $dst_place0
+ fi
+
+ if [ `echo $src_clone1 | grep -c 'file:\/\/'` = 1 ]
+ then
+ bk clone -l -r$tag1 $src_clone1 $dst_place1
+ else
+ bk clone -r$tag1 $src_clone1 $dst_place1
+ fi
+fi
+
+##########################################
+# Build the source, make installs, and #
+# create the database to be rsynced #
+##########################################
+install_dir0=$install_dir/$tag0
+install_dir1=$install_dir/$tag1
+if [ "$build" ]
+then
+ cd $dst_place0
+ rm -rf $install_dir0
+ BUILD/compile-ndb-autotest --prefix=$install_dir0
+ make install
+
+ cd $dst_place1
+ rm -rf $install_dir1
+ BUILD/compile-ndb-autotest --prefix=$install_dir1
+ make install
+fi
+
+
+################################
+# Start run script #
+################################
+
+script=$install_dir1/mysql-test/ndb/upgrade-run.sh
+$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock $extra_args
+
+if [ "$build" ]
+then
+ rm -rf $dst_place0 $dst_place1
+fi
+rm -f $LOCK
diff --git a/storage/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp
index 456782f4726..3a1600815e0 100644
--- a/storage/ndb/test/src/HugoTransactions.cpp
+++ b/storage/ndb/test/src/HugoTransactions.cpp
@@ -14,8 +14,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "HugoTransactions.hpp"
+#include <NDBT_Stats.hpp>
#include <NdbSleep.h>
-
+#include <NdbTick.h>
HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab,
const NdbDictionary::Index* idx):
@@ -24,6 +25,10 @@ HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab,
m_defaultScanUpdateMethod = 3;
setRetryMax();
+ m_stats_latency = 0;
+
+ m_thr_count = 0;
+ m_thr_no = -1;
}
HugoTransactions::~HugoTransactions(){
@@ -820,6 +825,16 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
+ MicroSecondTimer timer_start;
+ MicroSecondTimer timer_stop;
+ bool timer_active =
+ m_stats_latency != 0 &&
+ r >= batch && // first batch is "warmup"
+ r + batch != records; // last batch is usually partial
+
+ if (timer_active)
+ NdbTick_getMicroTimer(&timer_start);
+
if(pkReadRecord(pNdb, r, batch, lm) != NDBT_OK)
{
ERR(pTrans->getNdbError());
@@ -892,6 +907,12 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
}
closeTransaction(pNdb);
+
+ if (timer_active) {
+ NdbTick_getMicroTimer(&timer_stop);
+ NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop);
+ m_stats_latency->addObservation((double)ticks);
+ }
}
deallocRows();
g_info << reads << " records read" << endl;
@@ -913,9 +934,17 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
allocRows(batch);
g_info << "|- Updating records (batch=" << batch << ")..." << endl;
+ int batch_no = 0;
while (r < records){
if(r + batch > records)
batch = records - r;
+
+ if (m_thr_count != 0 && m_thr_no != batch_no % m_thr_count)
+ {
+ r += batch;
+ batch_no++;
+ continue;
+ }
if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
@@ -963,6 +992,16 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
+ MicroSecondTimer timer_start;
+ MicroSecondTimer timer_stop;
+ bool timer_active =
+ m_stats_latency != 0 &&
+ r >= batch && // first batch is "warmup"
+ r + batch != records; // last batch is usually partial
+
+ if (timer_active)
+ NdbTick_getMicroTimer(&timer_start);
+
if(pIndexScanOp)
{
int rows_found = 0;
@@ -1039,8 +1078,15 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
closeTransaction(pNdb);
-
+
+ if (timer_active) {
+ NdbTick_getMicroTimer(&timer_stop);
+ NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop);
+ m_stats_latency->addObservation((double)ticks);
+ }
+
r += batch; // Read next record
+ batch_no++;
}
deallocRows();
@@ -1228,10 +1274,18 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
int check;
g_info << "|- Deleting records..." << endl;
+ int batch_no = 0;
while (r < records){
if(r + batch > records)
batch = records - r;
+ if (m_thr_count != 0 && m_thr_no != batch_no % m_thr_count)
+ {
+ r += batch;
+ batch_no++;
+ continue;
+ }
+
if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
@@ -1255,6 +1309,16 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
return NDBT_FAILED;
}
+ MicroSecondTimer timer_start;
+ MicroSecondTimer timer_stop;
+ bool timer_active =
+ m_stats_latency != 0 &&
+ r >= batch && // first batch is "warmup"
+ r + batch != records; // last batch is usually partial
+
+ if (timer_active)
+ NdbTick_getMicroTimer(&timer_start);
+
if(pkDeleteRecord(pNdb, r, batch) != NDBT_OK)
{
ERR(pTrans->getNdbError());
@@ -1303,9 +1367,15 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
m_latest_gci = pTrans->getGCI();
}
closeTransaction(pNdb);
-
- r += batch; // Read next record
+ if (timer_active) {
+ NdbTick_getMicroTimer(&timer_stop);
+ NDB_TICKS ticks = NdbTick_getMicrosPassed(timer_start, timer_stop);
+ m_stats_latency->addObservation((double)ticks);
+ }
+
+ r += batch; // Read next record
+ batch_no++;
}
g_info << "|- " << deleted << " records deleted" << endl;
diff --git a/storage/ndb/test/src/Makefile.am b/storage/ndb/test/src/Makefile.am
index 37f6497e508..a025579cb72 100644
--- a/storage/ndb/test/src/Makefile.am
+++ b/storage/ndb/test/src/Makefile.am
@@ -24,7 +24,7 @@ libNDBT_a_SOURCES = \
NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \
NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \
NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \
- CpcClient.cpp NdbMixRestarter.cpp
+ CpcClient.cpp NdbMixRestarter.cpp NDBT_Thread.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/common/mgmcommon -I$(top_srcdir)/storage/ndb/include/mgmcommon -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/src/mgmapi
diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp
index b30430c73c1..69f3723ca75 100644
--- a/storage/ndb/test/src/NDBT_Test.cpp
+++ b/storage/ndb/test/src/NDBT_Test.cpp
@@ -1195,35 +1195,35 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS(""),
{ "print", OPT_PRINT, "Print execution tree",
- (gptr*) &opt_print, (gptr*) &opt_print, 0,
+ (uchar **) &opt_print, (uchar **) &opt_print, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "print_html", OPT_PRINT_HTML, "Print execution tree in html table format",
- (gptr*) &opt_print_html, (gptr*) &opt_print_html, 0,
+ (uchar **) &opt_print_html, (uchar **) &opt_print_html, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "print_cases", OPT_PRINT_CASES, "Print list of test cases",
- (gptr*) &opt_print_cases, (gptr*) &opt_print_cases, 0,
+ (uchar **) &opt_print_cases, (uchar **) &opt_print_cases, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "records", 'r', "Number of records",
- (gptr*) &opt_records, (gptr*) &opt_records, 0,
+ (uchar **) &opt_records, (uchar **) &opt_records, 0,
GET_INT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
{ "loops", 'l', "Number of loops",
- (gptr*) &opt_loops, (gptr*) &opt_loops, 0,
+ (uchar **) &opt_loops, (uchar **) &opt_loops, 0,
GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 },
{ "seed", 1024, "Random seed",
- (gptr*) &opt_seed, (gptr*) &opt_seed, 0,
+ (uchar **) &opt_seed, (uchar **) &opt_seed, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "testname", 'n', "Name of test to run",
- (gptr*) &opt_testname, (gptr*) &opt_testname, 0,
+ (uchar **) &opt_testname, (uchar **) &opt_testname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "remote_mgm", 'm',
"host:port to mgmsrv of remote cluster",
- (gptr*) &opt_remote_mgm, (gptr*) &opt_remote_mgm, 0,
+ (uchar **) &opt_remote_mgm, (uchar **) &opt_remote_mgm, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "timer", 't', "Print execution time",
- (gptr*) &opt_timer, (gptr*) &opt_timer, 0,
+ (uchar **) &opt_timer, (uchar **) &opt_timer, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "verbose", 'v', "Print verbose status",
- (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0,
+ (uchar **) &opt_verbose, (uchar **) &opt_verbose, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/test/src/NDBT_Thread.cpp b/storage/ndb/test/src/NDBT_Thread.cpp
new file mode 100644
index 00000000000..56cf2f6815b
--- /dev/null
+++ b/storage/ndb/test/src/NDBT_Thread.cpp
@@ -0,0 +1,283 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <NDBT_Thread.hpp>
+#include <NdbApi.hpp>
+
+NDBT_Thread::NDBT_Thread()
+{
+ create(0, -1);
+}
+
+NDBT_Thread::NDBT_Thread(NDBT_ThreadSet* thread_set, int thread_no)
+{
+ create(thread_set, thread_no);
+}
+
+void
+NDBT_Thread::create(NDBT_ThreadSet* thread_set, int thread_no)
+{
+ m_magic = NDBT_Thread::Magic;
+
+ m_state = Wait;
+ m_thread_set = thread_set;
+ m_thread_no = thread_no;
+ m_func = 0;
+ m_input = 0;
+ m_output = 0;
+ m_ndb = 0;
+ m_err = 0;
+
+ m_mutex = NdbMutex_Create();
+ assert(m_mutex != 0);
+ m_cond = NdbCondition_Create();
+ assert(m_cond != 0);
+
+ char buf[20];
+  sprintf(buf, "NDBT_%04u", (unsigned)thread_no);
+ const char* name = strdup(buf);
+ assert(name != 0);
+
+ unsigned stacksize = 512 * 1024;
+ NDB_THREAD_PRIO prio = NDB_THREAD_PRIO_LOW;
+ m_thread = NdbThread_Create(NDBT_Thread_run,
+ (void**)this, stacksize, name, prio);
+ assert(m_thread != 0);
+}
+
+NDBT_Thread::~NDBT_Thread()
+{
+ if (m_thread != 0) {
+ NdbThread_Destroy(&m_thread);
+ m_thread = 0;
+ }
+ if (m_cond != 0) {
+ NdbCondition_Destroy(m_cond);
+ m_cond = 0;
+ }
+ if (m_mutex != 0) {
+ NdbMutex_Destroy(m_mutex);
+ m_mutex = 0;
+ }
+}
+
+static void*
+NDBT_Thread_run(void* arg)
+{
+ assert(arg != 0);
+ NDBT_Thread& thr = *(NDBT_Thread*)arg;
+ assert(thr.m_magic == NDBT_Thread::Magic);
+ thr.run();
+ return 0;
+}
+
+void
+NDBT_Thread::run()
+{
+ while (1) {
+ lock();
+ while (m_state != Start && m_state != Exit) {
+ wait();
+ }
+ if (m_state == Exit) {
+ unlock();
+ break;
+ }
+ (*m_func)(*this);
+ m_state = Stop;
+ signal();
+ unlock();
+ }
+}
+
+// methods for main process
+
+void
+NDBT_Thread::start()
+{
+ lock();
+ m_state = Start;
+ signal();
+ unlock();
+}
+
+void
+NDBT_Thread::stop()
+{
+ lock();
+ while (m_state != Stop)
+ wait();
+ m_state = Wait;
+ unlock();
+}
+
+void
+NDBT_Thread::exit()
+{
+ lock();
+ m_state = Exit;
+ signal();
+ unlock();
+}
+
+void
+NDBT_Thread::join()
+{
+ NdbThread_WaitFor(m_thread, &m_status);
+ m_thread = 0;
+}
+
+int
+NDBT_Thread::connect(class Ndb_cluster_connection* ncc, const char* db)
+{
+ m_ndb = new Ndb(ncc, db);
+ if (m_ndb->init() == -1 ||
+ m_ndb->waitUntilReady() == -1) {
+ m_err = m_ndb->getNdbError().code;
+ return -1;
+ }
+ return 0;
+}
+
+void
+NDBT_Thread::disconnect()
+{
+ delete m_ndb;
+ m_ndb = 0;
+}
+
+// set of threads
+
+NDBT_ThreadSet::NDBT_ThreadSet(int count)
+{
+ m_count = count;
+ m_thread = new NDBT_Thread* [count];
+ for (int n = 0; n < count; n++) {
+ m_thread[n] = new NDBT_Thread(this, n);
+ }
+}
+
+NDBT_ThreadSet::~NDBT_ThreadSet()
+{
+ delete_output();
+ for (int n = 0; n < m_count; n++) {
+ delete m_thread[n];
+ m_thread[n] = 0;
+ }
+ delete [] m_thread;
+}
+
+void
+NDBT_ThreadSet::start()
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.start();
+ }
+}
+
+void
+NDBT_ThreadSet::stop()
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.stop();
+ }
+}
+
+void
+NDBT_ThreadSet::exit()
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.exit();
+ }
+}
+
+void
+NDBT_ThreadSet::join()
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.join();
+ }
+}
+
+void
+NDBT_ThreadSet::set_func(NDBT_ThreadFunc* func)
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.set_func(func);
+ }
+}
+
+void
+NDBT_ThreadSet::set_input(const void* input)
+{
+ for (int n = 0; n < m_count; n++) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.set_input(input);
+ }
+}
+
+void
+NDBT_ThreadSet::delete_output()
+{
+ for (int n = 0; n < m_count; n++) {
+ if (m_thread[n] != 0) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.delete_output();
+ }
+ }
+}
+
+int
+NDBT_ThreadSet::connect(class Ndb_cluster_connection* ncc, const char* db)
+{
+ for (int n = 0; n < m_count; n++) {
+ assert(m_thread[n] != 0);
+ NDBT_Thread& thr = *m_thread[n];
+ if (thr.connect(ncc, db) == -1)
+ return -1;
+ }
+ return 0;
+}
+
+void
+NDBT_ThreadSet::disconnect()
+{
+ for (int n = 0; n < m_count; n++) {
+ if (m_thread[n] != 0) {
+ NDBT_Thread& thr = *m_thread[n];
+ thr.disconnect();
+ }
+ }
+}
+
+int
+NDBT_ThreadSet::get_err() const
+{
+ for (int n = 0; n < m_count; n++) {
+ if (m_thread[n] != 0) {
+ NDBT_Thread& thr = *m_thread[n];
+ int err = thr.get_err();
+ if (err != 0)
+ return err;
+ }
+ }
+ return 0;
+}
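
Taken together, NDBT_Thread is a worker that sleeps on a condition variable and runs its installed function once per Wait -> Start -> Stop cycle, and NDBT_ThreadSet fans every call out over all workers. A minimal driver mirroring how the hugoPk* tools below use the class (worker and run_threads are illustrative names):

    static void worker(NDBT_Thread& thr)      // an NDBT_ThreadFunc
    {
      // per-thread work; thr.get_ndb() returns the Ndb created by connect()
    }

    static int run_threads(Ndb_cluster_connection* con, int count)
    {
      NDBT_ThreadSet ths(count);              // workers start in Wait state
      if (ths.connect(con, "TEST_DB") == -1)  // one Ndb object per worker
        return -1;
      ths.set_func(worker);
      ths.start();                            // Wait -> Start: each worker runs once
      ths.stop();                             // block until every worker reaches Stop
      ths.exit();                             // Wait -> Exit: run() loops terminate
      ths.join();
      ths.disconnect();
      return ths.get_err();                   // first nonzero per-worker error, if any
    }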
diff --git a/storage/ndb/test/tools/hugoFill.cpp b/storage/ndb/test/tools/hugoFill.cpp
index 713c2ca5152..20ceb61b066 100644
--- a/storage/ndb/test/tools/hugoFill.cpp
+++ b/storage/ndb/test/tools/hugoFill.cpp
@@ -30,9 +30,11 @@ int main(int argc, const char** argv){
const char* _tabname = NULL;
int _help = 0;
int _batch = 512;
+ const char* db = "TEST_DB";
struct getargs args[] = {
{ "batch", 'b', arg_integer, &_batch, "Number of operations in each transaction", "batch" },
+ { "database", 'd', arg_string, &db, "Database", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -55,7 +57,7 @@ int main(int argc, const char** argv){
{
return NDBT_ProgramExit(NDBT_FAILED);
}
- Ndb MyNdb(&con, "TEST_DB" );
+ Ndb MyNdb(&con, db);
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/storage/ndb/test/tools/hugoPkDelete.cpp b/storage/ndb/test/tools/hugoPkDelete.cpp
index b185eacdddf..aa8e6c654a7 100644
--- a/storage/ndb/test/tools/hugoPkDelete.cpp
+++ b/storage/ndb/test/tools/hugoPkDelete.cpp
@@ -20,22 +20,41 @@
#include <NdbApi.hpp>
#include <NdbMain.h>
#include <NDBT.hpp>
+#include <NDBT_Thread.hpp>
+#include <NDBT_Stats.hpp>
#include <NdbSleep.h>
#include <getarg.h>
#include <HugoTransactions.hpp>
+static NDBT_ThreadFunc hugoPkDelete;
+
+struct ThrInput {
+ const NdbDictionary::Table* pTab;
+ int records;
+ int batch;
+ int stats;
+};
+
+struct ThrOutput {
+ NDBT_Stats latency;
+};
+
int main(int argc, const char** argv){
ndb_init();
int _records = 0;
int _loops = 1;
- int _batch = 0;
+ int _threads = 1;
+ int _stats = 0;
+ int _batch = 1;
const char* _tabname = NULL;
int _help = 0;
struct getargs args[] = {
{ "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" },
+ { "threads", 't', arg_integer, &_threads, "number of threads (default 1)", "threads" },
+ { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" },
// { "batch", 'b', arg_integer, &_batch, "batch value", "batch" },
{ "records", 'r', arg_integer, &_records, "Number of records", "records" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
@@ -81,12 +100,57 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- HugoTransactions hugoTrans(*pTab);
+ // threads
+ NDBT_ThreadSet ths(_threads);
+
+ // create Ndb object for each thread
+ if (ths.connect(&con, "TEST_DB") == -1) {
+ ndbout << "connect failed: err=" << ths.get_err() << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ // input is options
+ ThrInput input;
+ ths.set_input(&input);
+ input.pTab = pTab;
+ input.records = _records;
+ input.batch = _batch;
+ input.stats = _stats;
+
+ // output is stats
+ ThrOutput output;
+ ths.set_output<ThrOutput>();
+
int i = 0;
- while (i<_loops || _loops==0) {
+ while (i < _loops || _loops == 0) {
ndbout << i << ": ";
- if (hugoTrans.pkDelRecords(&MyNdb, _records) != 0){
- return NDBT_ProgramExit(NDBT_FAILED);
+
+ ths.set_func(hugoPkDelete);
+ ths.start();
+ ths.stop();
+
+ if (ths.get_err())
+      return NDBT_ProgramExit(NDBT_FAILED);
+
+ if (_stats) {
+ NDBT_Stats latency;
+
+ // add stats from each thread
+ int n;
+ for (n = 0; n < ths.get_count(); n++) {
+ NDBT_Thread& thr = ths.get_thread(n);
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+ latency += output->latency;
+ }
+
+ ndbout
+ << "latency per batch (us): "
+ << " samples=" << latency.getCount()
+ << " min=" << (int)latency.getMin()
+ << " max=" << (int)latency.getMax()
+ << " mean=" << (int)latency.getMean()
+ << " stddev=" << (int)latency.getStddev()
+ << endl;
}
i++;
}
@@ -94,3 +158,23 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_OK);
}
+static void hugoPkDelete(NDBT_Thread& thr)
+{
+ const ThrInput* input = (const ThrInput*)thr.get_input();
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+
+ HugoTransactions hugoTrans(*input->pTab);
+ output->latency.reset();
+ if (input->stats)
+ hugoTrans.setStatsLatency(&output->latency);
+
+ NDBT_ThreadSet& ths = thr.get_thread_set();
+ hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no());
+
+ int ret;
+ ret = hugoTrans.pkDelRecords(thr.get_ndb(),
+ input->records,
+ input->batch);
+ if (ret != 0)
+ thr.set_err(ret);
+}
diff --git a/storage/ndb/test/tools/hugoPkRead.cpp b/storage/ndb/test/tools/hugoPkRead.cpp
index dd14203c16e..232f55b35b8 100644
--- a/storage/ndb/test/tools/hugoPkRead.cpp
+++ b/storage/ndb/test/tools/hugoPkRead.cpp
@@ -20,17 +20,33 @@
#include <NdbApi.hpp>
#include <NdbMain.h>
#include <NDBT.hpp>
+#include <NDBT_Thread.hpp>
+#include <NDBT_Stats.hpp>
#include <NdbSleep.h>
#include <getarg.h>
#include <HugoTransactions.hpp>
+static NDBT_ThreadFunc hugoPkRead;
+
+struct ThrInput {
+ const NdbDictionary::Table* pTab;
+ int records;
+ int batch;
+ int stats;
+};
+
+struct ThrOutput {
+ NDBT_Stats latency;
+};
int main(int argc, const char** argv){
ndb_init();
int _records = 0;
int _loops = 1;
+ int _threads = 1;
+ int _stats = 0;
int _abort = 0;
int _batch = 1;
const char* _tabname = NULL;
@@ -39,6 +55,8 @@ int main(int argc, const char** argv){
struct getargs args[] = {
{ "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" },
{ "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" },
+ { "threads", 't', arg_integer, &_threads, "number of threads (default 1)", "threads" },
+ { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" },
{ "batch", 'b', arg_integer, &_batch, "batch value(not 0)", "batch" },
{ "records", 'r', arg_integer, &_records, "Number of records", "records" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
@@ -64,6 +82,7 @@ int main(int argc, const char** argv){
{
return NDBT_ProgramExit(NDBT_FAILED);
}
+
Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
@@ -81,12 +100,57 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- HugoTransactions hugoTrans(*pTab);
+ // threads
+ NDBT_ThreadSet ths(_threads);
+
+ // create Ndb object for each thread
+ if (ths.connect(&con, "TEST_DB") == -1) {
+ ndbout << "connect failed: err=" << ths.get_err() << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ // input is options
+ ThrInput input;
+ ths.set_input(&input);
+ input.pTab = pTab;
+ input.records = _records;
+ input.batch = _batch;
+ input.stats = _stats;
+
+ // output is stats
+ ThrOutput output;
+ ths.set_output<ThrOutput>();
+
int i = 0;
- while (i<_loops || _loops==0) {
+ while (i < _loops || _loops == 0) {
ndbout << i << ": ";
- if (hugoTrans.pkReadRecords(&MyNdb, _records, _batch) != 0){
- return NDBT_ProgramExit(NDBT_FAILED);
+
+ ths.set_func(hugoPkRead);
+ ths.start();
+ ths.stop();
+
+ if (ths.get_err())
+      return NDBT_ProgramExit(NDBT_FAILED);
+
+ if (_stats) {
+ NDBT_Stats latency;
+
+ // add stats from each thread
+ int n;
+ for (n = 0; n < ths.get_count(); n++) {
+ NDBT_Thread& thr = ths.get_thread(n);
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+ latency += output->latency;
+ }
+
+ ndbout
+ << "latency per batch (us): "
+ << " samples=" << latency.getCount()
+ << " min=" << (int)latency.getMin()
+ << " max=" << (int)latency.getMax()
+ << " mean=" << (int)latency.getMean()
+ << " stddev=" << (int)latency.getStddev()
+ << endl;
}
i++;
}
@@ -94,3 +158,20 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_OK);
}
+static void hugoPkRead(NDBT_Thread& thr)
+{
+ const ThrInput* input = (const ThrInput*)thr.get_input();
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+
+ HugoTransactions hugoTrans(*input->pTab);
+ output->latency.reset();
+ if (input->stats)
+ hugoTrans.setStatsLatency(&output->latency);
+
+ int ret;
+ ret = hugoTrans.pkReadRecords(thr.get_ndb(),
+ input->records,
+ input->batch);
+ if (ret != 0)
+ thr.set_err(ret);
+}
diff --git a/storage/ndb/test/tools/hugoPkUpdate.cpp b/storage/ndb/test/tools/hugoPkUpdate.cpp
index 3e950bc96cd..b920a4f396a 100644
--- a/storage/ndb/test/tools/hugoPkUpdate.cpp
+++ b/storage/ndb/test/tools/hugoPkUpdate.cpp
@@ -20,24 +20,43 @@
#include <NdbApi.hpp>
#include <NdbMain.h>
#include <NDBT.hpp>
+#include <NDBT_Thread.hpp>
+#include <NDBT_Stats.hpp>
#include <NdbSleep.h>
#include <getarg.h>
#include <HugoTransactions.hpp>
+static NDBT_ThreadFunc hugoPkUpdate;
+
+struct ThrInput {
+ const NdbDictionary::Table* pTab;
+ int records;
+ int batch;
+ int stats;
+};
+
+struct ThrOutput {
+ NDBT_Stats latency;
+};
+
int main(int argc, const char** argv){
ndb_init();
int _records = 0;
int _loops = 1;
+ int _threads = 1;
+ int _stats = 0;
int _abort = 0;
- int _batch = 0;
+ int _batch = 1;
const char* _tabname = NULL, *db = 0;
int _help = 0;
struct getargs args[] = {
{ "aborts", 'a', arg_integer, &_abort, "percent of transactions that are aborted", "abort%" },
{ "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" },
+ { "threads", 't', arg_integer, &_threads, "number of threads (default 1)", "threads" },
+ { "stats", 's', arg_flag, &_stats, "report latency per batch", "stats" },
// { "batch", 'b', arg_integer, &_batch, "batch value", "batch" },
{ "records", 'r', arg_integer, &_records, "Number of records", "records" },
{ "usage", '?', arg_flag, &_help, "Print help", "" },
@@ -83,16 +102,81 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- HugoTransactions hugoTrans(*pTab);
+ // threads
+ NDBT_ThreadSet ths(_threads);
+
+ // create Ndb object for each thread
+ if (ths.connect(&con, "TEST_DB") == -1) {
+ ndbout << "connect failed: err=" << ths.get_err() << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ // input is options
+ ThrInput input;
+ ths.set_input(&input);
+ input.pTab = pTab;
+ input.records = _records;
+ input.batch = _batch;
+ input.stats = _stats;
+
+ // output is stats
+ ThrOutput output;
+ ths.set_output<ThrOutput>();
+
int i = 0;
- while (i<_loops || _loops==0) {
- ndbout << "loop " << i << ": ";
- if (hugoTrans.pkUpdateRecords(&MyNdb,
- _records) != 0){
- return NDBT_ProgramExit(NDBT_FAILED);
+ while (i < _loops || _loops == 0) {
+ ndbout << i << ": ";
+
+ ths.set_func(hugoPkUpdate);
+ ths.start();
+ ths.stop();
+
+ if (ths.get_err())
+      return NDBT_ProgramExit(NDBT_FAILED);
+
+ if (_stats) {
+ NDBT_Stats latency;
+
+ // add stats from each thread
+ int n;
+ for (n = 0; n < ths.get_count(); n++) {
+ NDBT_Thread& thr = ths.get_thread(n);
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+ latency += output->latency;
+ }
+
+ ndbout
+ << "latency per batch (us): "
+ << " samples=" << latency.getCount()
+ << " min=" << (int)latency.getMin()
+ << " max=" << (int)latency.getMax()
+ << " mean=" << (int)latency.getMean()
+ << " stddev=" << (int)latency.getStddev()
+ << endl;
}
i++;
}
return NDBT_ProgramExit(NDBT_OK);
}
+
+static void hugoPkUpdate(NDBT_Thread& thr)
+{
+ const ThrInput* input = (const ThrInput*)thr.get_input();
+ ThrOutput* output = (ThrOutput*)thr.get_output();
+
+ HugoTransactions hugoTrans(*input->pTab);
+ output->latency.reset();
+ if (input->stats)
+ hugoTrans.setStatsLatency(&output->latency);
+
+ NDBT_ThreadSet& ths = thr.get_thread_set();
+ hugoTrans.setThrInfo(ths.get_count(), thr.get_thread_no());
+
+ int ret;
+ ret = hugoTrans.pkUpdateRecords(thr.get_ndb(),
+ input->records,
+ input->batch);
+ if (ret != 0)
+ thr.set_err(ret);
+}
diff --git a/storage/ndb/tools/delete_all.cpp b/storage/ndb/tools/delete_all.cpp
index 4e3037f1941..1bf89f5a32f 100644
--- a/storage/ndb/tools/delete_all.cpp
+++ b/storage/ndb/tools/delete_all.cpp
@@ -36,16 +36,16 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "transactional", 't', "Single transaction (may run out of operations)",
- (gptr*) &_transactional, (gptr*) &_transactional, 0,
+ (uchar**) &_transactional, (uchar**) &_transactional, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "tupscan", 999, "Run tupscan",
- (gptr*) &_tupscan, (gptr*) &_tupscan, 0,
+ (uchar**) &_tupscan, (uchar**) &_tupscan, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "diskscan", 999, "Run diskcan",
- (gptr*) &_diskscan, (gptr*) &_diskscan, 0,
+ (uchar**) &_diskscan, (uchar**) &_diskscan, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp
index 9eb0cf67ceb..831005139de 100644
--- a/storage/ndb/tools/desc.cpp
+++ b/storage/ndb/tools/desc.cpp
@@ -39,16 +39,16 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "unqualified", 'u', "Use unqualified table names",
- (gptr*) &_unqualified, (gptr*) &_unqualified, 0,
+ (uchar**) &_unqualified, (uchar**) &_unqualified, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "extra-partition-info", 'p', "Print more info per partition",
- (gptr*) &_partinfo, (gptr*) &_partinfo, 0,
+ (uchar**) &_partinfo, (uchar**) &_partinfo, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "retries", 'r', "Retry every second for # retries",
- (gptr*) &_retries, (gptr*) &_retries, 0,
+ (uchar**) &_retries, (uchar**) &_retries, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp
index 256c40e1924..ec88f331a80 100644
--- a/storage/ndb/tools/drop_index.cpp
+++ b/storage/ndb/tools/drop_index.cpp
@@ -30,7 +30,7 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp
index a7accb904a4..8d07afbbf50 100644
--- a/storage/ndb/tools/drop_tab.cpp
+++ b/storage/ndb/tools/drop_tab.cpp
@@ -30,7 +30,7 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp
index 6a73bcc54f5..45129cb34af 100644
--- a/storage/ndb/tools/listTables.cpp
+++ b/storage/ndb/tools/listTables.cpp
@@ -256,22 +256,22 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_show_tables"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "loops", 'l', "loops",
- (gptr*) &_loops, (gptr*) &_loops, 0,
+ (uchar**) &_loops, (uchar**) &_loops, 0,
GET_INT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 },
{ "type", 't', "type",
- (gptr*) &_type, (gptr*) &_type, 0,
+ (uchar**) &_type, (uchar**) &_type, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "unqualified", 'u', "Use unqualified table names",
- (gptr*) &_unqualified, (gptr*) &_unqualified, 0,
+ (uchar**) &_unqualified, (uchar**) &_unqualified, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "parsable", 'p', "Return output suitable for mysql LOAD DATA INFILE",
- (gptr*) &_parsable, (gptr*) &_parsable, 0,
+ (uchar**) &_parsable, (uchar**) &_parsable, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "show-temp-status", OPT_SHOW_TMP_STATUS, "Show table temporary flag",
- (gptr*) &show_temp_status, (gptr*) &show_temp_status, 0,
+ (uchar**) &show_temp_status, (uchar**) &show_temp_status, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/ndb_config.cpp b/storage/ndb/tools/ndb_config.cpp
index 31fc59a8b83..af36103f947 100644
--- a/storage/ndb/tools/ndb_config.cpp
+++ b/storage/ndb/tools/ndb_config.cpp
@@ -58,37 +58,37 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_config"),
{ "nodes", 256, "Print nodes",
- (gptr*) &g_nodes, (gptr*) &g_nodes,
+ (uchar**) &g_nodes, (uchar**) &g_nodes,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ "connections", 256, "Print connections",
- (gptr*) &g_connections, (gptr*) &g_connections,
+ (uchar**) &g_connections, (uchar**) &g_connections,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ "query", 'q', "Query option(s)",
- (gptr*) &g_query, (gptr*) &g_query,
+ (uchar**) &g_query, (uchar**) &g_query,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "host", 256, "Host",
- (gptr*) &g_host, (gptr*) &g_host,
+ (uchar**) &g_host, (uchar**) &g_host,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "type", 258, "Type of node/connection",
- (gptr*) &g_type, (gptr*) &g_type,
+ (uchar**) &g_type, (uchar**) &g_type,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "id", 258, "Nodeid",
- (gptr*) &g_nodeid, (gptr*) &g_nodeid,
+ (uchar**) &g_nodeid, (uchar**) &g_nodeid,
0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "nodeid", 258, "Nodeid",
- (gptr*) &g_nodeid, (gptr*) &g_nodeid,
+ (uchar**) &g_nodeid, (uchar**) &g_nodeid,
0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "fields", 'f', "Field separator",
- (gptr*) &g_field_delimiter, (gptr*) &g_field_delimiter,
+ (uchar**) &g_field_delimiter, (uchar**) &g_field_delimiter,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "rows", 'r', "Row separator",
- (gptr*) &g_row_delimiter, (gptr*) &g_row_delimiter,
+ (uchar**) &g_row_delimiter, (uchar**) &g_row_delimiter,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "config-file", 256, "Path to config.ini",
- (gptr*) &g_config_file, (gptr*) &g_config_file,
+ (uchar**) &g_config_file, (uchar**) &g_config_file,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "mycnf", 256, "Read config from my.cnf",
- (gptr*) &g_mycnf, (gptr*) &g_mycnf,
+ (uchar**) &g_mycnf, (uchar**) &g_mycnf,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/ndb_error_reporter b/storage/ndb/tools/ndb_error_reporter
index 2b5aadb6171..7ad7a2f478a 100644
--- a/storage/ndb/tools/ndb_error_reporter
+++ b/storage/ndb/tools/ndb_error_reporter
@@ -62,13 +62,13 @@ foreach my $node (@nodes)
(($config_get_fs)?" with filesystem":"").
"\n\n";
my $recurse= ($config_get_fs)?'-r ':'';
- system 'scp '.$recurse.$config_username.config($node,'host').
+ system 'scp -p '.$recurse.$config_username.config($node,'host').
':'.config($node,'datadir')."/ndb_".$node."* ".
"$reportdir/\n";
}
print "\n\n Copying configuration file...\n\n\t$config_file\n\n";
-system "cp $config_file $reportdir/";
+system "cp -p $config_file $reportdir/";
my $r = system 'bzip2 2>&1 > /dev/null < /dev/null';
my $outfile;
diff --git a/storage/ndb/tools/ndb_size.pl b/storage/ndb/tools/ndb_size.pl
index 4ecdc297cd3..3537a9e8490 100644
--- a/storage/ndb/tools/ndb_size.pl
+++ b/storage/ndb/tools/ndb_size.pl
@@ -169,7 +169,9 @@ use Class::MethodMaker [
vdm_versions
ddm_versions ) ],
scalar => [ qw( name
- rows ) ],
+ rows
+ schema
+ real_table_name) ],
hash => [ qw( columns
indexes
indexed_columns
@@ -198,6 +200,16 @@ use Class::MethodMaker [
scalar => [ { -default=> 4 },'align'],
];
+sub table_name
+{
+ my ($self) = @_;
+ if ($self->real_table_name) {
+ return $self->real_table_name;
+ }else {
+ return $self->name;
+ }
+}
+
sub compute_row_size
{
my ($self, $releases) = @_;
@@ -391,14 +403,30 @@ sub compute_estimate
package main;
-my ($dbh,$database,$hostname,$user,$password,$help,$savequeries,$loadqueries,$debug,$format);
+my ($dbh,
+ $database,
+ $socket,
+ $hostname,
+ $user,
+ $password);
+
+my ($help,
+ $savequeries,
+ $loadqueries,
+ $debug,
+ $format,
+ $excludetables,
+ $excludedbs);
GetOptions('database|d=s'=>\$database,
'hostname=s'=>\$hostname,
+ 'socket=s'=>\$socket,
'user|u=s'=>\$user,
'password|p=s'=>\$password,
'savequeries|s=s'=>\$savequeries,
'loadqueries|l=s'=>\$loadqueries,
+ 'excludetables=s'=>\$excludetables,
+ 'excludedbs=s'=>\$excludedbs,
'help|usage|h!'=>\$help,
'debug'=>\$debug,
'format|f=s'=>\$format,
@@ -406,32 +434,75 @@ GetOptions('database|d=s'=>\$database,
my $report= new MySQL::NDB::Size::Report;
-if($help || !$database)
+if($help)
{
print STDERR "Usage:\n";
- print STDERR "\tndb_size.pl --database=<db name> [--hostname=<host>]"
+ print STDERR "\tndb_size.pl --database=<db name>|ALL [--hostname=<host>] "
+ ."[--socket=<socket>] "
."[--user=<user>] [--password=<password>] [--help|-h] [--format=(html|text)] [--loadqueries=<file>] [--savequeries=<file>]\n\n";
+ print STDERR "\t--database=<db name> ALL may be specified to examine all "
+ ."databases\n";
print STDERR "\t--hostname=<host>:<port> can be used to designate a "
."specific port\n";
print STDERR "\t--hostname defaults to localhost\n";
print STDERR "\t--user and --password default to empty string\n";
print STDERR "\t--format=(html|text) Output format\n";
+ print STDERR "\t--excludetables Comma separated list of table names to skip\n";
+ print STDERR "\t--excludedbs Comma separated list of database names to skip\n";
print STDERR "\t--savequeries=<file> saves all queries to the DB into <file>\n";
print STDERR "\t--loadqueries=<file> loads query results from <file>. Doesn't connect to DB.\n";
exit(1);
}
+
$hostname= 'localhost' unless $hostname;
my %queries; # used for loadqueries/savequeries
if(!$loadqueries)
{
- my $dsn = "DBI:mysql:database=$database;host=$hostname";
+ my $dsn = "DBI:mysql:host=$hostname";
+ $dsn.= ";mysql_socket=$socket" if ($socket);
$dbh= DBI->connect($dsn, $user, $password) or exit(1);
- $report->database($database);
$report->dsn($dsn);
}
+
+my @dbs;
+if ($database && !($database =~ /^ALL$/i))
+{
+ @dbs = split(',', $database);
+}
+else
+{
+ # Do all databases
+ @dbs = map { $_->[0] } @{ $dbh->selectall_arrayref("show databases") };
+}
+
+my %withdb = map {$_ => 1} @dbs;
+foreach (split ",", $excludedbs || '')
+{
+ delete $withdb{$_};
+}
+delete $withdb{'mysql'};
+delete $withdb{'INFORMATION_SCHEMA'};
+delete $withdb{'information_schema'};
+
+my $dblist = join (',', map { $dbh->quote($_) } keys %withdb );
+
+$excludetables = join (',', map { $dbh->quote($_) } split ',', $excludetables )
+ if $excludetables;
+
+if(!$loadqueries)
+{
+ if (scalar(keys %withdb)>1)
+ {
+ $report->database("databases: $dblist");
+ }
+ else
+ {
+ $report->database("database: $dblist");
+ }
+}
else
{
open Q,"< $loadqueries";
@@ -441,7 +512,6 @@ else
%queries= %$e;
close Q;
$report->database("file:$loadqueries");
- $report->dsn("file:$loadqueries");
}
$report->versions('4.1','5.0','5.1');
@@ -454,7 +524,25 @@ if($loadqueries)
}
else
{
- $tables= $dbh->selectall_arrayref("show tables");
+ my $sql= "select t.TABLE_NAME,t.TABLE_SCHEMA " .
+ " from information_schema.TABLES t " .
+ " where t.TABLE_SCHEMA in ( $dblist ) ";
+
+ $sql.=" and t.TABLE_NAME not in " .
+ " ( $excludetables )"
+ if ($excludetables);
+
+ $tables= $dbh->selectall_arrayref($sql);
+
+ if (!$tables) {
+    print "WARNING: problem selecting from INFORMATION_SCHEMA ($sql)\n";
+ if ($#dbs>0) {
+      print "\t attempting to fall back to show tables from $database";
+ $tables= $dbh->selectall_arrayref("show tables from $database\n");
+ } else {
+      print "--database=ALL is not supported in 4.1. Please specify --database=<db name>\n";
+ }
+ }
$queries{"show tables"}= $tables;
}
@@ -510,6 +598,8 @@ sub do_table {
{$col->dm(4)}
elsif($type =~ /float/)
{
+ my @sz= split ',', $size;
+ $size= $sz[0]+$sz[1];
if(!defined($size) || $size<=24)
{$col->dm(4)}
else
@@ -543,9 +633,10 @@ sub do_table {
$col->dm($fixed);
if(!$col->Key()) # currently keys must be non varsized
{
- my $sql= "select avg(length(`"
- .$colname
- ."`)) from `".$t->name().'`';
+ my $sql= sprintf("select avg(length(`%s`)) " .
+ " from `%s`.`%s` " ,
+ $colname, $t->schema(), $t->table_name());
+
my @dynamic;
if($loadqueries)
{
@@ -573,9 +664,11 @@ sub do_table {
$blobhunk= 8000 if $type=~ /longblob/;
$blobhunk= 4000 if $type=~ /mediumblob/;
- my $sql= "select SUM(CEILING(".
- "length(`$colname`)/$blobhunk))"
- ."from `".$t->name."`";
+ my $sql= sprintf("select SUM(CEILING(length(`%s`)/%s)) " .
+ " from `%s`.`%s`" ,
+ $colname, $blobhunk,
+ $t->schema(), $t->table_name() );
+
my @blobsize;
if($loadqueries)
{
@@ -589,11 +682,12 @@ sub do_table {
$blobsize[0]=0 if !defined($blobsize[0]);
# Is a supporting table, add it to the lists:
- $report->supporting_tables_set($t->name()."\$BLOB_$colname" => 1);
- $t->supporting_tables_push($t->name()."\$BLOB_$colname");
+ $report->supporting_tables_set($t->schema().".".$t->name()."\$BLOB_$colname" => 1);
+ $t->supporting_tables_push($t->schema().".".$t->name()."\$BLOB_$colname");
my $st= new MySQL::NDB::Size::Table(name =>
$t->name()."\$BLOB_$colname",
+ schema => $t->schema(),
rows => $blobsize[0],
row_dm_overhead =>
{ '4.1' => 12,
@@ -632,7 +726,9 @@ sub do_table {
$col->size($size);
$t->columns_set( $colname => $col );
}
- $report->tables_set( $t->name => $t );
+ #print "setting tables: ",$t->schema(), $t->table_name(), $t->name, $t->real_table_name || "" , "\n";
+ # Use $t->name here instead of $t->table_name() to avoid namespace conflicts
+ $report->tables_set( $t->schema().".".$t->name() => $t );
# And now... the IndexMemory usage.
#
@@ -727,14 +823,16 @@ sub do_table {
# Is a supporting table, add it to the lists:
my $idxname= $t->name().'_'.join('_',@{$indexes{$index}{columns}}).
"\$unique";
- $report->supporting_tables_set($idxname => 1);
- $t->supporting_tables_push($idxname);
+ $report->supporting_tables_set($t->schema().".".$idxname => 1);
+ $t->supporting_tables_push($t->schema().".".$idxname);
$t->indexed_columns_set($_ => 1)
foreach @{$indexes{$index}{columns}};
my $st= new MySQL::NDB::Size::Table(name => $idxname,
+ real_table_name => $t->table_name(),
rows => $count[0],
+ schema => $t->schema(),
row_dm_overhead =>
{ '4.1' => 12,
'5.0' => 12,
@@ -745,7 +843,6 @@ sub do_table {
row_ddm_overhead =>
{ '5.1' => 8 },
);
-
do_table($st,
\%idxcols,
{
@@ -766,9 +863,10 @@ sub do_table {
foreach(@{$tables})
{
my $table= @{$_}[0];
+ my $schema = @{$_}[1] || $database;
my $info;
{
- my $sql= 'describe `'.$table.'`';
+ my $sql= 'describe `'.$schema.'`.`'.$table.'`';
if($loadqueries)
{
$info= $queries{$sql};
@@ -781,7 +879,7 @@ foreach(@{$tables})
}
my @count;
{
- my $sql= 'select count(*) from `'.$table.'`';
+ my $sql= 'select count(*) from `'.$schema.'`.`'.$table.'`';
if($loadqueries)
{
@count= @{$queries{$sql}};
@@ -797,7 +895,7 @@ foreach(@{$tables})
{
my @show_indexes;
{
- my $sql= "show index from `".$table.'`';
+ my $sql= "show index from `".$schema.'`.`'.$table.'`';
if($loadqueries)
{
@show_indexes= @{$queries{$sql}};
@@ -826,6 +924,7 @@ foreach(@{$tables})
}
}
my $t= new MySQL::NDB::Size::Table(name => $table,
+ schema => $schema,
rows => $count[0],
row_dm_overhead =>
{ '4.1' => 12,
@@ -974,6 +1073,8 @@ if($debug)
eval 'print STDERR Dumper($report)';
}
+$format= "text" unless $format;
+
if($format eq 'text')
{
my $text_out= new MySQL::NDB::Size::Output::Text($report);
@@ -984,12 +1085,6 @@ elsif($format eq 'html')
my $html_out= new MySQL::NDB::Size::Output::HTML($report);
$html_out->output();
}
-else
-{
- # default to text output
- my $text_out= new MySQL::NDB::Size::Output::Text($report);
- $text_out->output();
-}
package MySQL::NDB::Size::Output::Text;
use Data::Dumper;
@@ -1008,7 +1103,7 @@ sub output
my $self= shift;
my $r= $self->{report};
- print $self->ul("ndb_size.pl report for database ". $r->database().
+ print $self->ul("ndb_size.pl report for ". $r->database().
" (".(($r->tables_count()||0)-($r->supporting_tables_count()||0)).
" tables)");
@@ -1188,8 +1283,8 @@ sub output
my $st= $r->tables->{$_};
foreach(@{$st->indexes_keys()})
{
- printf $f, $st->name() if $_ eq 'PRIMARY';
- printf $f, $st->name().$_ if $_ ne 'PRIMARY';
+ printf $f, $st->schema().".".$st->name() if $_ eq 'PRIMARY';
+ printf $f, $st->schema().".".$st->name().$_ if $_ ne 'PRIMARY';
my $sti= $st->indexes->{$_};
printf $v, ($sti->ver_im_exists($_))
?$sti->ver_im->{$_}
@@ -1367,7 +1462,7 @@ print <<ENDHTML;
<body>
ENDHTML
- print $self->h1("ndb_size.pl report for database ". $r->database().
+ print $self->h1("ndb_size.pl report for ". $r->database().
" (".(($r->tables_count()||0)-($r->supporting_tables_count()||0)).
" tables)");
@@ -1579,8 +1674,8 @@ ENDHTML
foreach(@{$st->indexes_keys()})
{
my @r;
- push @r, $st->name() if $_ eq 'PRIMARY';
- push @r, $st->name().$_ if $_ ne 'PRIMARY';
+ push @r, $st->schema().".".$st->name() if $_ eq 'PRIMARY';
+ push @r, $st->schema().".".$st->name().$_ if $_ ne 'PRIMARY';
my $sti= $st->indexes->{$_};
push @r, ($sti->ver_im_exists($_))
?$sti->ver_im->{$_}
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index f99cacfc613..a7d8a9d10d9 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -607,7 +607,10 @@ RestoreDataIterator::getNextTuple(int & res)
attr_data->size = 4*sz;
//if (m_currentTable->getTableId() >= 2) { ndbout << "fix i=" << i << " off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
-
+ if(!m_hostByteOrder
+ && attr_desc->m_column->getType() == NdbDictionary::Column::Timestamp)
+ attr_data->u_int32_value[0] = Twiddle32(attr_data->u_int32_value[0]);
+
if(!Twiddle(attr_desc, attr_data))
{
res = -1;
@@ -664,6 +667,31 @@ RestoreDataIterator::getNextTuple(int & res)
*/
const Uint32 arraySize = sz / (attr_desc->size / 8);
assert(arraySize <= attr_desc->arraySize);
+
+ //convert the length of blob(v1) and text(v1)
+ if(!m_hostByteOrder
+ && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
+ || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
+ && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
+ {
+ char* p = (char*)&attr_data->u_int64_value[0];
+ Uint64 x;
+ memcpy(&x, p, sizeof(Uint64));
+ x = Twiddle64(x);
+ memcpy(p, &x, sizeof(Uint64));
+ }
+
+ //convert datetime type
+ if(!m_hostByteOrder
+ && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
+ {
+ char* p = (char*)&attr_data->u_int64_value[0];
+ Uint64 x;
+ memcpy(&x, p, sizeof(Uint64));
+ x = Twiddle64(x);
+ memcpy(p, &x, sizeof(Uint64));
+ }
+
if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
{
res = -1;
@@ -873,13 +901,32 @@ bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId)
debug << "RestoreDataIterator::getNextFragment" << endl;
- if (buffer_read(&Header, sizeof(Header), 1) != 1){
+ while (1)
+ {
+ /* read first part of header */
+ if (buffer_read(&Header, 8, 1) != 1)
+ {
+ ret = 0;
+ return false;
+ } // if
+
+ /* skip if EMPTY_ENTRY */
+ Header.SectionType = ntohl(Header.SectionType);
+ Header.SectionLength = ntohl(Header.SectionLength);
+ if (Header.SectionType == BackupFormat::EMPTY_ENTRY)
+ {
+ void *tmp;
+ buffer_get_ptr(&tmp, Header.SectionLength*4-8, 1);
+ continue;
+ }
+ break;
+ }
+ /* read rest of header */
+ if (buffer_read(((char*)&Header)+8, sizeof(Header)-8, 1) != 1)
+ {
ret = 0;
return false;
- } // if
-
- Header.SectionType = ntohl(Header.SectionType);
- Header.SectionLength = ntohl(Header.SectionLength);
+ }
Header.TableId = ntohl(Header.TableId);
Header.FragmentNo = ntohl(Header.FragmentNo);
Header.ChecksumType = ntohl(Header.ChecksumType);
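
The first set of hunks above applies byte-order conversion to Timestamp and Datetime values and to the 64-bit length word of fixed-array Blob/Text columns when the backup was written on a host of the opposite endianness; the last hunk reads the fragment section header in two steps so that EMPTY_ENTRY padding sections can be skipped. Twiddle32/Twiddle64 are defined elsewhere in the restore code and not shown in this patch; a plain sketch of the 64-bit swap Twiddle64 is assumed to perform:

    // Assumed equivalent of the Twiddle64 byte swap invoked above.
    static Uint64 swap64(Uint64 x)
    {
      return ((x & 0x00000000000000ffULL) << 56) |
             ((x & 0x000000000000ff00ULL) << 40) |
             ((x & 0x0000000000ff0000ULL) << 24) |
             ((x & 0x00000000ff000000ULL) <<  8) |
             ((x & 0x000000ff00000000ULL) >>  8) |
             ((x & 0x0000ff0000000000ULL) >> 24) |
             ((x & 0x00ff000000000000ULL) >> 40) |
             ((x & 0xff00000000000000ULL) >> 56);
    }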
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index 3dd4b072671..89f680a80e4 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -424,13 +424,14 @@ error:
bool BackupRestore::translate_frm(NdbDictionary::Table *table)
{
- const void *pack_data, *data, *new_pack_data;
+ uchar *pack_data, *data, *new_pack_data;
char *new_data;
- uint data_len, new_data_len, new_pack_len;
+ uint new_data_len;
+ size_t data_len, new_pack_len;
uint no_parts, extra_growth;
DBUG_ENTER("translate_frm");
- pack_data = table->getFrmData();
+ pack_data = (uchar*) table->getFrmData();
no_parts = table->getFragmentCount();
/*
Add max 4 characters per partition to handle worst case
@@ -442,7 +443,7 @@ bool BackupRestore::translate_frm(NdbDictionary::Table *table)
{
DBUG_RETURN(TRUE);
}
- if ((new_data = my_malloc(data_len + extra_growth, MYF(0))))
+  if (!(new_data = (char*) my_malloc(data_len + extra_growth, MYF(0))))
{
DBUG_RETURN(TRUE);
}
@@ -451,7 +452,7 @@ bool BackupRestore::translate_frm(NdbDictionary::Table *table)
my_free(new_data, MYF(0));
DBUG_RETURN(TRUE);
}
- if (packfrm((const void*)new_data, new_data_len,
+ if (packfrm((uchar*) new_data, new_data_len,
&new_pack_data, &new_pack_len))
{
my_free(new_data, MYF(0));
@@ -894,6 +895,21 @@ BackupRestore::table(const TableS & table){
{
copy.setMaxRows(table.getNoOfRecords());
}
+
+ NdbTableImpl &tableImpl = NdbTableImpl::getImpl(copy);
+ if (table.getBackupVersion() < MAKE_VERSION(5,1,0) && !m_no_upgrade){
+ for(int i= 0; i < copy.getNoOfColumns(); i++)
+ {
+ NdbDictionary::Column::Type t = copy.getColumn(i)->getType();
+
+ if (t == NdbDictionary::Column::Varchar ||
+ t == NdbDictionary::Column::Varbinary)
+ tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
+ if (t == NdbDictionary::Column::Longvarchar ||
+ t == NdbDictionary::Column::Longvarbinary)
+ tableImpl.getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
+ }
+ }
if (dict->createTable(copy) == -1)
{
@@ -1141,8 +1157,25 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
int size = attr_desc->size;
int arraySize = attr_desc->arraySize;
char * dataPtr = attr_data->string_value;
- Uint32 length = attr_data->size;
-
+ Uint32 length = 0;
+
+ if (!attr_data->null)
+ {
+ const unsigned char * src = (const unsigned char *)dataPtr;
+ switch(attr_desc->m_column->getType()){
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Varbinary:
+ length = src[0] + 1;
+ break;
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ length = src[0] + (src[1] << 8) + 2;
+ break;
+ default:
+ length = attr_data->size;
+ break;
+ }
+ }
if (j == 0 && tup.getTable()->have_auto_inc(i))
tup.getTable()->update_max_auto_val(dataPtr,size*arraySize);
@@ -1162,7 +1195,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
if (ret < 0) {
ndbout_c("Column: %d type %d %d %d %d",i,
attr_desc->m_column->getType(),
- size, arraySize, attr_data->size);
+ size, arraySize, length);
break;
}
}
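
The tuple_a() hunk above stops passing attr_data->size for VAR attributes and instead derives the significant length from the value's own length prefix: one prefix byte for Varchar/Varbinary, two little-endian prefix bytes for Longvarchar/Longvarbinary, with the prefix counted into the total. A standalone sketch of that rule (var_total_length is an illustrative name):

    // Total bytes of a VAR value including its length prefix, as computed above.
    static Uint32 var_total_length(const unsigned char* src, bool long_var)
    {
      if (long_var)                                  // Longvarchar/Longvarbinary
        return Uint32(src[0]) + (Uint32(src[1]) << 8) + 2;
      return Uint32(src[0]) + 1;                     // Varchar/Varbinary
    }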
diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp
index 0bc9d8e8d20..8694cbffb0c 100644
--- a/storage/ndb/tools/restore/consumer_restore.hpp
+++ b/storage/ndb/tools/restore/consumer_restore.hpp
@@ -51,6 +51,7 @@ public:
m_callback = 0;
m_free_callback = 0;
m_temp_error = false;
+ m_no_upgrade = false;
m_transactions = 0;
m_cache.m_old_table = 0;
}
@@ -91,6 +92,7 @@ public:
bool m_restore_meta;
bool m_no_restore_disk;
bool m_restore_epoch;
+  bool m_no_upgrade; // if true, don't upgrade ArrayType when restoring a 5.0 backup file
Uint32 m_logCount;
Uint32 m_dataCount;
diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp
index adccd024d88..7db77524ad8 100644
--- a/storage/ndb/tools/restore/restore_main.cpp
+++ b/storage/ndb/tools/restore/restore_main.cpp
@@ -34,6 +34,7 @@ static int ga_nodeId = 0;
static int ga_nParallelism = 128;
static int ga_backupId = 0;
static bool ga_dont_ignore_systab_0 = false;
+static bool ga_no_upgrade = false;
static Vector<class BackupConsumer *> g_consumers;
static BackupPrinter* g_printer = NULL;
@@ -99,95 +100,99 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_restore"),
{ "connect", 'c', "same as --connect-string",
- (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,
+ (uchar**) &opt_connect_str, (uchar**) &opt_connect_str, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "nodeid", 'n', "Backup files from node with id",
- (gptr*) &ga_nodeId, (gptr*) &ga_nodeId, 0,
+ (uchar**) &ga_nodeId, (uchar**) &ga_nodeId, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "backupid", 'b', "Backup id",
- (gptr*) &ga_backupId, (gptr*) &ga_backupId, 0,
+ (uchar**) &ga_backupId, (uchar**) &ga_backupId, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "restore_data", 'r',
"Restore table data/logs into NDB Cluster using NDBAPI",
- (gptr*) &_restore_data, (gptr*) &_restore_data, 0,
+ (uchar**) &_restore_data, (uchar**) &_restore_data, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "restore_meta", 'm',
"Restore meta data into NDB Cluster using NDBAPI",
- (gptr*) &_restore_meta, (gptr*) &_restore_meta, 0,
+ (uchar**) &_restore_meta, (uchar**) &_restore_meta, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "no-upgrade", 'u',
+ "Don't upgrade array type for var attributes, which don't resize VAR data and don't change column attributes",
+ (uchar**) &ga_no_upgrade, (uchar**) &ga_no_upgrade, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "no-restore-disk-objects", 'd',
"Dont restore disk objects (tablespace/logfilegroups etc)",
- (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk, 0,
+ (uchar**) &_no_restore_disk, (uchar**) &_no_restore_disk, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "restore_epoch", 'e',
"Restore epoch info into the status table. Convenient on a MySQL Cluster "
"replication slave, for starting replication. The row in "
NDB_REP_DB "." NDB_APPLY_TABLE " with id 0 will be updated/inserted.",
- (gptr*) &ga_restore_epoch, (gptr*) &ga_restore_epoch, 0,
+ (uchar**) &ga_restore_epoch, (uchar**) &ga_restore_epoch, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "skip-table-check", 's', "Skip table structure check during restore of data",
- (gptr*) &ga_skip_table_check, (gptr*) &ga_skip_table_check, 0,
+ (uchar**) &ga_skip_table_check, (uchar**) &ga_skip_table_check, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "parallelism", 'p',
"No of parallel transactions during restore of data."
"(parallelism can be 1 to 1024)",
- (gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0,
+ (uchar**) &ga_nParallelism, (uchar**) &ga_nParallelism, 0,
GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 },
- { "print", OPT_PRINT, "Print data and log to stdout",
- (gptr*) &_print, (gptr*) &_print, 0,
+ { "print", OPT_PRINT, "Print metadata, data and log to stdout",
+ (uchar**) &_print, (uchar**) &_print, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "print_data", OPT_PRINT_DATA, "Print data to stdout",
- (gptr*) &_print_data, (gptr*) &_print_data, 0,
+ (uchar**) &_print_data, (uchar**) &_print_data, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "print_meta", OPT_PRINT_META, "Print meta data to stdout",
- (gptr*) &_print_meta, (gptr*) &_print_meta, 0,
+ (uchar**) &_print_meta, (uchar**) &_print_meta, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "print_log", OPT_PRINT_LOG, "Print log to stdout",
- (gptr*) &_print_log, (gptr*) &_print_log, 0,
+ (uchar**) &_print_log, (uchar**) &_print_log, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "backup_path", OPT_BACKUP_PATH, "Path to backup files",
- (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0,
+ (uchar**) &ga_backupPath, (uchar**) &ga_backupPath, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "dont_ignore_systab_0", 'f',
"Experimental. Do not ignore system table during restore.",
- (gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0,
+ (uchar**) &ga_dont_ignore_systab_0, (uchar**) &ga_dont_ignore_systab_0, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP,
"Nodegroup map for ndbcluster. Syntax: list of (source_ng, dest_ng)",
- (gptr*) &opt_nodegroup_map_str,
- (gptr*) &opt_nodegroup_map_str,
+ (uchar**) &opt_nodegroup_map_str,
+ (uchar**) &opt_nodegroup_map_str,
0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "fields-enclosed-by", OPT_FIELDS_ENCLOSED_BY,
"Fields are enclosed by ...",
- (gptr*) &opt_fields_enclosed_by, (gptr*) &opt_fields_enclosed_by, 0,
+ (uchar**) &opt_fields_enclosed_by, (uchar**) &opt_fields_enclosed_by, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "fields-terminated-by", OPT_FIELDS_TERMINATED_BY,
"Fields are terminated by ...",
- (gptr*) &opt_fields_terminated_by,
- (gptr*) &opt_fields_terminated_by, 0,
+ (uchar**) &opt_fields_terminated_by,
+ (uchar**) &opt_fields_terminated_by, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "fields-optionally-enclosed-by", OPT_FIELDS_OPTIONALLY_ENCLOSED_BY,
"Fields are optionally enclosed by ...",
- (gptr*) &opt_fields_optionally_enclosed_by,
- (gptr*) &opt_fields_optionally_enclosed_by, 0,
+ (uchar**) &opt_fields_optionally_enclosed_by,
+ (uchar**) &opt_fields_optionally_enclosed_by, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "hex", OPT_HEX_FORMAT, "print binary types in hex format",
- (gptr*) &opt_hex_format, (gptr*) &opt_hex_format, 0,
+ (uchar**) &opt_hex_format, (uchar**) &opt_hex_format, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "tab", 'T', "Creates tab separated textfile for each table to "
"given path. (creates .txt files)",
- (gptr*) &tab_path, (gptr*) &tab_path, 0,
+ (uchar**) &tab_path, (uchar**) &tab_path, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{ "append", OPT_APPEND, "for --tab append data to file",
- (gptr*) &opt_append, (gptr*) &opt_append, 0,
+ (uchar**) &opt_append, (uchar**) &opt_append, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "lines-terminated-by", OPT_LINES_TERMINATED_BY, "",
- (gptr*) &opt_lines_terminated_by, (gptr*) &opt_lines_terminated_by, 0,
+ (uchar**) &opt_lines_terminated_by, (uchar**) &opt_lines_terminated_by, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "verbose", OPT_VERBOSE,
"verbosity",
- (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0,
+ (uchar**) &opt_verbose, (uchar**) &opt_verbose, 0,
GET_INT, REQUIRED_ARG, 1, 0, 255, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
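Aside (illustration, not part of the patch): the bulk of the option-table churn above is a mechanical re-cast of the variable pointers from (gptr*) to (uchar**), tracking the changed types of the value/u_max_value members of struct my_option in my_getopt.h. One entry under the new convention looks like this (option name and variable are illustrative):

  static my_bool my_flag= 0;

  static struct my_option example_options[]=
  {
    { "example-flag", 'x', "Illustrative boolean option",
      (uchar**) &my_flag, (uchar**) &my_flag, 0,
      GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
    { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }
  };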
@@ -460,6 +465,11 @@ o verify nodegroup mapping
restore->m_no_restore_disk = true;
}
+ if (ga_no_upgrade)
+ {
+ restore->m_no_upgrade = true;
+ }
+
if (ga_restore_epoch)
{
restore->m_restore_epoch = true;
@@ -644,6 +654,8 @@ main(int argc, char** argv)
g_options.appfmt(" -n %d", ga_nodeId);
if (_restore_meta)
g_options.appfmt(" -m");
+ if (ga_no_upgrade)
+ g_options.appfmt(" -u");
if (ga_skip_table_check)
g_options.appfmt(" -s");
if (_restore_data)
@@ -655,7 +667,6 @@ main(int argc, char** argv)
g_options.appfmt(" -p %d", ga_nParallelism);
g_connect_string = opt_connect_str;
-
/**
* we must always load meta data, even if we will only print it to stdout
*/
@@ -673,7 +684,7 @@ main(int argc, char** argv)
char buf[NDB_VERSION_STRING_BUF_SZ];
info.setLevel(254);
info << "Ndb version in backup files: "
- << getVersionString(version, 0, buf, sizeof(buf)) << endl;
+ << ndbGetVersionString(version, 0, buf, sizeof(buf)) << endl;
/**
* check whether we can restore the backup (right version).
@@ -683,9 +694,9 @@ main(int argc, char** argv)
if (version >= MAKE_VERSION(5,1,3) && version <= MAKE_VERSION(5,1,9))
{
err << "Restore program incompatible with backup versions between "
- << getVersionString(MAKE_VERSION(5,1,3), 0, buf, sizeof(buf))
+ << ndbGetVersionString(MAKE_VERSION(5,1,3), 0, buf, sizeof(buf))
<< " and "
- << getVersionString(MAKE_VERSION(5,1,9), 0, buf, sizeof(buf))
+ << ndbGetVersionString(MAKE_VERSION(5,1,9), 0, buf, sizeof(buf))
<< endl;
exitHandler(NDBT_FAILED);
}
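Aside (illustration, not part of the patch): the range check above compares packed version numbers. Assuming the usual ndb_version.h layout, MAKE_VERSION packs major/minor/build into a single integer:

  /* assumed layout, as in ndb_version.h */
  #define MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | (C))

  /* MAKE_VERSION(5,1,3) == 0x050103, so backups with
     5.1.3 <= version <= 5.1.9 are rejected above. */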
diff --git a/storage/ndb/tools/select_all.cpp b/storage/ndb/tools/select_all.cpp
index e2072f30edf..23d5f95f3f7 100644
--- a/storage/ndb/tools/select_all.cpp
+++ b/storage/ndb/tools/select_all.cpp
@@ -54,43 +54,43 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "parallelism", 'p', "parallelism",
- (gptr*) &_parallelism, (gptr*) &_parallelism, 0,
+ (uchar**) &_parallelism, (uchar**) &_parallelism, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)",
- (gptr*) &_lock, (gptr*) &_lock, 0,
+ (uchar**) &_lock, (uchar**) &_lock, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "order", 'o', "Sort resultset according to index",
- (gptr*) &_order, (gptr*) &_order, 0,
+ (uchar**) &_order, (uchar**) &_order, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "descending", 'z', "Sort descending (requires order flag)",
- (gptr*) &_descending, (gptr*) &_descending, 0,
+ (uchar**) &_descending, (uchar**) &_descending, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "header", 'h', "Print header",
- (gptr*) &_header, (gptr*) &_header, 0,
+ (uchar**) &_header, (uchar**) &_header, 0,
GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
{ "useHexFormat", 'x', "Output numbers in hexadecimal format",
- (gptr*) &_useHexFormat, (gptr*) &_useHexFormat, 0,
+ (uchar**) &_useHexFormat, (uchar**) &_useHexFormat, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "delimiter", 'D', "Column delimiter",
- (gptr*) &_delimiter, (gptr*) &_delimiter, 0,
+ (uchar**) &_delimiter, (uchar**) &_delimiter, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "disk", 256, "Dump disk ref",
- (gptr*) &_dumpDisk, (gptr*) &_dumpDisk, 0,
+ (uchar**) &_dumpDisk, (uchar**) &_dumpDisk, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "rowid", 256, "Dump rowid",
- (gptr*) &use_rowid, (gptr*) &use_rowid, 0,
+ (uchar**) &use_rowid, (uchar**) &use_rowid, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "gci", 256, "Dump gci",
- (gptr*) &use_gci, (gptr*) &use_gci, 0,
+ (uchar**) &use_gci, (uchar**) &use_gci, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "tupscan", 't', "Scan in tup order",
- (gptr*) &_tup, (gptr*) &_tup, 0,
+ (uchar**) &_tup, (uchar**) &_tup, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "nodata", 256, "Dont print data",
- (gptr*) &nodata, (gptr*) &nodata, 0,
+ (uchar**) &nodata, (uchar**) &nodata, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/select_count.cpp b/storage/ndb/tools/select_count.cpp
index 552d156b665..73982e886b5 100644
--- a/storage/ndb/tools/select_count.cpp
+++ b/storage/ndb/tools/select_count.cpp
@@ -43,13 +43,13 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "database", 'd', "Name of database table is in",
- (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ (uchar**) &_dbname, (uchar**) &_dbname, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "parallelism", 'p', "parallelism",
- (gptr*) &_parallelism, (gptr*) &_parallelism, 0,
+ (uchar**) &_parallelism, (uchar**) &_parallelism, 0,
GET_INT, REQUIRED_ARG, 240, 0, 0, 0, 0, 0 },
{ "lock", 'l', "Read(0), Read-hold(1), Exclusive(2)",
- (gptr*) &_lock, (gptr*) &_lock, 0,
+ (uchar**) &_lock, (uchar**) &_lock, 0,
GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
diff --git a/storage/ndb/tools/waiter.cpp b/storage/ndb/tools/waiter.cpp
index de8d15ac17a..a292ab9140a 100644
--- a/storage/ndb/tools/waiter.cpp
+++ b/storage/ndb/tools/waiter.cpp
@@ -46,17 +46,17 @@ static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_desc"),
{ "no-contact", 'n', "Wait for cluster no contact",
- (gptr*) &_no_contact, (gptr*) &_no_contact, 0,
+ (uchar**) &_no_contact, (uchar**) &_no_contact, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "not-started", OPT_WAIT_STATUS_NOT_STARTED, "Wait for cluster not started",
- (gptr*) &_not_started, (gptr*) &_not_started, 0,
+ (uchar**) &_not_started, (uchar**) &_not_started, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "single-user", OPT_WAIT_STATUS_SINGLE_USER,
"Wait for cluster to enter single user mode",
- (gptr*) &_single_user, (gptr*) &_single_user, 0,
+ (uchar**) &_single_user, (uchar**) &_single_user, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "timeout", 't', "Timeout to wait",
- (gptr*) &_timeout, (gptr*) &_timeout, 0,
+ (uchar**) &_timeout, (uchar**) &_timeout, 0,
GET_INT, REQUIRED_ARG, 120, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};