summary | refs | log | tree | commit | diff
path: root/storage
diff options
context:
space:
mode:
author: unknown <ndbdev@dl145c.mysql.com> 2005-09-06 09:20:15 +0200
committer: unknown <ndbdev@dl145c.mysql.com> 2005-09-06 09:20:15 +0200
commit: f6c1d4191cf001ba9e895e174bec2f443b28dbc0 (patch)
tree: eba8383b8ab4df02b107bb55ef6ec8c5abb477ce /storage
parent: 644a444b6b3df10bc2de4f26a486594df42b932c (diff)
parent: b48e721d2f2e4f2ae591de5c581e002db8d7cc88 (diff)
download: mariadb-git-f6c1d4191cf001ba9e895e174bec2f443b28dbc0.tar.gz
Merge
Makefile.am: Auto merged BitKeeper/etc/config: Auto merged scripts/Makefile.am: Auto merged sql/ha_federated.h: Auto merged sql/ha_innodb.cc: Auto merged sql/item.cc: Auto merged sql/item_subselect.cc: Auto merged sql/mysql_priv.h: Auto merged sql/set_var.cc: Auto merged sql/sp.cc: Auto merged sql/sql_base.cc: Auto merged sql/sql_class.h: Auto merged sql/sql_delete.cc: Auto merged sql/sql_insert.cc: Auto merged sql/sql_lex.cc: Auto merged sql/sql_lex.h: Auto merged sql/sql_parse.cc: Auto merged sql/sql_select.cc: Auto merged sql/sql_show.cc: Auto merged sql/sql_update.cc: Auto merged sql/sql_yacc.yy: Auto merged sql/table.cc: Auto merged sql/table.h: Auto merged storage/innobase/include/page0page.ic: Auto merged storage/innobase/include/univ.i: Auto merged storage/innobase/row/row0mysql.c: Auto merged storage/innobase/row/row0sel.c: Auto merged storage/myisam/mi_check.c: Auto merged storage/myisam/mi_packrec.c: Auto merged storage/myisam/mi_static.c: Auto merged storage/myisam/myisamdef.h: Auto merged storage/myisam/myisampack.c: Auto merged storage/ndb/include/kernel/signaldata/SignalData.hpp: Auto merged storage/ndb/include/kernel/signaldata/StartFragReq.hpp: Auto merged storage/ndb/src/common/debugger/EventLogger.cpp: Auto merged storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp: Auto merged storage/ndb/src/common/debugger/signaldata/StartRec.cpp: Auto merged storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: Auto merged storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp: Auto merged storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp: Auto merged storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp: Auto merged storage/ndb/src/kernel/main.cpp: Auto merged storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: Auto merged storage/ndb/src/kernel/vm/Emulator.cpp: Auto merged sql/log_event.cc: SCCS merged sql/sql_prepare.cc: SCCS merged sql/share/errmsg.txt: merge
Diffstat (limited to 'storage')
-rw-r--r--  storage/innobase/include/page0page.ic                                      |  13
-rw-r--r--  storage/innobase/include/univ.i                                            |   4
-rw-r--r--  storage/innobase/row/row0mysql.c                                           |   5
-rw-r--r--  storage/innobase/row/row0sel.c                                             | 101
-rw-r--r--  storage/myisam/mi_check.c                                                  |   6
-rw-r--r--  storage/myisam/mi_packrec.c                                                | 113
-rw-r--r--  storage/myisam/mi_static.c                                                 |   2
-rw-r--r--  storage/myisam/myisamdef.h                                                 |   5
-rw-r--r--  storage/myisam/myisampack.c                                                |  26
-rw-r--r--  storage/ndb/include/kernel/signaldata/SignalData.hpp                       |   1
-rw-r--r--  storage/ndb/include/kernel/signaldata/StartFragReq.hpp                     |   2
-rw-r--r--  storage/ndb/src/common/debugger/EventLogger.cpp                            |   7
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp             |   1
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/StartRec.cpp                    |  25
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp                          |  12
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp              |   4
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp              |   2
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp    |  25
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp                            |   2
-rw-r--r--  storage/ndb/src/kernel/main.cpp                                            |   2
-rw-r--r--  storage/ndb/src/kernel/vm/Emulator.cpp                                     |  26
21 files changed, 233 insertions, 151 deletions
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index fd5281fdbec..655ff245aa8 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -175,6 +175,19 @@ page_rec_is_comp(
/* out: nonzero if in compact format */
const rec_t* rec) /* in: record */
{
+#ifdef UNIV_RELEASE_NOT_YET_STABLE
+ if (UNIV_UNLIKELY((ulint)rec < (ulint)(buf_pool->frame_zero))
+ || UNIV_UNLIKELY((ulint)rec >= (ulint)(buf_pool->high_end))) {
+
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+"InnoDB: Error: trying to read a stray page rec %p\n"
+"InnoDB: buf pool start is at %p, end at %p\n",
+ rec, buf_pool->frame_zero,
+ buf_pool->high_end);
+ ut_error;
+ }
+#endif
return(page_is_comp(ut_align_down((rec_t*) rec, UNIV_PAGE_SIZE)));
}
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 6849dcd9c51..15650f22ed8 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -80,6 +80,10 @@ memory is read outside the allocated blocks. */
/* Make a non-inline debug version */
+/* You can remove this define when the release is stable. This define adds
+some consistency checks to code. They use a little CPU time. */
+#define UNIV_RELEASE_NOT_YET_STABLE
+
/*
#define UNIV_DEBUG
#define UNIV_MEM_DEBUG
diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c
index 2ac0824b331..29239210183 100644
--- a/storage/innobase/row/row0mysql.c
+++ b/storage/innobase/row/row0mysql.c
@@ -513,14 +513,15 @@ handle_new_error:
return(TRUE);
- } else if (err == DB_DEADLOCK || err == DB_LOCK_WAIT_TIMEOUT
+ } else if (err == DB_DEADLOCK
|| err == DB_LOCK_TABLE_FULL) {
/* Roll back the whole transaction; this resolution was added
to version 3.23.43 */
trx_general_rollback_for_mysql(trx, FALSE, NULL);
- } else if (err == DB_OUT_OF_FILE_SPACE) {
+ } else if (err == DB_OUT_OF_FILE_SPACE
+ || err == DB_LOCK_WAIT_TIMEOUT) {
if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c
index a77010d939b..1b66f14f5d7 100644
--- a/storage/innobase/row/row0sel.c
+++ b/storage/innobase/row/row0sel.c
@@ -2724,7 +2724,9 @@ row_sel_get_clust_rec_for_mysql(
if (trx->isolation_level > TRX_ISO_READ_UNCOMMITTED
&& !lock_clust_rec_cons_read_sees(clust_rec, clust_index,
*offsets, trx->read_view)) {
-
+
+ /* The following call returns 'offsets' associated with
+ 'old_vers' */
err = row_sel_build_prev_vers_for_mysql(
trx->read_view, clust_index,
prebuilt, clust_rec,
@@ -3055,13 +3057,14 @@ row_search_for_mysql(
cursor 'direction' should be 0. */
{
dict_index_t* index = prebuilt->index;
+ ibool comp = index->table->comp;
dtuple_t* search_tuple = prebuilt->search_tuple;
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
dict_index_t* clust_index;
que_thr_t* thr;
rec_t* rec;
- rec_t* index_rec;
+ rec_t* result_rec;
rec_t* clust_rec;
rec_t* old_vers;
ulint err = DB_SUCCESS;
@@ -3491,7 +3494,7 @@ rec_loop:
/* PHASE 4: Look for matching records in a loop */
rec = btr_pcur_get_rec(pcur);
- ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
+ ut_ad(!!page_rec_is_comp(rec) == comp);
#ifdef UNIV_SEARCH_DEBUG
/*
fputs("Using ", stderr);
@@ -3544,19 +3547,23 @@ rec_loop:
/* Do sanity checks in case our cursor has bumped into page
corruption */
- if (page_rec_is_comp(rec)) {
+ if (comp) {
next_offs = rec_get_next_offs(rec, TRUE);
if (UNIV_UNLIKELY(next_offs < PAGE_NEW_SUPREMUM)) {
+
goto wrong_offs;
}
} else {
next_offs = rec_get_next_offs(rec, FALSE);
if (UNIV_UNLIKELY(next_offs < PAGE_OLD_SUPREMUM)) {
+
goto wrong_offs;
}
}
+
if (UNIV_UNLIKELY(next_offs >= UNIV_PAGE_SIZE - PAGE_DIR)) {
- wrong_offs:
+
+wrong_offs:
if (srv_force_recovery == 0 || moves_up == FALSE) {
ut_print_timestamp(stderr);
buf_page_print(buf_frame_align(rec));
@@ -3599,6 +3606,9 @@ rec_loop:
goto next_rec;
}
}
+ /*-------------------------------------------------------------*/
+
+ /* Calculate the 'offsets' associated with 'rec' */
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
@@ -3619,8 +3629,6 @@ rec_loop:
}
}
- /*-------------------------------------------------------------*/
-
/* Note that we cannot trust the up_match value in the cursor at this
place because we can arrive here after moving the cursor! Thus
we have to recompare rec and search_tuple to determine if they
@@ -3711,7 +3719,7 @@ rec_loop:
if (!set_also_gap_locks
|| srv_locks_unsafe_for_binlog
|| (unique_search && !UNIV_UNLIKELY(rec_get_deleted_flag(
- rec, page_rec_is_comp(rec))))) {
+ rec, comp)))) {
goto no_gap_lock;
} else {
@@ -3767,6 +3775,8 @@ no_gap_lock:
&& !lock_clust_rec_cons_read_sees(rec, index,
offsets, trx->read_view)) {
+ /* The following call returns 'offsets'
+ associated with 'old_vers' */
err = row_sel_build_prev_vers_for_mysql(
trx->read_view, clust_index,
prebuilt, rec,
@@ -3795,19 +3805,20 @@ no_gap_lock:
is necessary, because we can only get the undo
information via the clustered index record. */
- /* Get the clustered index record if needed */
- index_rec = rec;
ut_ad(index != clust_index);
goto requires_clust_rec;
}
}
- if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, page_rec_is_comp(rec)))) {
+ /* NOTE that at this point rec can be an old version of a clustered
+ index record built for a consistent read. We cannot assume after this
+ point that rec is on a buffer pool page. Functions like
+ page_rec_is_comp() cannot be used! */
- /* The record is delete-marked: we can skip it if this is
- not a consistent read which might see an earlier version
- of a non-clustered index record */
+ if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, comp))) {
+
+ /* The record is delete-marked: we can skip it */
if (srv_locks_unsafe_for_binlog
&& prebuilt->select_lock_type != LOCK_NONE) {
@@ -3823,25 +3834,26 @@ no_gap_lock:
goto next_rec;
}
- /* Get the clustered index record if needed and if we did
- not do the search using the clustered index */
-
- index_rec = rec;
+ /* Get the clustered index record if needed, if we did not do the
+ search using the clustered index. */
if (index != clust_index && prebuilt->need_to_access_clustered) {
requires_clust_rec:
- /* Before and after this "if" block, "offsets" will be
- related to "rec", which may be in a secondary index "index" or
- the clustered index ("clust_index"). However, after this
- "if" block, "rec" may be pointing to
- "clust_rec" of "clust_index". */
+ /* We use a 'goto' to the preceding label if a consistent
+ read of a secondary index record requires us to look up old
+ versions of the associated clustered index record. */
+
ut_ad(rec_offs_validate(rec, index, offsets));
/* It was a non-clustered index and we must fetch also the
clustered index record */
mtr_has_extra_clust_latch = TRUE;
+
+ /* The following call returns 'offsets' associated with
+ 'clust_rec'. Note that 'clust_rec' can be an old version
+ built for a consistent read. */
err = row_sel_get_clust_rec_for_mysql(prebuilt, index, rec,
thr, &clust_rec,
@@ -3858,8 +3870,7 @@ requires_clust_rec:
goto next_rec;
}
- if (UNIV_UNLIKELY(rec_get_deleted_flag(clust_rec,
- page_rec_is_comp(clust_rec)))) {
+ if (UNIV_UNLIKELY(rec_get_deleted_flag(clust_rec, comp))) {
/* The record is delete marked: we can skip it */
@@ -3879,17 +3890,27 @@ requires_clust_rec:
}
if (prebuilt->need_to_access_clustered) {
- rec = clust_rec;
- ut_ad(rec_offs_validate(rec, clust_index, offsets));
+
+ result_rec = clust_rec;
+
+ ut_ad(rec_offs_validate(result_rec, clust_index,
+ offsets));
} else {
+ /* We used 'offsets' for the clust rec, recalculate
+ them for 'rec' */
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
+ result_rec = rec;
}
+ } else {
+ result_rec = rec;
}
- /* We found a qualifying row */
- ut_ad(rec_offs_validate(rec,
- rec == clust_rec ? clust_index : index,
+ /* We found a qualifying record 'result_rec'. At this point,
+ 'offsets' are associated with 'result_rec'. */
+
+ ut_ad(rec_offs_validate(result_rec,
+ result_rec != rec ? clust_index : index,
offsets));
if ((match_mode == ROW_SEL_EXACT
@@ -3910,8 +3931,8 @@ requires_clust_rec:
not cache rows because there the cursor is a scrollable
cursor. */
- row_sel_push_cache_row_for_mysql(prebuilt, rec, offsets);
-
+ row_sel_push_cache_row_for_mysql(prebuilt, result_rec,
+ offsets);
if (prebuilt->n_fetch_cached == MYSQL_FETCH_CACHE_SIZE) {
goto got_row;
@@ -3920,13 +3941,14 @@ requires_clust_rec:
goto next_rec;
} else {
if (prebuilt->template_type == ROW_MYSQL_DUMMY_TEMPLATE) {
- memcpy(buf + 4, rec - rec_offs_extra_size(offsets),
+ memcpy(buf + 4, result_rec
+ - rec_offs_extra_size(offsets),
rec_offs_size(offsets));
mach_write_to_4(buf,
rec_offs_extra_size(offsets) + 4);
} else {
if (!row_sel_store_mysql_rec(buf, prebuilt,
- rec, offsets)) {
+ result_rec, offsets)) {
err = DB_TOO_BIG_RECORD;
goto lock_wait_or_error;
@@ -3934,15 +3956,18 @@ requires_clust_rec:
}
if (prebuilt->clust_index_was_generated) {
- if (rec != index_rec) {
+ if (result_rec != rec) {
offsets = rec_get_offsets(
- index_rec, index, offsets,
+ rec, index, offsets,
ULINT_UNDEFINED, &heap);
}
- row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
+ row_sel_store_row_id_to_prebuilt(prebuilt, rec,
index, offsets);
}
}
+
+ /* From this point on, 'offsets' are invalid. */
+
got_row:
/* We have an optimization to save CPU time: if this is a consistent
read on a unique condition on the clustered index, then we do not
@@ -3993,7 +4018,7 @@ next_rec:
if (moves_up) {
if (UNIV_UNLIKELY(!btr_pcur_move_to_next(pcur, &mtr))) {
- not_moved:
+not_moved:
btr_pcur_store_position(pcur, &mtr);
if (match_mode != 0) {
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index ffb7cdd503f..ee64f9b9979 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -3194,9 +3194,11 @@ int sort_write_record(MI_SORT_PARAM *sort_param)
break;
case COMPRESSED_RECORD:
reclength=info->packed_length;
- length=save_pack_length(block_buff,reclength);
+ length= save_pack_length((uint) share->pack.version, block_buff,
+ reclength);
if (info->s->base.blobs)
- length+=save_pack_length(block_buff+length,info->blob_length);
+ length+= save_pack_length((uint) share->pack.version,
+ block_buff + length, info->blob_length);
if (my_b_write(&info->rec_cache,block_buff,length) ||
my_b_write(&info->rec_cache,(byte*) sort_param->rec_buff,reclength))
{
diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c
index c251e4dda4a..e242e9d506d 100644
--- a/storage/myisam/mi_packrec.c
+++ b/storage/myisam/mi_packrec.c
@@ -151,11 +151,12 @@ my_bool _mi_read_pack_info(MI_INFO *info, pbool fix_keys)
my_errno=HA_ERR_END_OF_FILE;
goto err0;
}
- if (memcmp((byte*) header,(byte*) myisam_pack_file_magic,4))
+ if (memcmp((byte*) header, (byte*) myisam_pack_file_magic, 3))
{
my_errno=HA_ERR_WRONG_IN_RECORD;
goto err0;
}
+ share->pack.version= header[3];
share->pack.header_length= uint4korr(header+4);
share->min_pack_length=(uint) uint4korr(header+8);
share->max_pack_length=(uint) uint4korr(header+12);
@@ -1070,38 +1071,12 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file,
return BLOCK_FATAL_ERROR;
DBUG_DUMP("header",(byte*) header,ref_length);
}
- if (header[0] < 254)
- {
- info->rec_len=header[0];
- head_length=1;
- }
- else if (header[0] == 254)
- {
- info->rec_len=uint2korr(header+1);
- head_length=3;
- }
- else
- {
- info->rec_len=uint3korr(header+1);
- head_length=4;
- }
+ head_length= read_pack_length((uint) myisam->s->pack.version, header,
+ &info->rec_len);
if (myisam->s->base.blobs)
{
- if (header[head_length] < 254)
- {
- info->blob_len=header[head_length];
- head_length++;
- }
- else if (header[head_length] == 254)
- {
- info->blob_len=uint2korr(header+head_length+1);
- head_length+=3;
- }
- else
- {
- info->blob_len=uint3korr(header+head_length+1);
- head_length+=4;
- }
+ head_length+= read_pack_length((uint) myisam->s->pack.version,
+ header + head_length, &info->blob_len);
if (!(mi_alloc_rec_buff(myisam,info->rec_len + info->blob_len,
&myisam->rec_buff)))
return BLOCK_FATAL_ERROR; /* not enough memory */
@@ -1251,34 +1226,12 @@ void _mi_unmap_file(MI_INFO *info)
static uchar *_mi_mempack_get_block_info(MI_INFO *myisam,MI_BLOCK_INFO *info,
uchar *header)
{
- if (header[0] < 254)
- info->rec_len= *header++;
- else if (header[0] == 254)
- {
- info->rec_len=uint2korr(header+1);
- header+=3;
- }
- else
- {
- info->rec_len=uint3korr(header+1);
- header+=4;
- }
+ header+= read_pack_length((uint) myisam->s->pack.version, header,
+ &info->rec_len);
if (myisam->s->base.blobs)
{
- if (header[0] < 254)
- {
- info->blob_len= *header++;
- }
- else if (header[0] == 254)
- {
- info->blob_len=uint2korr(header+1);
- header+=3;
- }
- else
- {
- info->blob_len=uint3korr(header+1);
- header+=4;
- }
+ header+= read_pack_length((uint) myisam->s->pack.version, header,
+ &info->blob_len);
/* mi_alloc_rec_buff sets my_errno on error */
if (!(mi_alloc_rec_buff(myisam, info->blob_len,
&myisam->rec_buff)))
@@ -1350,7 +1303,7 @@ static int _mi_read_rnd_mempack_record(MI_INFO *info, byte *buf,
/* Save length of row */
-uint save_pack_length(byte *block_buff,ulong length)
+uint save_pack_length(uint version, byte *block_buff, ulong length)
{
if (length < 254)
{
@@ -1364,6 +1317,46 @@ uint save_pack_length(byte *block_buff,ulong length)
return 3;
}
*(uchar*) block_buff=255;
- int3store(block_buff+1,(ulong) length);
- return 4;
+ if (version == 1) /* old format */
+ {
+ DBUG_ASSERT(length <= 0xFFFFFF);
+ int3store(block_buff + 1, (ulong) length);
+ return 4;
+ }
+ else
+ {
+ int4store(block_buff + 1, (ulong) length);
+ return 5;
+ }
+}
+
+
+uint read_pack_length(uint version, const uchar *buf, ulong *length)
+{
+ if (buf[0] < 254)
+ {
+ *length= buf[0];
+ return 1;
+ }
+ else if (buf[0] == 254)
+ {
+ *length= uint2korr(buf + 1);
+ return 3;
+ }
+ if (version == 1) /* old format */
+ {
+ *length= uint3korr(buf + 1);
+ return 4;
+ }
+ else
+ {
+ *length= uint4korr(buf + 1);
+ return 5;
+ }
+}
+
+
+uint calc_pack_length(uint version, ulong length)
+{
+ return (length < 254) ? 1 : (length < 65536) ? 3 : (version == 1) ? 4 : 5;
}
diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c
index 4c9d814f7d6..fc585eb5543 100644
--- a/storage/myisam/mi_static.c
+++ b/storage/myisam/mi_static.c
@@ -27,7 +27,7 @@ LIST *myisam_open_list=0;
uchar NEAR myisam_file_magic[]=
{ (uchar) 254, (uchar) 254,'\007', '\001', };
uchar NEAR myisam_pack_file_magic[]=
-{ (uchar) 254, (uchar) 254,'\010', '\001', };
+{ (uchar) 254, (uchar) 254,'\010', '\002', };
my_string myisam_log_filename=(char*) "myisam.log";
File myisam_log_file= -1;
uint myisam_quick_table_bits=9;
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index 74463ec065a..82f7fd7360e 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -149,6 +149,7 @@ typedef struct st_mi_blob /* Info of record */
typedef struct st_mi_isam_pack {
ulong header_length;
uint ref_length;
+ uchar version;
} MI_PACK;
@@ -673,7 +674,9 @@ extern void _myisam_log_record(enum myisam_log_commands command,MI_INFO *info,
extern void mi_report_error(int errcode, const char *file_name);
extern my_bool _mi_memmap_file(MI_INFO *info);
extern void _mi_unmap_file(MI_INFO *info);
-extern uint save_pack_length(byte *block_buff,ulong length);
+extern uint save_pack_length(uint version, byte *block_buff, ulong length);
+extern uint read_pack_length(uint version, const uchar *buf, ulong *length);
+extern uint calc_pack_length(uint version, ulong length);
uint mi_state_info_write(File file, MI_STATE_INFO *state, uint pWrite);
char *mi_state_info_read(uchar *ptr, MI_STATE_INFO *state);
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index b8f21392f8a..3b091cd6ea2 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -2417,6 +2417,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
HUFF_COUNTS *count,*end_count;
HUFF_TREE *tree;
MI_INFO *isam_file=mrg->file[0];
+ uint pack_version= (uint) isam_file->s->pack.version;
DBUG_ENTER("compress_isam_file");
/* Allocate a buffer for the records (excluding blobs). */
@@ -2455,25 +2456,11 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
huff_counts[i].tree->height+huff_counts[i].length_bits;
}
max_calc_length= (max_calc_length + 7) / 8;
- if (max_calc_length < 254)
- pack_ref_length=1;
- else if (max_calc_length <= 65535)
- pack_ref_length=3;
- else
- pack_ref_length=4;
-
+ pack_ref_length= calc_pack_length(pack_version, max_calc_length);
record_count=0;
/* 'max_blob_length' is the max length of all blobs of a record. */
- pack_blob_length=0;
- if (isam_file->s->base.blobs)
- {
- if (mrg->max_blob_length < 254)
- pack_blob_length=1;
- else if (mrg->max_blob_length <= 65535)
- pack_blob_length=3;
- else
- pack_blob_length=4;
- }
+ pack_blob_length= isam_file->s->base.blobs ?
+ calc_pack_length(pack_version, mrg->max_blob_length) : 0;
max_pack_length=pack_ref_length+pack_blob_length;
DBUG_PRINT("fields", ("==="));
@@ -2746,9 +2733,10 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
}
flush_bits();
length=(ulong) ((byte*) file_buffer.pos - record_pos) - max_pack_length;
- pack_length=save_pack_length(record_pos,length);
+ pack_length= save_pack_length(pack_version, record_pos, length);
if (pack_blob_length)
- pack_length+=save_pack_length(record_pos+pack_length,tot_blob_length);
+ pack_length+= save_pack_length(pack_version, record_pos + pack_length,
+ tot_blob_length);
DBUG_PRINT("fields", ("record: %lu length: %lu blob-length: %lu "
"length-bytes: %lu", (ulong) record_count, length,
tot_blob_length, pack_length));
diff --git a/storage/ndb/include/kernel/signaldata/SignalData.hpp b/storage/ndb/include/kernel/signaldata/SignalData.hpp
index f825b0feb7b..0591a85d6e6 100644
--- a/storage/ndb/include/kernel/signaldata/SignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/SignalData.hpp
@@ -222,5 +222,6 @@ GSN_PRINT_SIGNATURE(printSCAN_FRAGREQ);
GSN_PRINT_SIGNATURE(printCONTINUEB_NDBFS);
GSN_PRINT_SIGNATURE(printCONTINUEB_DBDIH);
+GSN_PRINT_SIGNATURE(printSTART_FRAG_REQ);
#endif
diff --git a/storage/ndb/include/kernel/signaldata/StartFragReq.hpp b/storage/ndb/include/kernel/signaldata/StartFragReq.hpp
index ec05c1ee366..ab17a147195 100644
--- a/storage/ndb/include/kernel/signaldata/StartFragReq.hpp
+++ b/storage/ndb/include/kernel/signaldata/StartFragReq.hpp
@@ -32,6 +32,8 @@ class StartFragReq {
public:
STATIC_CONST( SignalLength = 19 );
+ friend bool printSTART_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16);
+
private:
Uint32 userPtr;
Uint32 userRef;
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index 5a534b36b59..d18b0feb1ad 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -33,7 +33,6 @@ EventLoggerBase::~EventLoggerBase()
}
-
#define QQQQ char *m_text, size_t m_text_len, const Uint32* theData
void getTextConnected(QQQQ) {
@@ -434,10 +433,12 @@ void getTextNR_CopyFragsCompleted(QQQQ) {
void getTextLCPFragmentCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Table ID = %u, fragment ID = %u has completed LCP "
- "on Node %u",
+ "on Node %u maxGciStarted: %d maxGciCompleted: %d",
theData[2],
theData[3],
- theData[1]);
+ theData[1],
+ theData[4],
+ theData[5]);
}
void getTextTransReportCounters(QQQQ) {
// -------------------------------------------------------------------
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
index ab23c04bffa..34cae9f618f 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
@@ -198,6 +198,7 @@ SignalDataPrintFunctions[] = {
,{ GSN_ACC_LOCKREQ, printACC_LOCKREQ }
,{ GSN_LQH_TRANSCONF, printLQH_TRANSCONF }
,{ GSN_SCAN_FRAGREQ, printSCAN_FRAGREQ }
+ ,{ GSN_START_FRAGREQ, printSTART_FRAG_REQ }
,{ 0, 0 }
};
diff --git a/storage/ndb/src/common/debugger/signaldata/StartRec.cpp b/storage/ndb/src/common/debugger/signaldata/StartRec.cpp
index 482e3cb0728..54830e533c5 100644
--- a/storage/ndb/src/common/debugger/signaldata/StartRec.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/StartRec.cpp
@@ -17,6 +17,7 @@
#include <RefConvert.hpp>
#include <signaldata/StartRec.hpp>
+#include <signaldata/StartFragReq.hpp>
bool
printSTART_REC_REQ(FILE * output,
@@ -50,3 +51,27 @@ printSTART_REC_CONF(FILE * output,
return true;
}
+
+bool
+printSTART_FRAG_REQ(FILE * output,
+ const Uint32 * theData,
+ Uint32 len,
+ Uint16 recBlockNo)
+{
+ StartFragReq* sig = (StartFragReq*)theData;
+
+ fprintf(output, " table: %d frag: %d lcpId: %d lcpNo: %d #nodes: %d \n",
+ sig->tableId, sig->fragId, sig->lcpId, sig->lcpNo,
+ sig->noOfLogNodes);
+
+ for(Uint32 i = 0; i<sig->noOfLogNodes; i++)
+ {
+ fprintf(output, " (node: %d startGci: %d lastGci: %d)",
+ sig->lqhLogNode[i],
+ sig->startGci[i],
+ sig->lastGci[i]);
+ }
+
+ fprintf(output, "\n");
+ return true;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 57af5ebec72..21d6a1182be 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -9716,6 +9716,9 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
}
bool tableDone = reportLcpCompletion(lcpReport);
+
+ Uint32 started = lcpReport->maxGciStarted;
+ Uint32 completed = lcpReport->maxGciCompleted;
if(tableDone){
jam();
@@ -9749,7 +9752,9 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
signal->theData[1] = nodeId;
signal->theData[2] = tableId;
signal->theData[3] = fragId;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
+ signal->theData[4] = started;
+ signal->theData[5] = completed;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB);
#endif
bool ok = false;
@@ -10946,7 +10951,9 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
lcpNo = fmgReplicaPtr.p->nextLcp;
do {
ndbrequire(lcpNo < MAX_LCP_STORED);
- if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
+ if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID &&
+ fmgReplicaPtr.p->maxGciStarted[lcpNo] <= coldgcp)
+ {
jam();
keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
@@ -10954,7 +10961,6 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
return;
} else {
jam();
- ndbrequire(fmgReplicaPtr.p->lcpStatus[lcpNo] == ZINVALID);
if (fmgReplicaPtr.p->createGci[0] == fmgReplicaPtr.p->initialGci) {
jam();
/*-------------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
index ba6d65ca838..b7e2ab072b5 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp
@@ -134,7 +134,9 @@ bool PrepareOperationRecord::check() {
return true;
}
-Uint32 PrepareOperationRecord::getLogRecordSize() {
+Uint32 PrepareOperationRecord::getLogRecordSize(Uint32 wordsRead) {
+ if (wordsRead < 2)
+ return 2; // make sure we read more
return m_logRecordSize;
}
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
index 11b8dc4a6fa..b2da7427f4e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp
@@ -83,7 +83,7 @@ class PrepareOperationRecord {
friend NdbOut& operator<<(NdbOut&, const PrepareOperationRecord&);
public:
bool check();
- Uint32 getLogRecordSize();
+ Uint32 getLogRecordSize(Uint32 wordsRead);
protected:
Uint32 m_recordType;
diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
index aa8b1d25e4e..751d27db74e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/redoLogFileReader.cpp
@@ -41,6 +41,7 @@ void doExit();
FILE * f= 0;
char fileName[256];
+bool theDumpFlag = false;
bool thePrintFlag = true;
bool theCheckFlag = true;
bool onlyPageHeaders = false;
@@ -208,7 +209,7 @@ NDB_COMMAND(redoLogFileReader, "redoLogFileReader", "redoLogFileReader", "Read
case ZPREP_OP_TYPE:
poRecord = (PrepareOperationRecord *) redoLogPagePos;
- wordIndex += poRecord->getLogRecordSize();
+ wordIndex += poRecord->getLogRecordSize(PAGESIZE-wordIndex);
if (wordIndex <= PAGESIZE) {
if (thePrintFlag) ndbout << (*poRecord);
if (theCheckFlag) {
@@ -277,10 +278,9 @@ NDB_COMMAND(redoLogFileReader, "redoLogFileReader", "redoLogFileReader", "Read
ndbout << " ------ERROR: UNKNOWN RECORD TYPE------" << endl;
// Print out remaining data in this page
- for (int j = wordIndex; j < PAGESIZE; j++){
- Uint32 unknown = redoLogPage[i*PAGESIZE + j];
-
- ndbout_c("%-30d%-12u%-12x", j, unknown, unknown);
+ for (int k = wordIndex; k < PAGESIZE; k++){
+ Uint32 unknown = redoLogPage[i*PAGESIZE + k];
+ ndbout_c("%-30d%-12u%-12x", k, unknown, unknown);
}
doExit();
@@ -289,8 +289,19 @@ NDB_COMMAND(redoLogFileReader, "redoLogFileReader", "redoLogFileReader", "Read
if (lastPage)
+ {
+ if (theDumpFlag)
+ {
+ ndbout << " ------PAGE END: DUMPING REST OF PAGE------" << endl;
+ for (int k = wordIndex > PAGESIZE ? oldWordIndex : wordIndex;
+ k < PAGESIZE; k++)
+ {
+ Uint32 word = redoLogPage[i*PAGESIZE + k];
+ ndbout_c("%-30d%-12u%-12x", k, word, word);
+ }
+ }
break;
-
+ }
if (wordIndex > PAGESIZE) {
words_from_previous_page = PAGESIZE - oldWordIndex;
ndbout << " ----------- Record continues on next page -----------" << endl;
@@ -353,6 +364,8 @@ void readArguments(int argc, const char** argv)
{
if (strcmp(argv[i], "-noprint") == 0) {
thePrintFlag = false;
+ } else if (strcmp(argv[i], "-dump") == 0) {
+ theDumpFlag = true;
} else if (strcmp(argv[i], "-nocheck") == 0) {
theCheckFlag = false;
} else if (strcmp(argv[i], "-mbyteheaders") == 0) {
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 8c32c87e05e..27087b4c012 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -4454,7 +4454,7 @@ Dbtc::DIVER_node_fail_handling(Signal* signal, UintR Tgci)
*------------------------------------------------------------------------*/
tabortInd = ZFALSE;
setupFailData(signal);
- if (tabortInd == ZFALSE) {
+ if (false && tabortInd == ZFALSE) {
jam();
commitGciHandling(signal, Tgci);
toCommitHandlingLab(signal);
diff --git a/storage/ndb/src/kernel/main.cpp b/storage/ndb/src/kernel/main.cpp
index f679646e14a..b35929247d9 100644
--- a/storage/ndb/src/kernel/main.cpp
+++ b/storage/ndb/src/kernel/main.cpp
@@ -60,7 +60,7 @@ int main(int argc, char** argv)
NDB_INIT(argv[0]);
// Print to stdout/console
g_eventLogger.createConsoleHandler();
- g_eventLogger.setCategory("NDB");
+ g_eventLogger.setCategory("ndbd");
g_eventLogger.enable(Logger::LL_ON, Logger::LL_CRITICAL);
g_eventLogger.enable(Logger::LL_ON, Logger::LL_ERROR);
g_eventLogger.enable(Logger::LL_ON, Logger::LL_WARNING);
diff --git a/storage/ndb/src/kernel/vm/Emulator.cpp b/storage/ndb/src/kernel/vm/Emulator.cpp
index d6ed6c0dafd..f52233fc276 100644
--- a/storage/ndb/src/kernel/vm/Emulator.cpp
+++ b/storage/ndb/src/kernel/vm/Emulator.cpp
@@ -30,13 +30,15 @@
#include <NodeState.hpp>
#include <NdbMem.h>
-#include <NdbOut.hpp>
#include <NdbMutex.h>
#include <NdbSleep.h>
+#include <EventLogger.hpp>
+
extern "C" {
extern void (* ndb_new_handler)();
}
+extern EventLogger g_eventLogger;
/**
* Declare the global variables
@@ -141,23 +143,23 @@ NdbShutdown(NdbShutdownType type,
switch(type){
case NST_Normal:
- ndbout << "Shutdown initiated" << endl;
+ g_eventLogger.info("Shutdown initiated");
break;
case NST_Watchdog:
- ndbout << "Watchdog " << shutting << " system" << endl;
+ g_eventLogger.info("Watchdog %s system", shutting);
break;
case NST_ErrorHandler:
- ndbout << "Error handler " << shutting << " system" << endl;
+ g_eventLogger.info("Error handler %s system", shutting);
break;
case NST_ErrorHandlerSignal:
- ndbout << "Error handler signal " << shutting << " system" << endl;
+ g_eventLogger.info("Error handler signal %s system", shutting);
break;
case NST_Restart:
- ndbout << "Restarting system" << endl;
+ g_eventLogger.info("Restarting system");
break;
default:
- ndbout << "Error handler " << shutting << " system"
- << " (unknown type: " << (unsigned)type << ")" << endl;
+ g_eventLogger.info("Error handler %s system (unknown type: %u)",
+ shutting, (unsigned)type);
type = NST_ErrorHandler;
break;
}
@@ -173,7 +175,7 @@ NdbShutdown(NdbShutdownType type,
/**
* Very serious, don't attempt to free, just die!!
*/
- ndbout << "Watchdog shutdown completed - " << exitAbort << endl;
+ g_eventLogger.info("Watchdog shutdown completed - %s", exitAbort);
#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
signal(6, SIG_DFL);
abort();
@@ -227,7 +229,7 @@ NdbShutdown(NdbShutdownType type,
}
if(type != NST_Normal && type != NST_Restart){
- ndbout << "Error handler shutdown completed - " << exitAbort << endl;
+ g_eventLogger.info("Error handler shutdown completed - %s", exitAbort);
#if ( defined VM_TRACE || defined ERROR_INSERT ) && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
signal(6, SIG_DFL);
abort();
@@ -243,7 +245,7 @@ NdbShutdown(NdbShutdownType type,
exit(restartType);
}
- ndbout << "Shutdown completed - exiting" << endl;
+ g_eventLogger.info("Shutdown completed - exiting");
} else {
/**
* Shutdown is already in progress
@@ -253,7 +255,7 @@ NdbShutdown(NdbShutdownType type,
* If this is the watchdog, kill system the hard way
*/
if (type== NST_Watchdog){
- ndbout << "Watchdog is killing system the hard way" << endl;
+ g_eventLogger.info("Watchdog is killing system the hard way");
#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
signal(6, SIG_DFL);
abort();