-rw-r--r--  Docs/Makefile.am       3
-rw-r--r--  Docs/manual.texi      35
-rw-r--r--  myisam/myisamchk.c     1
-rw-r--r--  mysql.proj           bin 167936 -> 176128 bytes
-rw-r--r--  sql/field.cc          65
-rw-r--r--  sql/field.h            5
-rw-r--r--  sql/ha_berkeley.cc    47
-rw-r--r--  sql/ha_berkeley.h      2
-rw-r--r--  sql/key.cc             2
-rw-r--r--  sql/mysqld.cc          3
10 files changed, 150 insertions, 13 deletions
diff --git a/Docs/Makefile.am b/Docs/Makefile.am
index 0fe0ab3a56d..2840bdae38d 100644
--- a/Docs/Makefile.am
+++ b/Docs/Makefile.am
@@ -139,3 +139,6 @@ INSTALL-BINARY: mysql.info $(GT)
../MIRRORS: manual.texi $(srcdir)/Support/generate-mirror-listing.pl
perl -w $(srcdir)/Support/generate-mirror-listing.pl manual.texi > $@
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 510c2b45317..821a3516816 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -813,6 +813,7 @@ MySQL change history
Changes in release 3.23.x (Recommended; beta)
+* News-3.23.28:: Changes in release 3.23.28
* News-3.23.27:: Changes in release 3.23.27
* News-3.23.26:: Changes in release 3.23.26
* News-3.23.25:: Changes in release 3.23.25
@@ -19862,6 +19863,13 @@ The buffer that is allocated to cache index and rows for @code{BDB} tables.
If you don't use @code{BDB} tables, you should set this to 0 or
start @code{mysqld} with @code{--skip-bdb} to not waste memory for this cache.
+@item @code{bdb_lock_max}
+The maximum number of locks (1000 by default) you can have active on a BDB
+table. You should increase this if you get errors of type
+@code{bdb: Lock table is out of available locks} when you do long
+transactions or when @code{mysqld} has to examine many rows to
+execute the query.
+
@item @code{concurrent_inserts}
If @code{ON} (the default), @strong{MySQL} will allow you to use @code{INSERT}
on @code{MyISAM} tables at the same time as you run @code{SELECT} queries
@@ -24669,7 +24677,7 @@ limits. Here are some examples:
@multitable @columnfractions .5 .5
@item @strong{Operating System} @tab @strong{File Size Limit}
-@item Linux-Intel @tab 2G (or 4G with reiserfs)
+@item Linux-Intel 32-bit @tab 2G, 4G, or bigger depending on Linux version
@item Linux-Alpha @tab 8T (?)
@item Solaris 2.5.1 @tab 2G (possibly 4G with patch)
@item Solaris 2.6 @tab 4G
@@ -24677,6 +24685,10 @@ limits. Here are some examples:
@item Solaris 2.7 ULTRA-SPARC @tab 8T (?)
@end multitable
+On Linux 2.2 you can get tables bigger than 2G by using the LFS patch for
+the ext2 file system. On Linux 2.4 there are also patches for ReiserFS
+that add support for big files.
+
This means that the table size for @strong{MySQL} is normally limited by
the operating system.
@@ -24690,14 +24702,15 @@ this), you should set the @code{AVG_ROW_LENGTH} and @code{MAX_ROWS}
parameter when you create your table. @xref{CREATE TABLE}. You can
also set these later with @code{ALTER TABLE}. @xref{ALTER TABLE}.
-If you need to have bigger tables than 2G / 4G
-
If your big table is going to be read-only, you could use
@code{myisampack} to merge and compress many tables to one.
@code{myisampack} usually compresses a table by at least 50%, so you can
have, in effect, much bigger tables. @xref{myisampack, ,
@code{myisampack}}.
+You can get around the operating system file limit for @code{MyISAM} data
+files by using the @code{RAID} option. @xref{CREATE TABLE}.
+
Another solution can be the included MERGE library, which allows you to
handle a collection of identical tables as one. @xref{MERGE, MERGE
tables}.
@@ -25430,7 +25443,9 @@ multiple CPU machines one should use Solaris (because the threads works
really nice) or Linux (because the 2.2 kernel has really good SMP
support). Also on 32-bit machines Linux has a 2G file size limit by
default. Hopefully this will be fixed soon when new filesystems are
-released (XFS/Reiserfs).
+released (XFS/Reiserfs). If you have a desperate need for files bigger
+than 2G on Linux-Intel 32-bit, you should get the LFS patch for the ext2
+file system.
Because we have not run @strong{MySQL} in production on that many platforms we
advise you to test your intended platform before choosing it, if possible.
@@ -38136,6 +38151,7 @@ version. The replication and BerkeleyDB code is still under development,
though, so 3.23 is not released as a stable version yet.
@menu
+* News-3.23.28:: Changes in release 3.23.28
* News-3.23.27:: Changes in release 3.23.27
* News-3.23.26:: Changes in release 3.23.26
* News-3.23.25:: Changes in release 3.23.25
@@ -38166,7 +38182,16 @@ though, so 3.23 is not released as a stable version yet.
* News-3.23.0:: Changes in release 3.23.0
@end menu
-@node News-3.23.27, News-3.23.26, News-3.23.x, News-3.23.x
+@node News-3.23.28, News-3.23.27, News-3.23.x, News-3.23.x
+@appendixsubsec Changes in release 3.23.28
+@itemize @bullet
+@item
+Fixed bug in a BDB key compare function when comparing part keys.
+@item
+Added variable @code{bdb_lock_max} to @code{mysqld}.
+@end itemize
+
+@node News-3.23.27, News-3.23.26, News-3.23.28, News-3.23.x
@appendixsubsec Changes in release 3.23.27
@itemize @bullet
@item
diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c
index 29259d15c1a..b18f42bfb81 100644
--- a/myisam/myisamchk.c
+++ b/myisam/myisamchk.c
@@ -1378,7 +1378,6 @@ void mi_check_print_info(MI_CHECK *param __attribute__((unused)),
VOID(vfprintf(stdout, fmt, args));
VOID(fputc('\n',stdout));
va_end(args);
- return;
}
/* VARARGS */
diff --git a/mysql.proj b/mysql.proj
index 0cf0074c048..ed591da265e 100644
--- a/mysql.proj
+++ b/mysql.proj
Binary files differ
diff --git a/sql/field.cc b/sql/field.cc
index 459c53ffcb1..639c2def068 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -3439,6 +3439,23 @@ int Field_string::pack_cmp(const char *a, const char *b, uint length)
}
+/* Compare a packed key value in 'b' against this field's current row value */
+int Field_string::pack_cmp(const char *b, uint length)
+{
+ uint b_length= (uint) (uchar) *b++;
+ char *end= ptr + field_length;
+ while (end > ptr && end[-1] == ' ')
+ end--;
+ uint a_length = (uint) (end - ptr);
+
+ if (binary_flag)
+ {
+ int cmp= memcmp(ptr,b,min(a_length,b_length));
+ return cmp ? cmp : (int) (a_length - b_length);
+ }
+ return my_sortncmp(ptr,a_length, b, b_length);
+}
+
+
uint Field_string::packed_col_length(const char *ptr)
{
if (field_length > 255)
@@ -3637,6 +3654,27 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length)
return my_sortncmp(a,a_length, b,b_length);
}
+/* Compare a packed key value in 'b' against this field's current row value */
+int Field_varstring::pack_cmp(const char *b, uint key_length)
+{
+ char *a=ptr+2;
+ uint a_length=uint2korr(ptr);
+ uint b_length;
+ if (key_length > 255)
+ {
+ b_length=uint2korr(b); b+=2;
+ }
+ else
+ {
+ b_length= (uint) (uchar) *b++;
+ }
+ if (binary_flag)
+ {
+ int cmp= memcmp(a,b,min(a_length,b_length));
+ return cmp ? cmp : (int) (a_length - b_length);
+ }
+ return my_sortncmp(a,a_length, b,b_length);
+}
+
uint Field_varstring::packed_col_length(const char *ptr)
{
if (field_length > 255)
@@ -4019,6 +4057,33 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length)
return my_sortncmp(a,a_length, b,b_length);
}
+
+/* Compare a packed key value in 'b' against this blob's current row value */
+int Field_blob::pack_cmp(const char *b, uint key_length)
+{
+ char *a;
+ memcpy_fixed(&a,ptr+packlength,sizeof(char*));
+ if (!a)
+ return key_length > 0 ? -1 : 0;
+ uint a_length=get_length(ptr);
+ uint b_length;
+
+ if (key_length > 255)
+ {
+ b_length=uint2korr(b); b+=2;
+ }
+ else
+ {
+ b_length= (uint) (uchar) *b++;
+ }
+ if (binary_flag)
+ {
+ int cmp= memcmp(a,b,min(a_length,b_length));
+ return cmp ? cmp : (int) (a_length - b_length);
+ }
+ return my_sortncmp(a,a_length, b,b_length);
+}
+
+
char *Field_blob::pack_key(char *to, const char *from, uint max_length)
{
uint length=uint2korr(to);
diff --git a/sql/field.h b/sql/field.h
index f8ba329375b..4af7c17486a 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -177,6 +177,8 @@ public:
virtual int pack_cmp(const char *a,const char *b, uint key_length_arg)
{ return cmp(a,b); }
+ virtual int pack_cmp(const char *b, uint key_length_arg)
+ { return cmp(ptr,b); }
uint offset(); // Should be inline ...
void copy_from_tmp(int offset);
uint fill_cache_field(struct st_cache_field *copy);
@@ -726,6 +728,7 @@ public:
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
const char *unpack(char* to, const char *from);
int pack_cmp(const char *a,const char *b,uint key_length);
+ int pack_cmp(const char *b,uint key_length);
uint packed_col_length(const char *to);
uint max_packed_col_length(uint max_length);
uint size_of() const { return sizeof(*this); }
@@ -777,6 +780,7 @@ public:
char *pack(char *to, const char *from, uint max_length=~(uint) 0);
const char *unpack(char* to, const char *from);
int pack_cmp(const char *a, const char *b, uint key_length);
+ int pack_cmp(const char *b, uint key_length);
uint packed_col_length(const char *to);
uint max_packed_col_length(uint max_length);
uint size_of() const { return sizeof(*this); }
@@ -891,6 +895,7 @@ public:
}
char *pack_key(char *to, const char *from, uint max_length=~(uint) 0);
int pack_cmp(const char *a, const char *b, uint key_length);
+ int pack_cmp(const char *b, uint key_length);
uint packed_col_length(const char *col_ptr)
{ return get_length(col_ptr)+packlength;}
virtual uint max_packed_col_length(uint max_length)
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 06d0927854f..1239c7db7d3 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -68,6 +68,7 @@ ulong berkeley_cache_size;
char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
long berkeley_lock_scan_time=0;
ulong berkeley_trans_retry=5;
+ulong berkeley_lock_max;
pthread_mutex_t bdb_mutex;
static DB_ENV *db_env;
@@ -116,6 +117,8 @@ bool berkeley_init(void)
db_env->set_cachesize(db_env, 0, berkeley_cache_size, 0);
db_env->set_lk_detect(db_env, berkeley_lock_type);
+ if (berkeley_lock_max)
+ db_env->set_lk_max(db_env, berkeley_lock_max);
if (db_env->open(db_env,
berkeley_home,
berkeley_init_flags | DB_INIT_LOCK |
@@ -224,8 +227,13 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key)
int cmp;
if (key_part->null_bit)
{
- if (*new_key_ptr++ != *saved_key_ptr++)
- return ((int) new_key_ptr[-1] - (int) saved_key_ptr[-1]);
+ if (*new_key_ptr != *saved_key_ptr++)
+ return ((int) *new_key_ptr - (int) saved_key_ptr[-1]);
+ if (!*new_key_ptr++)
+ {
+ key_length--;
+ continue;
+ }
}
if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,
key_part->length)))
@@ -263,6 +271,36 @@ berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key)
}
+/* Compare key against row */
+
+static bool
+berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length)
+{
+ KEY_PART_INFO *key_part= key_info->key_part,
+ *end=key_part+key_info->key_parts;
+
+ for ( ; key_part != end && (int) key_length > 0; key_part++)
+ {
+ int cmp;
+ if (key_part->null_bit)
+ {
+ key_length--;
+      if (*key != ((table->record[0][key_part->null_offset] &
+                    key_part->null_bit) ? 0 : 1))
+ return 1;
+ if (!*key++) // Null value
+ continue;
+ }
+ if ((cmp=key_part->field->pack_cmp(key,key_part->length)))
+ return cmp;
+ uint length=key_part->field->packed_col_length(key);
+ key+=length;
+ key_length-=length;
+ }
+ return 0;
+}
+
+
int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
{
char name_buff[FN_REFLEN];
@@ -1118,9 +1156,8 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
key_info->handler.bdb_return_if_eq= 0;
if (!error && find_flag == HA_READ_KEY_EXACT)
{
- /* Check that we didn't find a key that wasn't equal to the current
- one */
- if (!error && ::key_cmp(table, key_buff2, active_index, key_len))
+ /* Ensure that we found a key that is equal to the current one */
+ if (!error && berkeley_key_cmp(table, key_info, key_buff2, key_len))
error=HA_ERR_KEY_NOT_FOUND;
}
}
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index b44b112b0ed..84061ae09be 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -147,7 +147,7 @@ class ha_berkeley: public handler
extern bool berkeley_skip;
extern u_int32_t berkeley_init_flags,berkeley_lock_type,berkeley_lock_types[];
-extern ulong berkeley_cache_size;
+extern ulong berkeley_cache_size, berkeley_lock_max;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
diff --git a/sql/key.cc b/sql/key.cc
index 8678202922e..87595fda06d 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -171,7 +171,7 @@ int key_cmp(TABLE *table,const byte *key,uint idx,uint key_length)
{
key_length--;
if (*key != test(table->record[0][key_part->null_offset] &
- key_part->null_bit))
+ key_part->null_bit))
return 1;
if (*key)
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 7e156077723..6cb24de17c5 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2333,6 +2333,8 @@ CHANGEABLE_VAR changeable_vars[] = {
#ifdef HAVE_BERKELEY_DB
{ "bdb_cache_size", (long*) &berkeley_cache_size,
KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE },
+ { "bdb_lock_max", (long*) &berkeley_lock_max,
+ 1000, 0, (long) ~0, 0, 1 },
#endif
{ "connect_timeout", (long*) &connect_timeout,
CONNECT_TIMEOUT, 2, 65535, 0, 1 },
@@ -2413,6 +2415,7 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_BERKELEY_DB
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG},
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
+ {"bdb_lock_max", (char*) &berkeley_lock_max, SHOW_LONG},
{"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
#endif